repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
mogoweb/chromium-crosswalk | build/android/pylib/monkey/test_runner.py | 1 | 2677 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs a monkey test on a single device."""
import random
from pylib.base import base_test_result
from pylib.base import base_test_runner
class TestRunner(base_test_runner.BaseTestRunner):
"""A TestRunner instance runs a monkey test on a single device."""
def __init__(self, test_options, device, shard_index):
super(TestRunner, self).__init__(device, None)
self.options = test_options
def _LaunchMonkeyTest(self):
"""Runs monkey test for a given package.
Returns:
Output from the monkey command on the device.
"""
timeout_ms = self.options.event_count * self.options.throttle * 1.5
cmd = ['monkey',
'-p %s' % self.options.package_name,
' '.join(['-c %s' % c for c in self.options.category]),
'--throttle %d' % self.options.throttle,
'-s %d' % (self.options.seed or random.randint(1, 100)),
'-v ' * self.options.verbose_count,
'--monitor-native-crashes',
'--kill-process-after-error',
self.options.extra_args,
'%d' % self.options.event_count]
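    # Illustrative example of the assembled shell command (values are made up):
    #   monkey -p com.example.app -c android.intent.category.LAUNCHER \
    #     --throttle 100 -s 42 -v -v --monitor-native-crashes \
    #     --kill-process-after-error 1000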
return self.adb.RunShellCommand(' '.join(cmd), timeout_time=timeout_ms)
def RunTest(self, test_name):
"""Run a Monkey test on the device.
Args:
test_name: String to use for logging the test result.
Returns:
A tuple of (TestRunResults, retry).
"""
self.adb.StartActivity(self.options.package_name,
self.options.activity_name,
wait_for_completion=True,
action='android.intent.action.MAIN',
force_stop=True)
# Chrome crashes are not always caught by Monkey test runner.
# Verify Chrome has the same PID before and after the test.
before_pids = self.adb.ExtractPid(self.options.package_name)
# Run the test.
output = ''
if before_pids:
output = '\n'.join(self._LaunchMonkeyTest())
after_pids = self.adb.ExtractPid(self.options.package_name)
crashed = (not before_pids or not after_pids
or after_pids[0] != before_pids[0])
results = base_test_result.TestRunResults()
if 'Monkey finished' in output and not crashed:
result = base_test_result.BaseTestResult(
test_name, base_test_result.ResultType.PASS, log=output)
else:
result = base_test_result.BaseTestResult(
test_name, base_test_result.ResultType.FAIL, log=output)
results.AddResult(result)
return results, False
| bsd-3-clause | -4,246,792,533,944,500,700 | 33.766234 | 75 | 0.632798 | false |
machine-intelligence/rl-teacher-atari | agents/ga3c/ga3c/GA3C.py | 1 | 2331 | # Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# check python version; warn if not Python3
import sys
import warnings
if sys.version_info < (3,0):
warnings.warn("Optimized for Python3. Performance may suffer under Python2.", Warning)
from ga3c.Config import Config
from ga3c.Server import Server
# Parse arguments
for i in range(1, len(sys.argv)):
# Config arguments should be in format of Config=Value
# For setting booleans to False use Config=
x, y = sys.argv[i].split('=')
setattr(Config, x, type(getattr(Config, x))(y))
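# Illustrative invocation (not taken from the original docs):
#   python GA3C.py AGENTS=16 TRAINERS=4 PLAY_MODE=
# An empty value such as PLAY_MODE= coerces the boolean to False, as noted above.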
# Adjust configs for Play mode
if Config.PLAY_MODE:
Config.AGENTS = 1
Config.PREDICTORS = 1
Config.TRAINERS = 1
Config.DYNAMIC_SETTINGS = False
Config.LOAD_CHECKPOINT = True
Config.TRAIN_MODELS = False
Config.SAVE_MODELS = False
# Start main program
Server().main()
| mit | 7,804,375,501,185,156,000 | 41.381818 | 90 | 0.75547 | false |
Lisaveta-K/lisaveta-k.github.io | _site/tomat/apps/shops/migrations/0006_transport_company_removal.py | 1 | 3193 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'TransportCompany'
db.delete_table(u'shops_transportcompany')
def backwards(self, orm):
# Adding model 'TransportCompany'
db.create_table(u'shops_transportcompany', (
('is_visible', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'shops', ['TransportCompany'])
models = {
u'shops.city': {
'Meta': {'object_name': 'City', 'db_table': "'shops_cities'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'shops.delivery': {
'Meta': {'object_name': 'Delivery'},
'caption': ('django.db.models.fields.TextField', [], {}),
'content': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_retail': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_wholesale': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'shops.discount': {
'Meta': {'object_name': 'Discount'},
'id': ('django.db.models.fields.PositiveIntegerField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'shops.shop': {
'Meta': {'object_name': 'Shop'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shops'", 'to': u"orm['shops.City']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phones': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'worktime': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['shops'] | mit | -2,002,103,304,827,569,700 | 50.516129 | 128 | 0.554964 | false |
drhoet/marantz-hue-adapter | analysis/color_space_analysis.py | 1 | 5469 | import numpy as np
from mayavi import mlab
from scipy.interpolate import splprep, splev
from traits.api import HasTraits, Instance, Button, on_trait_change, Float
from traitsui.api import View, Item, HSplit, Group, VGroup
from mayavi.core.ui.api import MlabSceneModel, SceneEditor
################################################################################
# Remark: I used this script to find a good frequency -> xy values mapping. #
# I generated, as carefully as I could, all xy values the app would send to #
# the lamp. I did this by SLOOOOOOOWLY sliding through the slider, and #
# printing out all xy values I received, in order. I made 3 of those files, #
# which are ofc a bit different due to the speed I used not being constant, as #
# I'm not a robot. #
# I then used this script to find a good B-spline interpolation. On the screen #
# you can input a value for the smoothing factor s and recalculate + redraw #
# the interpolated function. I found a value of 0.001 for s to be good. #
# On the interpolated function, 411 equidistant samples are taken, one for #
# 0.05 frequency in the range 87.5 -> 108.00. #
# The xy values are then printed to the console. #
# #
# These values are copied in the colorspaces.py, since I didn't want to add #
# the dependency to scipy there. #
# #
# I executed this script in Enthought Canopy Version: 1.7.4.3348 (64 bit). #
# Required packages: #
# - numpy 1.10.4-1 #
# - mayavi 4.4.3-10 #
# - vtk 6.3.0-4 #
# - scipy 0.17.1-1 #
# - traits 4.5.0-1 #
# - traitsui 5.1.0-1 #
################################################################################
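# Minimal, hedged sketch of the core fitting step described in the remark above.
def spline_fit_example(xs, ys, s=0.001):
    """Not part of the original analysis flow; xs/ys stand for one recorded
    xy trace. Fits a cubic B-spline over the FM band and samples it at 411
    equidistant frequencies (one per 0.05 MHz between 87.50 and 108.00)."""
    tck, _ = splprep([xs, ys], u=np.linspace(87.50, 108.00, len(xs)), s=s, k=3)
    freqs = np.linspace(87.50, 108.00, 411)
    x_fit, y_fit = splev(freqs, tck, ext=2)
    return freqs, x_fit, y_fit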
def read_file(filename):
linenb = 0
data = [[], [], []]
for line in open(filename, 'r'):
fields = line[1:-2].split(',')
data[0].append(linenb)
data[1].append(float(fields[0]))
data[2].append(float(fields[1]))
linenb = linenb + 1
return np.array(data)
class MyDialog(HasTraits):
p0 = read_file('testGO0.txt')
p1 = read_file('testGO1.txt')
p2 = read_file('testGO2.txt')
new_u = x = y = None
scene1 = Instance(MlabSceneModel, ())
scene2 = Instance(MlabSceneModel, ())
button1 = Button('Redraw')
button2 = Button('Redraw')
buttonSave = Button('Save')
s1 = Float
s2 = Float
@on_trait_change('button1')
def redraw_scene1(self):
self.redraw_scene(self.scene1, self.s1)
@on_trait_change('button2')
def redraw_scene2(self):
self.redraw_scene(self.scene2, self.s2)
@on_trait_change('buttonSave')
def save(self):
f = open('outputGO.txt', 'w')
f.write('freq = [\n')
for i in range(0, len(self.new_u)):
f.write(' [%s,%s],\n' % (self.x[i], self.y[i]))
f.write(']')
f.close()
def redraw_scene(self, scene, s):
mlab.clf(figure=scene.mayavi_scene)
mlab.plot3d(np.divide(self.p0[0], 100), self.p0[1], self.p0[2], tube_radius=0.005, color=(1, 0, 0), figure=scene.mayavi_scene)
mlab.plot3d(np.divide(self.p1[0], 100), self.p1[1], self.p1[2], tube_radius=0.005, color=(0, 1, 0), figure=scene.mayavi_scene)
mlab.plot3d(np.divide(self.p2[0], 100), self.p2[1], self.p2[2], tube_radius=0.005, color=(0, 0, 1), figure=scene.mayavi_scene)
tck, u = splprep([self.p1[1], self.p1[2]], u=np.linspace(87.50, 108.00, len(self.p1[0])), s=s, k=3)
self.new_u = np.linspace(87.50, 108.00, 411)
self.x, self.y = splev(self.new_u, tck, ext=2)
mlab.plot3d(np.divide(self.new_u, 100), self.x, self.y, tube_radius=0.005, color=(1, 1, 1), figure=scene.mayavi_scene)
# The layout of the dialog created
view = View(VGroup(
HSplit(
Group(
Item('scene1', editor=SceneEditor(), height=250,
width=300),
'button1',
's1',
show_labels=False,
),
Group(
Item('scene2',
editor=SceneEditor(), height=250,
width=300, show_label=False),
'button2',
's2',
show_labels=False,
)
),
'buttonSave',
show_labels=False
),
resizable=True,
)
m = MyDialog()
m.configure_traits() | mit | 1,464,191,938,755,861,800 | 43.97479 | 134 | 0.449991 | false |
mike-lawrence/actichampy | pycorder/loadlibs.py | 1 | 3137 | # -*- coding: utf-8 -*-
'''
Load required libraries and check versions
PyCorder ActiChamp Recorder
------------------------------------------------------------
Copyright (C) 2010, Brain Products GmbH, Gilching
PyCorder is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 3
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyCorder. If not, see <http://www.gnu.org/licenses/>.
------------------------------------------------------------
@author: Norbert Hauser
@date: $Date: 2011-03-24 16:03:45 +0100 (Do, 24 Mrz 2011) $
@version: 1.0
B{Revision:} $LastChangedRevision: 62 $
'''
'''
------------------------------------------------------------
CHECK LIBRARY DEPENDENCIES
------------------------------------------------------------
'''
import sys
# required Python and library versions
ver_Python = "2.6"
ver_NumPy = ("1.3.0", "1.4.1")
ver_SciPy = ("0.7.1", "0.8.0")
ver_PyQt = ("4.5.2", "4.6.3")
ver_PyQwt = ("5.2.1",)
ver_lxml = ("2.2.4", "2.2.7")
# try to import python libraries, check versions
import_log = ""
if not ver_Python in sys.version:
import_log += "- Wrong Python version (%s), please install Python %s\r\n"%(sys.version, ver_Python)
try:
import numpy as np
if not np.__version__ in ver_NumPy:
import_log += "- Wrong NumPy version (%s), please install NumPy %s\r\n"%(np.__version__, ver_NumPy)
except ImportError:
import_log += "- NumPy missing, please install NumPy %s\r\n"%(str(ver_NumPy))
try:
import scipy as sc
if not sc.__version__ in ver_SciPy:
import_log += "- Wrong SciPy version (%s), please install SciPy %s\r\n"%(sc.__version__, ver_SciPy)
except ImportError:
import_log += "- SciPy missing, please install SciPy %s\r\n"%(str(ver_SciPy))
try:
from PyQt4 import Qt
if not Qt.QT_VERSION_STR in ver_PyQt:
import_log += "- Wrong PyQt version (%s), please install PyQt %s\r\n"%(Qt.QT_VERSION_STR, ver_PyQt)
except ImportError:
import_log += "- PyQt missing, please install PyQt %s\r\n"%(str(ver_PyQt))
try:
from PyQt4 import Qwt5 as Qwt
if not Qwt.QWT_VERSION_STR in ver_PyQwt:
import_log += "- Wrong PyQwt version (%s), please install PyQwt %s\r\n"%(Qwt.QWT_VERSION_STR, ver_PyQwt)
except ImportError:
import_log += "- PyQwt missing, please install PyQwt %s\r\n"%(str(ver_PyQwt))
try:
from lxml import etree
if not etree.__version__ in ver_lxml:
import_log += "- Wrong lxml version (%s), please install lxml %s\r\n"%(etree.__version__, ver_lxml)
except ImportError:
import_log += "- lxml missing, please install lxml %s\r\n"%(str(ver_lxml))
| gpl-3.0 | -9,222,157,332,096,377,000 | 32.855556 | 113 | 0.601211 | false |
ntymtsiv/tempest | tempest/services/compute/v3/json/servers_client.py | 1 | 16454 | # Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import urllib
from tempest.common.rest_client import RestClient
from tempest.common import waiters
from tempest import config
from tempest import exceptions
CONF = config.CONF
class ServersV3ClientJSON(RestClient):
def __init__(self, auth_provider):
super(ServersV3ClientJSON, self).__init__(auth_provider)
self.service = CONF.compute.catalog_v3_type
def create_server(self, name, image_ref, flavor_ref, **kwargs):
"""
Creates an instance of a server.
name (Required): The name of the server.
image_ref (Required): Reference to the image used to build the server.
flavor_ref (Required): The flavor used to build the server.
Following optional keyword arguments are accepted:
admin_password: Sets the initial root password.
key_name: Key name of keypair that was created earlier.
meta: A dictionary of values to be used as metadata.
security_groups: A list of security group dicts.
networks: A list of network dicts with UUID and fixed_ip.
user_data: User data for instance.
availability_zone: Availability zone in which to launch instance.
access_ip_v4: The IPv4 access address for the server.
access_ip_v6: The IPv6 access address for the server.
min_count: Count of minimum number of instances to launch.
max_count: Count of maximum number of instances to launch.
disk_config: Determines if user or admin controls disk configuration.
return_reservation_id: Enable/Disable the return of reservation id
"""
post_body = {
'name': name,
'image_ref': image_ref,
'flavor_ref': flavor_ref
}
for option in ['admin_password', 'key_name', 'networks',
('os-security-groups:security_groups',
'security_groups'),
('os-user-data:user_data', 'user_data'),
('os-availability-zone:availability_zone',
'availability_zone'),
('os-access-ips:access_ip_v4', 'access_ip_v4'),
('os-access-ips:access_ip_v6', 'access_ip_v6'),
('os-multiple-create:min_count', 'min_count'),
('os-multiple-create:max_count', 'max_count'),
('metadata', 'meta'),
('os-disk-config:disk_config', 'disk_config'),
('os-multiple-create:return_reservation_id',
'return_reservation_id')]:
if isinstance(option, tuple):
post_param = option[0]
key = option[1]
else:
post_param = option
key = option
value = kwargs.get(key)
if value is not None:
post_body[post_param] = value
post_body = json.dumps({'server': post_body})
resp, body = self.post('servers', post_body, self.headers)
body = json.loads(body)
# NOTE(maurosr): this deals with the case of multiple server create
# with return reservation id set True
if 'servers_reservation' in body:
return resp, body['servers_reservation']
return resp, body['server']
def update_server(self, server_id, name=None, meta=None, access_ip_v4=None,
access_ip_v6=None, disk_config=None):
"""
Updates the properties of an existing server.
server_id: The id of an existing server.
name: The name of the server.
access_ip_v4: The IPv4 access address for the server.
access_ip_v6: The IPv6 access address for the server.
"""
post_body = {}
if meta is not None:
post_body['metadata'] = meta
if name is not None:
post_body['name'] = name
if access_ip_v4 is not None:
post_body['os-access-ips:access_ip_v4'] = access_ip_v4
if access_ip_v6 is not None:
post_body['os-access-ips:access_ip_v6'] = access_ip_v6
if disk_config is not None:
post_body['os-disk-config:disk_config'] = disk_config
post_body = json.dumps({'server': post_body})
resp, body = self.put("servers/%s" % str(server_id),
post_body, self.headers)
body = json.loads(body)
return resp, body['server']
def get_server(self, server_id):
"""Returns the details of an existing server."""
resp, body = self.get("servers/%s" % str(server_id))
body = json.loads(body)
return resp, body['server']
def delete_server(self, server_id):
"""Deletes the given server."""
return self.delete("servers/%s" % str(server_id))
def list_servers(self, params=None):
"""Lists all servers for a user."""
url = 'servers'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body
def list_servers_with_detail(self, params=None):
"""Lists all servers in detail for a user."""
url = 'servers/detail'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body
def wait_for_server_status(self, server_id, status, extra_timeout=0,
raise_on_error=True):
"""Waits for a server to reach a given status."""
return waiters.wait_for_server_status(self, server_id, status,
extra_timeout=extra_timeout,
raise_on_error=raise_on_error)
def wait_for_server_termination(self, server_id, ignore_error=False):
"""Waits for server to reach termination."""
start_time = int(time.time())
while True:
try:
resp, body = self.get_server(server_id)
except exceptions.NotFound:
return
server_status = body['status']
if server_status == 'ERROR' and not ignore_error:
raise exceptions.BuildErrorException(server_id=server_id)
if int(time.time()) - start_time >= self.build_timeout:
raise exceptions.TimeoutException
time.sleep(self.build_interval)
def list_addresses(self, server_id):
"""Lists all addresses for a server."""
resp, body = self.get("servers/%s/ips" % str(server_id))
body = json.loads(body)
return resp, body['addresses']
def list_addresses_by_network(self, server_id, network_id):
"""Lists all addresses of a specific network type for a server."""
resp, body = self.get("servers/%s/ips/%s" %
(str(server_id), network_id))
body = json.loads(body)
return resp, body
def action(self, server_id, action_name, response_key, **kwargs):
post_body = json.dumps({action_name: kwargs})
resp, body = self.post('servers/%s/action' % str(server_id),
post_body, self.headers)
if response_key is not None:
body = json.loads(body)[response_key]
return resp, body
def create_backup(self, server_id, backup_type, rotation, name):
"""Backup a server instance."""
return self.action(server_id, "create_backup", None,
backup_type=backup_type,
rotation=rotation,
name=name)
def change_password(self, server_id, admin_password):
"""Changes the root password for the server."""
return self.action(server_id, 'change_password', None,
admin_password=admin_password)
def reboot(self, server_id, reboot_type):
"""Reboots a server."""
return self.action(server_id, 'reboot', None, type=reboot_type)
def rebuild(self, server_id, image_ref, **kwargs):
"""Rebuilds a server with a new image."""
kwargs['image_ref'] = image_ref
if 'disk_config' in kwargs:
kwargs['os-disk-config:disk_config'] = kwargs['disk_config']
del kwargs['disk_config']
return self.action(server_id, 'rebuild', 'server', **kwargs)
def resize(self, server_id, flavor_ref, **kwargs):
"""Changes the flavor of a server."""
kwargs['flavor_ref'] = flavor_ref
if 'disk_config' in kwargs:
kwargs['os-disk-config:disk_config'] = kwargs['disk_config']
del kwargs['disk_config']
return self.action(server_id, 'resize', None, **kwargs)
def confirm_resize(self, server_id, **kwargs):
"""Confirms the flavor change for a server."""
return self.action(server_id, 'confirm_resize', None, **kwargs)
def revert_resize(self, server_id, **kwargs):
"""Reverts a server back to its original flavor."""
return self.action(server_id, 'revert_resize', None, **kwargs)
def create_image(self, server_id, name, meta=None):
"""Creates an image of the original server."""
post_body = {
'create_image': {
'name': name,
}
}
if meta is not None:
post_body['create_image']['metadata'] = meta
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % str(server_id),
post_body, self.headers)
return resp, body
def list_server_metadata(self, server_id):
resp, body = self.get("servers/%s/metadata" % str(server_id))
body = json.loads(body)
return resp, body['metadata']
def set_server_metadata(self, server_id, meta, no_metadata_field=False):
if no_metadata_field:
post_body = ""
else:
post_body = json.dumps({'metadata': meta})
resp, body = self.put('servers/%s/metadata' % str(server_id),
post_body, self.headers)
body = json.loads(body)
return resp, body['metadata']
def update_server_metadata(self, server_id, meta):
post_body = json.dumps({'metadata': meta})
resp, body = self.post('servers/%s/metadata' % str(server_id),
post_body, self.headers)
body = json.loads(body)
return resp, body['metadata']
def get_server_metadata_item(self, server_id, key):
resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
body = json.loads(body)
return resp, body['metadata']
def set_server_metadata_item(self, server_id, key, meta):
post_body = json.dumps({'metadata': meta})
resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
post_body, self.headers)
body = json.loads(body)
return resp, body['metadata']
def delete_server_metadata_item(self, server_id, key):
resp, body = self.delete("servers/%s/metadata/%s" %
(str(server_id), key))
return resp, body
def stop(self, server_id, **kwargs):
return self.action(server_id, 'stop', None, **kwargs)
def start(self, server_id, **kwargs):
return self.action(server_id, 'start', None, **kwargs)
def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
"""Attaches a volume to a server instance."""
return self.action(server_id, 'attach', None, volume_id=volume_id,
device=device)
def detach_volume(self, server_id, volume_id):
"""Detaches a volume from a server instance."""
return self.action(server_id, 'detach', None, volume_id=volume_id)
def live_migrate_server(self, server_id, dest_host, use_block_migration):
"""This should be called with administrator privileges ."""
migrate_params = {
"disk_over_commit": False,
"block_migration": use_block_migration,
"host": dest_host
}
req_body = json.dumps({'migrate_live': migrate_params})
resp, body = self.post("servers/%s/action" % str(server_id),
req_body, self.headers)
return resp, body
def migrate_server(self, server_id, **kwargs):
"""Migrates a server to a new host."""
return self.action(server_id, 'migrate', None, **kwargs)
def lock_server(self, server_id, **kwargs):
"""Locks the given server."""
return self.action(server_id, 'lock', None, **kwargs)
def unlock_server(self, server_id, **kwargs):
"""UNlocks the given server."""
return self.action(server_id, 'unlock', None, **kwargs)
def suspend_server(self, server_id, **kwargs):
"""Suspends the provided server."""
return self.action(server_id, 'suspend', None, **kwargs)
def resume_server(self, server_id, **kwargs):
"""Un-suspends the provided server."""
return self.action(server_id, 'resume', None, **kwargs)
def pause_server(self, server_id, **kwargs):
"""Pauses the provided server."""
return self.action(server_id, 'pause', None, **kwargs)
def unpause_server(self, server_id, **kwargs):
"""Un-pauses the provided server."""
return self.action(server_id, 'unpause', None, **kwargs)
def reset_state(self, server_id, state='error'):
"""Resets the state of a server to active/error."""
return self.action(server_id, 'reset_state', None, state=state)
def shelve_server(self, server_id, **kwargs):
"""Shelves the provided server."""
return self.action(server_id, 'shelve', None, **kwargs)
def unshelve_server(self, server_id, **kwargs):
"""Un-shelves the provided server."""
return self.action(server_id, 'unshelve', None, **kwargs)
def get_console_output(self, server_id, length):
return self.action(server_id, 'get_console_output', 'output',
length=length)
def rescue_server(self, server_id, **kwargs):
"""Rescue the provided server."""
return self.action(server_id, 'rescue', None, **kwargs)
def unrescue_server(self, server_id):
"""Unrescue the provided server."""
return self.action(server_id, 'unrescue', None)
def get_server_diagnostics(self, server_id):
"""Get the usage data for a server."""
resp, body = self.get("servers/%s/os-server-diagnostics" %
str(server_id))
return resp, json.loads(body)
def list_instance_actions(self, server_id):
"""List the provided server action."""
resp, body = self.get("servers/%s/os-instance-actions" %
str(server_id))
body = json.loads(body)
return resp, body['instance_actions']
def get_instance_action(self, server_id, request_id):
"""Returns the action details of the provided server."""
resp, body = self.get("servers/%s/os-instance-actions/%s" %
(str(server_id), str(request_id)))
body = json.loads(body)
return resp, body['instance_action']
def force_delete_server(self, server_id, **kwargs):
"""Force delete a server."""
return self.action(server_id, 'force_delete', None, **kwargs)
def restore_soft_deleted_server(self, server_id, **kwargs):
"""Restore a soft-deleted server."""
return self.action(server_id, 'restore', None, **kwargs)
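def _example_create_and_delete(auth_provider):
    """Illustrative usage sketch, not part of the original client: a typical
    create/wait/delete flow. The image and flavor references are placeholders
    that must exist in the target cloud."""
    client = ServersV3ClientJSON(auth_provider)
    _, server = client.create_server('example-server',
                                     '<image-ref>',
                                     '<flavor-ref>',
                                     meta={'purpose': 'demo'})
    client.wait_for_server_status(server['id'], 'ACTIVE')
    client.delete_server(server['id'])
    client.wait_for_server_termination(server['id'])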
| apache-2.0 | 5,713,543,314,471,721,000 | 39.034063 | 79 | 0.581682 | false |
FlintHill/SUAS-Competition | UpdatedSyntheticDataset/SyntheticDataset2/ElementsCreator/cross.py | 1 | 1798 | from PIL import ImageDraw, Image
from SyntheticDataset2.ElementsCreator import Shape
class Cross(Shape):
def __init__(self, height, color, rotation):
"""
Initialize a Cross shape
:param height: height in pixels
:type height: int
:param color: color of shape - RGB
:type color: 3-tuple int
:param rotation: degrees counterclockwise shape will be rotated
:type rotation: int
"""
super(Cross, self).__init__(color, rotation)
self.height = height
self.coordinates = self.get_coordinates()
def get_coordinates(self):
"""
        :return: drawing coordinates for the shape
        :rtype: list of 2-tuple xy pixel coordinates
"""
x1 = self.height/3
y1 = 0
x2 = 2*self.height/3
y2 = 0
x3 = 2*self.height/3
y3 = self.height/3
x4 = self.height
y4 = self.height/3
x5 = self.height
y5 = 2*self.height/3
x6 = 2*self.height/3
y6 = 2*self.height/3
x7 = 2*self.height/3
y7 = self.height
x8 = self.height/3
y8 = self.height
x9 = self.height/3
y9 = 2*self.height/3
x10 = 0
y10 = 2*self.height/3
x11 = 0
y11 = self.height/3
x12 = self.height/3
y12 = self.height/3
return [(x1,y1),(x2,y2),(x3,y3),(x4,y4),(x5,y5),(x6,y6),(x7,y7),(x8,y8),(x9,y9),(x10,y10),(x11,y11),(x12,y12)]
def draw(self):
new_cross = Image.new('RGBA', (self.height,self.height), color=(255,255,255,0))
draw = ImageDraw.Draw(new_cross)
draw.polygon(self.coordinates, fill=self.color)
new_cross = new_cross.rotate(self.rotation, expand=1)
return new_cross
| mit | 8,255,110,933,483,183,000 | 29.474576 | 118 | 0.560623 | false |
amenonsen/ansible | lib/ansible/modules/network/fortios/fortios_system_external_resource.py | 1 | 10703 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_external_resource
short_description: Configure external resource in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS device by allowing the
user to set and modify system feature and external_resource category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
choices:
- present
- absent
system_external_resource:
description:
- Configure external resource.
default: null
type: dict
suboptions:
category:
description:
- User resource category.
type: int
comments:
description:
- Comment.
type: str
name:
description:
- External resource name.
required: true
type: str
refresh_rate:
description:
- Time interval to refresh external resource (1 - 43200 min, default = 5 min).
type: int
resource:
description:
- URI of external resource.
type: str
status:
description:
- Enable/disable user resource.
type: str
choices:
- enable
- disable
type:
description:
- User resource type.
type: str
choices:
- category
- address
- domain
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure external resource.
fortios_system_external_resource:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_external_resource:
category: "3"
comments: "<your_own_value>"
name: "default_name_5"
refresh_rate: "6"
resource: "<your_own_value>"
status: "enable"
type: "category"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_external_resource_data(json):
option_list = ['category', 'comments', 'name',
'refresh_rate', 'resource', 'status',
'type']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def system_external_resource(data, fos):
vdom = data['vdom']
state = data['state']
system_external_resource_data = data['system_external_resource']
filtered_data = underscore_to_hyphen(filter_system_external_resource_data(system_external_resource_data))
if state == "present":
return fos.set('system',
'external-resource',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system',
'external-resource',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
if data['system_external_resource']:
resp = system_external_resource(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_external_resource": {
"required": False, "type": "dict", "default": None,
"options": {
"category": {"required": False, "type": "int"},
"comments": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"refresh_rate": {"required": False, "type": "int"},
"resource": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"type": {"required": False, "type": "str",
"choices": ["category", "address", "domain"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | 169,487,628,331,970,240 | 29.234463 | 109 | 0.579464 | false |
pinballwizard/horse | horse/settings.py | 1 | 6060 | """
Django settings for horse project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+(k#-e2yrqo%^f!ga0ka7f!yy_cv0)_uj-h$avn-tgah%9umzg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
# 'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
# 'chart_tools',
'base',
'flatlease',
'car_leasing',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'horse.urls'
LOGIN_URL = 'login'
ADMINS = (
('Vasiliy', '[email protected]'),
)
MANAGERS = (
('Vasiliy', '[email protected]'),
('Vera', '[email protected]'),
)
SERVER_EMAIL = 'horse@django' # Sender address for outgoing mail
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'horse.wsgi.application'
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'formatters': {
# 'verbose': {
# 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
# },
# 'simple': {
# 'format': '%(levelname)s %(message)s'
# },
# },
# 'handlers': {
# # 'file': {
# # 'level': 'DEBUG',
# # 'class': 'logging.handlers.TimedRotatingFileHandler',
# # 'when': 'd',
# # # 'interval': '1',
# # 'encoding': 'UTF8',
# # 'formatter': 'simple',
# # 'filename': '/var/log/horse/debug.log',
# # },
# 'file': {
# 'level': 'DEBUG',
# 'class': 'logging.FileHandler',
# 'encoding': 'UTF8',
# 'formatter': 'verbose',
# 'filename': '/var/log/horse/debug.log',
# },
# 'null': {
# 'level': 'DEBUG',
# 'class': 'logging.NullHandler',
# },
# 'console': {
# 'level': 'INFO',
# 'class': 'logging.StreamHandler',
# 'formatter': 'simple'
# },
# 'mail_admins': {
# 'level': 'ERROR',
# 'class': 'django.utils.log.AdminEmailHandler',
# 'include_html': True,
# }
# },
# 'loggers': {
# 'django': {
# 'handlers': ['console', 'file'],
# 'propagate': True,
# 'level': 'INFO',
# },
# 'django.request': {
# 'handlers': ['mail_admins', 'console', 'file'],
# 'level': 'INFO',
# 'propagate': False,
# },
# 'flatlease': {
# 'handlers': ['console', 'file'],
# 'level': 'DEBUG',
# 'propagate': True,
# },
# 'django.db.backends': {
# 'handlers': ['mail_admins'],
# 'level': 'ERROR',
# 'propagate': False,
# },
# 'django.security.DisallowedHost': {
# 'handlers': ['mail_admins'],
# 'propagate': False,
# },
# },
# }
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': '54.93.55.209',
'PORT': '5432',
'NAME': 'horse',
'USER': 'django',
'PASSWORD': '14875264',
},
'flat': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': '54.93.55.209',
'PORT': '5432',
'NAME': 'flat',
'USER': 'django',
'PASSWORD': '14875264',
},
'car': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': '54.93.55.209',
'PORT': '5432',
'NAME': 'car',
'USER': 'django',
'PASSWORD': '14875264',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'Asia/Krasnoyarsk'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.2006', '25.10.06'
'%d-%m-%Y', '%d/%m/%Y', '%d/%m/%y', # '25-10-2006', '25/10/2006', '25/10/06'
'%d %b %Y', # '25 Oct 2006',
'%d %B %Y', # '25 October 2006',
'%d.%m.%Y',
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = 'media/' | gpl-3.0 | 4,460,702,238,426,681,000 | 26.207207 | 97 | 0.539493 | false |
MightyPork/stm32-asm-examples | registers/gen/to_asm_f30x.py | 1 | 4316 |
from cmsis_svd.parser import SVDParser
import json
import re
# ------------------------------------
svd_name = 'STM32F30x.svd'
want_ofs = True
want_len = True
# Do not print poripheral field definitions (same as first instance)
no_print_fields = [
'GPIOB',
'GPIOC',
'GPIOD',
'GPIOE',
'GPIOF',
'GPIOG',
'USART2',
'USART3',
'ADC2',
'ADC3',
'ADC4',
'ADC34',
'I2C2',
'I2C3',
'SPI2',
'SPI3',
]
# Same registers as... (points to first instance)
same_regs_as = {
'GPIOB': 'GPIOA',
'GPIOC': 'GPIOA',
'GPIOD': 'GPIOA',
'GPIOE': 'GPIOA',
'GPIOF': 'GPIOA',
'GPIOG': 'GPIOG',
'GPIOH': 'GPIOH',
'USART2': 'USART1',
'USART3': 'USART1',
'TIM4': 'TIM3',
'DAC2': 'DAC1',
'ADC2': 'ADC1',
'ADC3': 'ADC1',
'ADC4': 'ADC1',
'ADC34': 'ADC12',
'I2C2': 'I2C1',
'I2C3': 'I2C1',
'SPI2': 'SPI1',
'SPI3': 'SPI1',
}
# Rename peripheral when building field definitions
# Used for multiple instances (build fields only for the first)
periph_rename_for_field = {
'GPIOA': 'GPIO',
'USART1': 'USART',
'DAC1': 'DAC',
'ADC12': 'ADCC',
'I2C1': 'I2C'
}
# Rename peripheral when generating (bad name in SVD)
periph_rename = {
'ADC1_2': 'ADC12',
'ADC3_4': 'ADC34',
'Flash': 'FLASH'
}
# ------------------------------------
base_line = "{0:<30} EQU {1:#x}"
reg_line = "{0:<30} EQU ({1}_BASE + {2:#x})"
field_line = "{0:<30} EQU {1:#010x}"
field_ofs_line = "{0:<30} EQU {1:#d}"
field_len_line = field_ofs_line
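# Example of the emitted lines (illustrative values; the real ones come from the SVD):
#   GPIOA_BASE                     EQU 0x48000000
#   GPIOA_MODER                    EQU (GPIOA_BASE + 0x0)
#   GPIO_MODER_MODER0              EQU 0x00000003
#   GPIO_MODER_MODER0_ofs          EQU 0
#   GPIO_MODER_MODER0_len          EQU 2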
def comment_str(x):
if x is None:
return ''
return '; %s' % re.sub(r"[\s\n]+", ' ', x.replace('\n',' '))
def comment(x):
print(comment_str(x))
def banner(x):
comment('==== {:=<55}'.format("%s " % x))
def caption(x):
print()
comment('---- {:-<55}'.format("%s " % x))
def comment(x):
print(comment_str(x))
# ------------------------------------
parser = SVDParser.for_packaged_svd('STMicro', svd_name)
device = parser.get_device()
print()
banner('%s PERIPHERALS' % device.name)
comment('')
comment('CTU Prague, FEL, Department of Measurement')
comment('')
comment('-' * 60)
comment('')
comment('Generated from "%s"' % svd_name)
comment('')
comment('SVD parsing library (c) Paul Osborne, 2015-2016')
comment(' https://github.com/posborne/cmsis-svd')
comment('ASM building script (c) Ondrej Hruska, 2016')
comment('')
comment('=' * 60)
print()
# periph registers
def print_registers(peripheral, pname=None):
if pname is None:
pname = periph_rename.get(peripheral.name, peripheral.name)
for register in peripheral.registers:
print(reg_line.format("%s_%s" % (pname, register.name), pname, register.address_offset), end=' ')
comment(register.description)
# periph fields
def print_fields(peripheral, pname=None):
if pname is None:
pname = periph_rename.get(peripheral.name, peripheral.name)
for register in peripheral.registers:
print()
comment('%s_%s fields:' % (pname, register.name))
print()
for field in register.fields:
mask = ((1 << field.bit_width) - 1) << field.bit_offset
f_pname = periph_rename_for_field.get(pname, pname)
print(field_line.format("%s_%s_%s" % (f_pname, register.name, field.name), mask), end=' ')
comment(field.description)
if want_ofs:
print(field_ofs_line.format("%s_%s_%s_ofs" % (f_pname, register.name, field.name), field.bit_offset))
if want_len:
print(field_len_line.format("%s_%s_%s_len" % (f_pname, register.name, field.name), field.bit_width))
print()
# Print the list
periph_dict = {}
for peripheral in device.peripherals:
periph_name = periph_rename.get(peripheral.name, peripheral.name)
# add to a dict for referencing by name
periph_dict[periph_name] = peripheral
# -----
caption(periph_name)
comment('Desc: %s' % peripheral.description)
print()
comment('%s base address:' % periph_name)
print(base_line.format("%s_BASE" % periph_name, peripheral.base_address))
print()
comment('%s registers:' % periph_name)
print()
# Registers
if periph_name in same_regs_as:
print_registers(periph_dict[same_regs_as[periph_name]], pname=periph_name)
else:
print_registers(peripheral)
if periph_name in no_print_fields:
comment('Fields the same as in the first instance.')
continue
# Fields
if periph_name in same_regs_as:
print_fields(periph_dict[same_regs_as[periph_name]], pname=periph_name)
else:
print_fields(peripheral)
print(' END\n')
| mit | -8,582,438,179,664,634,000 | 19.951456 | 105 | 0.63114 | false |
biocore/pyqi | pyqi/interfaces/optparse/config/make_command.py | 1 | 2842 | #!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, The BiPy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
__credits__ = ["Daniel McDonald", "Greg Caporaso", "Doug Wendel",
"Jai Ram Rideout"]
from pyqi.core.interfaces.optparse import (OptparseOption,
OptparseResult,
OptparseUsageExample)
from pyqi.core.interfaces.optparse.input_handler import string_list_handler
from pyqi.core.interfaces.optparse.output_handler import write_list_of_strings
from pyqi.core.command import (make_command_in_collection_lookup_f,
make_command_out_collection_lookup_f)
from pyqi.commands.make_command import CommandConstructor
cmd_in_lookup = make_command_in_collection_lookup_f(CommandConstructor)
cmd_out_lookup = make_command_out_collection_lookup_f(CommandConstructor)
usage_examples = [
OptparseUsageExample(ShortDesc="Basic Command",
LongDesc="Create a basic Command with appropriate attribution",
Ex='%prog -n example -a "some author" -c "Copyright 2013, The pyqi project" -e "[email protected]" -l BSD --command-version "0.1" --credits "someone else","and another person" -o example.py')
]
inputs = [
OptparseOption(Parameter=cmd_in_lookup('name'),
ShortName='n'),
OptparseOption(Parameter=cmd_in_lookup('author'),
ShortName='a'),
OptparseOption(Parameter=cmd_in_lookup('email'),
ShortName='e'),
OptparseOption(Parameter=cmd_in_lookup('license'),
ShortName='l'),
OptparseOption(Parameter=cmd_in_lookup('copyright'),
ShortName='c'),
OptparseOption(Parameter=cmd_in_lookup('version'), Name='command-version'),
OptparseOption(Parameter=cmd_in_lookup('credits'),
Handler=string_list_handler,
Help='comma-separated list of other authors'),
OptparseOption(Parameter=cmd_in_lookup('test_code'),
Type=None, Action='store_true'),
OptparseOption(Parameter=None,
Type='new_filepath',
ShortName='o',
Name='output-fp',
Required=True,
Help='output filepath to store generated Python code')
]
outputs = [
### InputName is used to tie this output to output-fp, which is an input...
OptparseResult(Parameter=cmd_out_lookup('result'),
Handler=write_list_of_strings,
InputName='output-fp')
]
| bsd-3-clause | 3,186,206,807,258,815,000 | 44.83871 | 210 | 0.587262 | false |
alfredodeza/remoto | remoto/backends/__init__.py | 1 | 11528 | import inspect
import json
import socket
import sys
import execnet
import logging
from remoto.process import check
class BaseConnection(object):
"""
Base class for Connection objects. Provides a generic interface to execnet
for setting up the connection
"""
executable = ''
remote_import_system = 'legacy'
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=True,
detect_sudo=False, use_ssh=False, interpreter=None, ssh_options=None):
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None # wait for ever
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because "
"%s is not installed there" % self.interpreter
)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(
self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(
self._make_connection_string(self.hostname, use_sudo=False)
)
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())'
)
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
if self.ssh_options:
return 'ssh=%s %s//python=%s' % (
self.ssh_options, hostname, interpreter
)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter)
return 'popen//python=%s' % interpreter
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.logger,
python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send("%s(%s)" % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
# Error will come as a string of a traceback, remove everything
# up to the actual exception since we do get garbage otherwise
# that points to non-existent lines in the compiled code
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
dump_template = """
if __name__ == '__main__':
import json, traceback
obj = {'return': None, 'exception': None}
try:
obj['return'] = %s%s
except Exception:
obj['exception'] = traceback.format_exc()
try:
print(json.dumps(obj).decode('utf-8'))
except AttributeError:
print(json.dumps(obj))
"""
class JsonModuleExecute(object):
"""
This remote execution class allows to ship Python code over to the remote
node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(args))
else:
source = self._module_source + dump_template % (name, '()')
# check python interpreter
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable], stdin=source.encode('utf-8'))
if not out:
if not err:
err = [
'Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name
]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' % ('\n'.join(out), '\n'.join(err)))
# at this point, there was no stdout, and the exit code was 0,
# we must return so that we don't fail trying to serialize back
# the JSON
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def basic_remote_logger():
logging.basicConfig()
logger = logging.getLogger(socket.gethostname())
logger.setLevel(logging.DEBUG)
return logger
def needs_ssh(hostname, _socket=None):
"""
Obtains remote hostname of the socket and cuts off the domain part
of its FQDN.
"""
if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:
return False
_socket = _socket or socket
fqdn = _socket.getfqdn()
if hostname == fqdn:
return False
local_hostname = _socket.gethostname()
local_short_hostname = local_hostname.split('.')[0]
if local_hostname == hostname or local_short_hostname == hostname:
return False
return True
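# A minimal sketch (not from the original source): expected behaviour of
# needs_ssh() for a few hostnames; the remote hostname mentioned below is
# hypothetical and the helper is never invoked by the library.
def _example_needs_ssh():  # pragma: no cover
    loopback = needs_ssh('localhost')             # False: loopback never needs ssh
    this_host = needs_ssh(socket.gethostname())   # False: matches the local name
    # needs_ssh('some-remote-box') would return True for any non-local host.
    return loopback, this_host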
def get_python_executable(conn):
"""
Try to determine the remote Python version so that it can be used
when executing. Avoids the problem of different Python versions, or distros
that do not use ``python`` but do ``python3``
"""
# executables in order of preference:
executables = ['python3', 'python', 'python2.7']
for executable in executables:
conn.logger.debug('trying to determine remote python executable with %s' % executable)
out, err, code = check(conn, ['which', executable])
if code:
conn.logger.warning('skipping %s, was not found in path' % executable)
else:
try:
return out[0].strip()
except IndexError:
conn.logger.warning('could not parse stdout: %s' % out)
# if all fails, we just return whatever the main connection had
conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter)
return conn.interpreter
| mit | -5,670,547,759,678,942,000 | 34.470769 | 111 | 0.581974 | false |
fergalmoran/robotopro | promotions/mixins.py | 1 | 1179 | import json
from django.http import HttpResponse
class AjaxableResponseMixin(object):
"""
Mixin to add AJAX support to a form.
Must be used with an object-based FormView (e.g. CreateView)
"""
def render_to_json_response(self, context, **response_kwargs):
data = json.dumps(context)
response_kwargs['content_type'] = 'application/json'
return HttpResponse(data, **response_kwargs)
def form_invalid(self, form):
response = super(AjaxableResponseMixin, self).form_invalid(form)
if self.request.is_ajax():
return self.render_to_json_response(form.errors, status=400)
else:
return response
def form_valid(self, form):
# We make sure to call the parent's form_valid() method because
# it might do some processing (in the case of CreateView, it will
# call form.save() for example).
response = super(AjaxableResponseMixin, self).form_valid(form)
if self.request.is_ajax():
data = {
'pk': self.object.pk,
}
return self.render_to_json_response(data)
else:
return response
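# A minimal sketch (not from the original source): the mixin is meant to be
# combined with an object-based FormView. The model and fields below are
# hypothetical, so the example is left as a comment.
#
#   from django.views.generic.edit import CreateView
#   from myapp.models import Author
#
#   class AuthorCreateView(AjaxableResponseMixin, CreateView):
#       model = Author
#       fields = ['name']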
| apache-2.0 | -6,176,625,241,103,926,000 | 34.727273 | 73 | 0.622561 | false |
melqkiades/yelp | source/python/perfomancetest/context_recommender_tests.py | 1 | 16102 | import cPickle as pickle
import copy
import time
import itertools
import numpy
from etl import ETLUtils
from evaluation import precision_in_top_n
from recommenders.context.baseline.simple_user_baseline_calculator import \
SimpleUserBaselineCalculator
from recommenders.context.baseline.user_baseline_calculator import \
UserBaselineCalculator
from recommenders.context.contextual_knn import ContextualKNN
from recommenders.context.neighbour_contribution.neighbour_contribution_calculator import \
NeighbourContributionCalculator
from recommenders.context.neighbour_contribution.context_nc_calculator import \
ContextNCCalculator
from recommenders.context.neighbourhood.context_neighbourhood_calculator import \
ContextNeighbourhoodCalculator
from recommenders.context.neighbourhood.context_hybrid_neighbourhood_calculator import \
ContextHybridNeighbourhoodCalculator
from recommenders.context.neighbourhood.simple_neighbourhood_calculator import \
SimpleNeighbourhoodCalculator
from recommenders.context.similarity.cbc_similarity_calculator import \
CBCSimilarityCalculator
from recommenders.context.similarity.cosine_similarity_calculator import \
CosineSimilarityCalculator
from recommenders.context.similarity.pbc_similarity_calculator import \
PBCSimilarityCalculator
from recommenders.context.similarity.pearson_similarity_calculator import \
PearsonSimilarityCalculator
from topicmodeling.context import lda_context_utils
from topicmodeling.context.lda_based_context import LdaBasedContext
from tripadvisor.fourcity import recommender_evaluator
from tripadvisor.fourcity import extractor
__author__ = 'fpena'
RMSE_HEADERS = [
'dataset',
'cache_reviews',
'num_records',
'reviews_type',
'cross_validation_folds',
'RMSE',
'MAE',
'coverage',
'time',
'name',
'neighbourhood_calculator',
'neighbour_contribution_calculator',
'user_baseline_calculator',
'user_similarity_calculator',
'num_neighbours',
'num_topics',
'threshold1',
'threshold2',
'threshold3',
'threshold4'
]
TOPN_HEADERS = [
'dataset',
'cache_reviews',
'num_records',
'reviews_type',
'cross_validation_folds',
'min_like_score',
'top_n',
'recall',
'coverage',
'time',
'name',
'neighbourhood_calculator',
'neighbour_contribution_calculator',
'user_baseline_calculator',
'user_similarity_calculator',
'num_neighbours',
'num_topics',
'threshold1',
'threshold2',
'threshold3',
'threshold4'
]
def get_knn_recommender_info(recommender):
recommender_name = recommender.__class__.__name__
nc_name = recommender.neighbourhood_calculator.__class__.__name__
ncc_name = recommender.neighbour_contribution_calculator.__class__.__name__
ubc_name = recommender.user_baseline_calculator.__class__.__name__
usc_name = recommender.user_similarity_calculator.__class__.__name__
recommender_info_map = {}
recommender_info_map['name'] = recommender_name
recommender_info_map['neighbourhood_calculator'] = nc_name
recommender_info_map['neighbour_contribution_calculator'] = ncc_name
recommender_info_map['user_baseline_calculator'] = ubc_name
recommender_info_map['user_similarity_calculator'] = usc_name
recommender_info_map['num_neighbours'] = recommender.num_neighbours
recommender_info_map['num_topics'] = recommender.num_topics
recommender_info_map['threshold1'] = recommender.threshold1
recommender_info_map['threshold2'] = recommender.threshold2
recommender_info_map['threshold3'] = recommender.threshold3
recommender_info_map['threshold4'] = recommender.threshold4
return recommender_info_map
def load_records(json_file):
records = ETLUtils.load_json_file(json_file)
fields = ['user_id', 'business_id', 'stars', 'text']
records = ETLUtils.select_fields(fields, records)
# We rename the 'stars' field to 'overall_rating' to take advantage of the
# function extractor.get_user_average_overall_rating
for record in records:
record['overall_rating'] = record.pop('stars')
record['offering_id'] = record.pop('business_id')
return records
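# load_records() reshapes each raw review, e.g. a record
# {'user_id': 'u1', 'business_id': 'b1', 'stars': 4, 'text': '...'} comes back
# as {'user_id': 'u1', 'offering_id': 'b1', 'overall_rating': 4, 'text': '...'}
# (the values shown here are made up for illustration).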
def run_rmse_test(
records_file, recommenders, binary_reviews_file, reviews_type=None):
records = load_records(records_file)
# records = extractor.remove_users_with_low_reviews(records, 2)
with open(binary_reviews_file, 'rb') as read_file:
binary_reviews = pickle.load(read_file)
if len(records) != len(binary_reviews):
raise ValueError("The records and reviews should have the same length")
num_folds = 5
dataset_info_map = {}
dataset_info_map['dataset'] = records_file.split('/')[-1]
dataset_info_map['cache_reviews'] = binary_reviews_file.split('/')[-1]
dataset_info_map['num_records'] = len(records)
dataset_info_map['reviews_type'] = reviews_type
dataset_info_map['cross_validation_folds'] = num_folds
results_list = []
results_log_list = []
count = 0
print('Total recommenders: %d' % (len(recommenders)))
for recommender in recommenders:
print('\n**************\n%d/%d\n**************' %
(count, len(recommenders)))
results = recommender_evaluator.perform_cross_validation(
records, recommender, num_folds, binary_reviews, reviews_type)
results_list.append(results)
remaining_time = results['Execution time'] * (len(recommenders) - count)
remaining_time /= 3600
print('Estimated remaining time: %.2f hours' % remaining_time)
count += 1
for recommender, results in zip(recommenders, results_list):
results_log_list.append(process_rmse_results(recommender, results, dataset_info_map))
timestamp = time.strftime("%Y%m%d-%H%M%S")
file_name = 'recommender-rmse-results' + timestamp
ETLUtils.save_csv_file(file_name + '.csv', results_log_list, RMSE_HEADERS, '\t')
def run_top_n_test(
records_file, recommenders, binary_reviews_file, reviews_type=None):
records = load_records(records_file)
# records = extractor.remove_users_with_low_reviews(records, 2)
with open(binary_reviews_file, 'rb') as read_file:
binary_reviews = pickle.load(read_file)
if len(records) != len(binary_reviews):
raise ValueError("The records and reviews should have the same length")
num_folds = 5
split = 0.986
min_like_score = 5.0
top_n = 10
dataset_info_map = {}
dataset_info_map['dataset'] = records_file.split('/')[-1]
dataset_info_map['cache_reviews'] = binary_reviews_file.split('/')[-1]
dataset_info_map['num_records'] = len(records)
dataset_info_map['reviews_type'] = reviews_type
dataset_info_map['cross_validation_folds'] = num_folds
dataset_info_map['min_like_score'] = min_like_score
dataset_info_map['top_n'] = top_n
results_list = []
results_log_list = []
count = 0
print('Total recommenders: %d' % (len(recommenders)))
for recommender in recommenders:
print('\n**************\nProgress: %d/%d\n**************' %
(count, len(recommenders)))
print(get_knn_recommender_info(recommender))
results = precision_in_top_n.calculate_recall_in_top_n(
records, recommender, top_n, num_folds, split, min_like_score,
binary_reviews, reviews_type)
results_list.append(results)
remaining_time = results['Execution time'] * (len(recommenders) - count)
remaining_time /= 3600
print('Estimated remaining time: %.2f hours' % remaining_time)
count += 1
for recommender, results in zip(recommenders, results_list):
results_log_list.append(process_topn_results(recommender, results, dataset_info_map))
timestamp = time.strftime("%Y%m%d-%H%M%S")
file_name = 'recommender-topn-results' + timestamp
ETLUtils.save_csv_file(file_name + '.csv', results_log_list, TOPN_HEADERS, '\t')
def process_rmse_results(recommender, results, dataset_info):
log = dataset_info.copy()
log.update(get_knn_recommender_info(recommender))
log['MAE'] = results['MAE']
log['RMSE'] = results['RMSE']
log['coverage'] = results['Coverage']
log['time'] = results['Execution time']
return log
def process_topn_results(recommender, results, dataset_info):
log = dataset_info.copy()
log.update(get_knn_recommender_info(recommender))
log['recall'] = results['Top N']
log['coverage'] = results['Coverage']
log['time'] = results['Execution time']
return log
def combine_recommenders(
neighbourhood_calculators,
neighbour_contribution_calculators,
baseline_calculators,
similarity_calculators,
num_neighbours_list,
thresholds,
num_topics_list):
combined_recommenders = []
for neighbourhood_calculator,\
neighbour_contribution_calculator,\
baseline_calculator,\
similarity_calculator,\
num_neighbours,\
threshold,\
num_topics\
in itertools.product(
neighbourhood_calculators,
neighbour_contribution_calculators,
baseline_calculators,
similarity_calculators,
num_neighbours_list,
thresholds,
num_topics_list):
recommender = ContextualKNN(
None, None, None, None, None, has_context=True)
recommender.neighbourhood_calculator = neighbourhood_calculator
recommender.neighbour_contribution_calculator =\
neighbour_contribution_calculator
recommender.user_baseline_calculator = baseline_calculator
recommender.user_similarity_calculator = similarity_calculator
recommender.num_neighbours = num_neighbours
recommender.threshold1 = threshold
recommender.threshold2 = threshold
recommender.threshold3 = threshold
recommender.threshold4 = threshold
recommender.num_topics = num_topics
combined_recommenders.append(recommender)
return combined_recommenders
def get_recommenders_set():
# nc = ContextNeighbourhoodCalculator()
# ncc = NeighbourContributionCalculator()
# ubc = UserBaselineCalculator()
# usc = PBCSimilarityCalculator()
# cosine_usc = CBCSimilarityCalculator()
# Similarity calculators
cosine_sc = CosineSimilarityCalculator()
pearson_sc = PearsonSimilarityCalculator()
pbc_sc = PBCSimilarityCalculator()
cbu_sc = CBCSimilarityCalculator()
similarity_calculators = [
cosine_sc,
pearson_sc,
pbc_sc,
cbu_sc
]
# Neighbourhood calculators
simple_nc = SimpleNeighbourhoodCalculator(copy.deepcopy(pearson_sc))
context_nc = ContextNeighbourhoodCalculator()
# hybrid_nc0 = ContextHybridNeighbourhoodCalculator(copy.deepcopy(pearson_sc))
# hybrid_nc0.weight = 0.0
hybrid_nc02 = ContextHybridNeighbourhoodCalculator(copy.deepcopy(pearson_sc))
hybrid_nc02.weight = 0.2
hybrid_nc05 = ContextHybridNeighbourhoodCalculator(copy.deepcopy(pearson_sc))
hybrid_nc05.weight = 0.5
hybrid_nc08 = ContextHybridNeighbourhoodCalculator(copy.deepcopy(pearson_sc))
hybrid_nc08.weight = 0.8
# hybrid_nc1 = ContextHybridNeighbourhoodCalculator(copy.deepcopy(pearson_sc))
# hybrid_nc1.weight = 1.0
neighbourhood_calculators = [
simple_nc,
context_nc,
# hybrid_nc0,
# hybrid_nc02,
hybrid_nc05,
# hybrid_nc08,
# hybrid_nc1
]
# Baseline calculators
simple_ubc = SimpleUserBaselineCalculator()
ubc = UserBaselineCalculator()
baseline_calculators = [
ubc,
simple_ubc
]
# Neighbour contribution calculators
ncc = NeighbourContributionCalculator()
context_ncc = ContextNCCalculator()
neighbour_contribution_calculators = [
ncc,
# context_ncc
]
num_topics = 150
# num_neighbours = None
numpy.random.seed(0)
basic_cosine_knn = ContextualKNN(num_topics, simple_nc, ncc, simple_ubc, cosine_sc, has_context=False)
basic_pearson_knn = ContextualKNN(num_topics, simple_nc, ncc, simple_ubc, pearson_sc, has_context=False)
contextual_knn = ContextualKNN(num_topics, context_nc, ncc, ubc, pbc_sc, has_context=True)
# get_knn_recommender_info(contextual_knn1)
# ocelma_recommender = OcelmaRecommender()
recommenders = [
# basic_cosine_knn,
# basic_pearson_knn,
contextual_knn
# ocelma_recommender
]
num_neighbours_list = [None]
# num_neighbours_list = [None, 3, 6, 10, 15, 20]
threshold_list = [0.0, 0.5, 0.9]
# threshold_list = [0.0]
# num_topics_list = [10, 50, 150, 300, 500]
num_topics_list = [150]
# combined_recommenders = []
# for recommender, num_neighbours in itertools.product(recommenders, num_neighbours_list):
# new_recommender = copy.deepcopy(recommender)
# new_recommender.num_neighbours = num_neighbours
# combined_recommenders.append(new_recommender)
# threshold_list = [None]
#
# combined_recommenders = []
# for recommender, threshold in itertools.product(recommenders, threshold_list):
# new_recommender = copy.deepcopy(recommender)
# new_recommender.threshold1 = threshold
# new_recommender.threshold2 = threshold
# new_recommender.threshold3 = threshold
# new_recommender.threshold4 = threshold
# combined_recommenders.append(new_recommender)
# num_threshold_list = [0.2, 0.5, 0.7]
combined_recommenders = combine_recommenders(
neighbourhood_calculators,
neighbour_contribution_calculators,
baseline_calculators,
similarity_calculators,
num_neighbours_list,
threshold_list,
num_topics_list
)
baseline_recommender = ContextualKNN(num_topics, simple_nc, ncc, simple_ubc, pearson_sc, has_context=True)
best_recommender = ContextualKNN(num_topics, hybrid_nc05, ncc, simple_ubc, pbc_sc, has_context=True)
# best_recommender = ContextualKNN(num_topics, simple_nc, ncc, ubc, cosine_sc, has_context=True)
best_recommender.threshold1 = 0.9
best_recommender.threshold2 = 0.9
best_recommender.threshold3 = 0.9
best_recommender.threshold4 = 0.9
my_recommenders = [
# baseline_recommender,
best_recommender
]
return my_recommenders
# return combined_recommenders
def main():
print('Process start: %s' % time.strftime("%Y/%d/%m-%H:%M:%S"))
folder = '/Users/fpena/UCC/Thesis/datasets/context/'
my_records_file = folder + 'yelp_training_set_review_hotels_shuffled.json'
# my_records_file = folder + 'yelp_training_set_review_restaurants_shuffled.json'
# my_records_file = folder + 'yelp_training_set_review_spas_shuffled.json'
# my_binary_reviews_file = folder + 'reviews_restaurant_shuffled.pkl'
my_binary_reviews_file = folder + 'reviews_hotel_shuffled.pkl'
# my_binary_reviews_file = folder + 'reviews_restaurant_shuffled_20.pkl'
# my_binary_reviews_file = folder + 'reviews_spa_shuffled_2.pkl'
# my_binary_reviews_file = folder + 'reviews_context_hotel_2.pkl'
combined_recommenders = get_recommenders_set()
run_rmse_test(my_records_file, combined_recommenders, my_binary_reviews_file)
# run_top_n_test(my_records_file, combined_recommenders, my_binary_reviews_file)
# run_rmse_test(my_records_file, combined_recommenders[47:], my_binary_reviews_file, 'specific')
# run_top_n_test(my_records_file, combined_recommenders, my_binary_reviews_file, 'specific')
# run_rmse_test(my_records_file, combined_recommenders[47:], my_binary_reviews_file, 'generic')
# run_top_n_test(my_records_file, combined_recommenders, my_binary_reviews_file, 'generic')
start = time.time()
main()
end = time.time()
total_time = end - start
# print("Total time = %f seconds" % total_time)
| lgpl-2.1 | 8,918,031,726,753,243,000 | 34.234136 | 110 | 0.679232 | false |
pyfidelity/zfsbackup | zfsbackup/zfs.py | 1 | 6372 | #
# Copyright (c) 2010, Mij <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and
# the following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# See http://mij.oltrelinux.com/devel/zfsbackup/
# Bitch to [email protected]
#
# module zfs
import os
import subprocess
ZFS_DEFAULT_SNAPSHOT_DIR='/.zfs/snapshot'
def pass_zfs_pool(f):
"""Decorator to pass the appropriate ZFS pool parameter at runtime, if none specified.
Calls f(original args, zpool=value)."""
def _decorator(*args, **kwargs):
if 'zpool' not in kwargs.keys() or not kwargs['zpool']:
# default to first zpool
kwargs.update({'zpool': get_default_pool()})
return f(*args, **kwargs)
return _decorator
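# A minimal sketch (not from the original source): the decorated helper below
# is hypothetical and only shows that ``zpool`` is filled in with
# get_default_pool() when the caller omits it.
@pass_zfs_pool
def _example_pool_name(zpool=None):  # pragma: no cover
    return zpool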
def get_pools():
"""Return a list of ZFS pools available on the system"""
command = 'zpool list -H'
try:
p = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
except OSError:
raise Exception('No ZFS tools found!')
zpout, zperr = p.communicate()
if p.returncode:
raise Exception("Error executing '%s': %d" % (command, p.returncode))
return [line.split('\t', 1)[0] for line in zpout.split('\n') if line]
def get_default_pool():
"""Return the primary ZFS pool configured in the system"""
return os.environ.get('ZFS_POOL', get_pools()[0])
@pass_zfs_pool
def get_datasets(zpool=None, strip_poolname=True):
"""Return a list of ZFS datasets available in a specific pool, or in all.
The root dataset is returned as an empty string."""
if zpool and zpool not in get_pools():
raise Exception("Pool '%s' is not available on this system!" % zpool)
command = 'zfs list -t filesystem -H'
try:
p = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
except OSError:
raise Exception("zfs not found. Cannot execute '%s'" % command)
zfsout, zfserr = p.communicate()
if p.returncode:
print "Error executing '%s': %d" % (command, p.returncode)
return []
datasets = []
for line in zfsout.split('\n'):
dsname = line.split('\t', 1)[0]
if not dsname: continue
dspool, sep, mountpoint = dsname.partition('/')
if zpool and dspool != zpool:
continue
if strip_poolname:
# produce '/my/mountpoint' for children and '' for root dataset
datasets.append(sep + mountpoint)
else:
datasets.append(dsname)
return datasets
@pass_zfs_pool
def destroy_snapshot(snapname, dataset='', recursive=True, zpool=None):
"""Remove a snapshot, from root or in a specific dataset.
If dataset is not specified, the snapshot is destroyed from the root.
If a zpool is specified, remove from there; else remove from the default zpool."""
fullsnapname = "%s%s@%s" % (zpool, dataset, snapname)
print "Destroying snapshot '%s'" % fullsnapname
if recursive:
command = 'zfs destroy -r %s' % fullsnapname
else:
command = 'zfs destroy %s' % fullsnapname
#print "Exec '%s'" % command
assert command.find('@') != -1 # we are not destroying datasets, only snapshots
p = subprocess.Popen(command.split(' '))
p.wait()
if p.returncode != 0 and p.returncode != 1: # 1 = snapshot did not exist. We can stand that
raise Exception("Error executing '%s': %d" % (command, p.returncode))
@pass_zfs_pool
def take_snapshot(snapname, restrictdatasets=None, nodatasets=None, recursive=True, zpool=None):
"""Take a recursive snapshot with the given name, possibly excluding some datasets.
restrictdatasets and nodatasets are optional lists of datasets to include or exclude
from the recursive snapshot."""
# take recursive snapshot of all datasets...
fullsnapname = '%s@%s' % (zpool, snapname)
print "Taking snapshot '%s'" % fullsnapname
if restrictdatasets:
restrictdatasets = [ds.rstrip('/') for ds in restrictdatasets]
print "Restricting to:", str(restrictdatasets)
print "Excluding:", str(nodatasets)
if recursive:
command = 'zfs snapshot -r %s' % fullsnapname
else:
command = 'zfs snapshot %s' % fullsnapname
#print "Exec '%s'" % command
p = subprocess.Popen(command.split(' '))
p.wait()
if p.returncode:
raise Exception("Error executing '%s': %d" % (command, p.returncode))
# ... then prune away undesired datasets if necessary
if restrictdatasets:
# remove whatever is not required, under ours
for ds in get_datasets():
# do not remove /usr/foo if there is any wanted dataset starting with /usr
if not filter(lambda x: ds.startswith(x), restrictdatasets):
destroy_snapshot(snapname, ds, recursive=False)
if nodatasets:
# remove whatever is explicitly excluded
for ds in get_datasets():
if ds in nodatasets:
destroy_snapshot(snapname, dataset=ds, recursive=True)
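# Typical call patterns for take_snapshot(), left as comments because they
# shell out to zfs; the snapshot and dataset names are made up.
#
#   take_snapshot('hourly-01')                               # all datasets
#   take_snapshot('hourly-01', restrictdatasets=['/usr'])    # only /usr and children
#   take_snapshot('hourly-01', nodatasets=['/tmp'])          # everything but /tmp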
def get_snapshots(dataset=''):
"""Return the list of snapshots order by increasing timestamp"""
# filter my tags
return os.listdir(dataset + ZFS_DEFAULT_SNAPSHOT_DIR)
| bsd-2-clause | 4,368,549,535,226,458,600 | 39.075472 | 96 | 0.672003 | false |
nikodtbVf/aima-si | games.py | 1 | 13492 | """Games, or Adversarial Search (Chapter 5)"""
from collections import namedtuple
import random
from utils import argmax
from canvas import Canvas
infinity = float('inf')
GameState = namedtuple('GameState', 'to_move, utility, board, moves')
# ______________________________________________________________________________
# Minimax Search
def minimax_decision(state, game):
"""Given a state in a game, calculate the best move by searching
forward all the way to the terminal states. [Figure 5.3]"""
player = game.to_move(state)
def max_value(state):
if game.terminal_test(state):
return game.utility(state, player)
v = -infinity
for a in game.actions(state):
v = max(v, min_value(game.result(state, a)))
return v
def min_value(state):
if game.terminal_test(state):
return game.utility(state, player)
v = infinity
for a in game.actions(state):
v = min(v, max_value(game.result(state, a)))
return v
# Body of minimax_decision:
return argmax(game.actions(state),
key=lambda a: min_value(game.result(state, a)))
# ______________________________________________________________________________
def alphabeta_full_search(state, game):
"""Search game to determine best action; use alpha-beta pruning.
As in [Figure 5.7], this version searches all the way to the leaves."""
player = game.to_move(state)
# Functions used by alphabeta
def max_value(state, alpha, beta):
if game.terminal_test(state):
return game.utility(state, player)
v = -infinity
for a in game.actions(state):
v = max(v, min_value(game.result(state, a), alpha, beta))
if v >= beta:
return v
alpha = max(alpha, v)
return v
def min_value(state, alpha, beta):
if game.terminal_test(state):
return game.utility(state, player)
v = infinity
for a in game.actions(state):
v = min(v, max_value(game.result(state, a), alpha, beta))
if v <= alpha:
return v
beta = min(beta, v)
return v
# Body of alphabeta_search:
best_score = -infinity
beta = infinity
best_action = None
for a in game.actions(state):
v = min_value(game.result(state, a), best_score, beta)
if v > best_score:
best_score = v
best_action = a
return best_action
def alphabeta_search(state, game, d=4, cutoff_test=None, eval_fn=None):
"""Search game to determine best action; use alpha-beta pruning.
This version cuts off search and uses an evaluation function."""
player = game.to_move(state)
# Functions used by alphabeta
def max_value(state, alpha, beta, depth):
if cutoff_test(state, depth):
return eval_fn(state)
v = -infinity
for a in game.actions(state):
v = max(v, min_value(game.result(state, a),
alpha, beta, depth + 1))
if v >= beta:
return v
alpha = max(alpha, v)
return v
def min_value(state, alpha, beta, depth):
if cutoff_test(state, depth):
return eval_fn(state)
v = infinity
for a in game.actions(state):
v = min(v, max_value(game.result(state, a),
alpha, beta, depth + 1))
if v <= alpha:
return v
beta = min(beta, v)
return v
# Body of alphabeta_search starts here:
# The default test cuts off at depth d or at a terminal state
cutoff_test = (cutoff_test or
(lambda state, depth: depth > d or
game.terminal_test(state)))
eval_fn = eval_fn or (lambda state: game.utility(state, player))
best_score = -infinity
beta = infinity
best_action = None
for a in game.actions(state):
v = min_value(game.result(state, a), best_score, beta, 1)
if v > best_score:
best_score = v
best_action = a
return best_action
# ______________________________________________________________________________
# Players for Games
def query_player(game, state):
"Make a move by querying standard input."
move_string = input('Your move? ')
try:
move = eval(move_string)
except NameError:
move = move_string
return move
def random_player(game, state):
"A player that chooses a legal move at random."
return random.choice(game.actions(state))
def alphabeta_player(game, state):
return alphabeta_full_search(state, game)
def play_game(game, *players):
"""Play an n-person, move-alternating game."""
state = game.initial
while True:
for player in players:
game.display(state)
move = player(game, state)
state = game.result(state, move)
if game.terminal_test(state):
game.display(state)
return game.utility(state, game.to_move(game.initial))
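# Illustrative note (not from the original source): with the players above and
# the games defined below, a complete match can be run as, e.g.
#   play_game(TicTacToe(), alphabeta_player, random_player)
# which returns +1, 0 or -1 from the first player's point of view.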
# ______________________________________________________________________________
# Some Sample Games
class Game:
"""A game is similar to a problem, but it has a utility for each
state and a terminal test instead of a path cost and a goal
test. To create a game, subclass this class and implement actions,
result, utility, and terminal_test. You may override display and
successors or you can inherit their default methods. You will also
need to set the .initial attribute to the initial state; this can
be done in the constructor."""
def actions(self, state):
"Return a list of the allowable moves at this point."
raise NotImplementedError
def result(self, state, move):
"Return the state that results from making a move from a state."
raise NotImplementedError
def utility(self, state, player):
"Return the value of this final state to player."
raise NotImplementedError
def terminal_test(self, state):
"Return True if this is a final state for the game."
return not self.actions(state)
def to_move(self, state):
"Return the player whose move it is in this state."
return state.to_move
def display(self, state):
"Print or otherwise display the state."
print(state)
def __repr__(self):
return '<%s>' % self.__class__.__name__
class Fig52Game(Game):
"""The game represented in [Figure 5.2]. Serves as a simple test case."""
succs = dict(A=dict(a1='B', a2='C', a3='D'),
B=dict(b1='B1', b2='B2', b3='B3'),
C=dict(c1='C1', c2='C2', c3='C3'),
D=dict(d1='D1', d2='D2', d3='D3'))
utils = dict(B1=3, B2=12, B3=8, C1=2, C2=4, C3=6, D1=14, D2=5, D3=2)
initial = 'A'
def actions(self, state):
return list(self.succs.get(state, {}).keys())
def result(self, state, move):
return self.succs[state][move]
def utility(self, state, player):
if player == 'MAX':
return self.utils[state]
else:
return -self.utils[state]
def terminal_test(self, state):
return state not in ('A', 'B', 'C', 'D')
def to_move(self, state):
return 'MIN' if state in 'BCD' else 'MAX'
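# Illustrative note (not from the original source): this toy game gives a
# quick sanity check for the search routines; from the utility values above,
# MAX's best opening move is 'a1', so both
#   minimax_decision('A', Fig52Game())
#   alphabeta_full_search('A', Fig52Game())
# are expected to return 'a1' (expected value noted by the editor, not
# asserted in the original file).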
class TicTacToe(Game):
"""Play TicTacToe on an h x v board, with Max (first player) playing 'X'.
A state has the player to move, a cached utility, a list of moves in
the form of a list of (x, y) positions, and a board, in the form of
a dict of {(x, y): Player} entries, where Player is 'X' or 'O'."""
def __init__(self, h=3, v=3, k=3):
self.h = h
self.v = v
self.k = k
moves = [(x, y) for x in range(1, h + 1)
for y in range(1, v + 1)]
self.initial = GameState(to_move='X', utility=0, board={}, moves=moves)
def actions(self, state):
"Legal moves are any square not yet taken."
return state.moves
def result(self, state, move):
if move not in state.moves:
return state # Illegal move has no effect
board = state.board.copy()
board[move] = state.to_move
moves = list(state.moves)
moves.remove(move)
return GameState(to_move=('O' if state.to_move == 'X' else 'X'),
utility=self.compute_utility(board, move, state.to_move),
board=board, moves=moves)
def utility(self, state, player):
"Return the value to player; 1 for win, -1 for loss, 0 otherwise."
return state.utility if player == 'X' else -state.utility
def terminal_test(self, state):
"A state is terminal if it is won or there are no empty squares."
return state.utility != 0 or len(state.moves) == 0
def display(self, state):
board = state.board
for x in range(1, self.h + 1):
for y in range(1, self.v + 1):
print(board.get((x, y), '.'), end=' ')
print()
def compute_utility(self, board, move, player):
"If 'X' wins with this move, return 1; if 'O' wins return -1; else return 0."
if (self.k_in_row(board, move, player, (0, 1)) or
self.k_in_row(board, move, player, (1, 0)) or
self.k_in_row(board, move, player, (1, -1)) or
self.k_in_row(board, move, player, (1, 1))):
return +1 if player == 'X' else -1
else:
return 0
def k_in_row(self, board, move, player, delta_x_y):
"Return true if there is a line through move on board for player."
(delta_x, delta_y) = delta_x_y
x, y = move
n = 0 # n is number of moves in row
while board.get((x, y)) == player:
n += 1
x, y = x + delta_x, y + delta_y
x, y = move
while board.get((x, y)) == player:
n += 1
x, y = x - delta_x, y - delta_y
n -= 1 # Because we counted move itself twice
return n >= self.k
class ConnectFour(TicTacToe):
"""A TicTacToe-like game in which you can only make a move on the bottom
row, or in a square directly above an occupied square. Traditionally
played on a 7x6 board and requiring 4 in a row."""
def __init__(self, h=7, v=6, k=4):
TicTacToe.__init__(self, h, v, k)
def actions(self, state):
return [(x, y) for (x, y) in state.moves
if y == 1 or (x, y - 1) in state.board]
class Canvas_TicTacToe(Canvas):
"""Play a 3x3 TicTacToe game on HTML canvas
TODO: Add restart button
"""
def __init__(self, varname, player_1='human', player_2='random', id=None, width=300, height=300):
valid_players = ('human', 'random', 'alphabeta')
if player_1 not in valid_players or player_2 not in valid_players:
raise TypeError("Players must be one of {}".format(valid_players))
Canvas.__init__(self, varname, id, width, height)
self.ttt = TicTacToe()
self.state = self.ttt.initial
self.turn = 0
self.strokeWidth(5)
self.players = (player_1, player_2)
self.draw_board()
self.font("Ariel 30px")
def mouse_click(self, x, y):
player = self.players[self.turn]
if self.ttt.terminal_test(self.state):
return
if player == 'human':
x, y = int(3*x/self.width) + 1, int(3*y/self.height) + 1
if (x, y) not in self.ttt.actions(self.state):
# Invalid move
return
move = (x, y)
elif player == 'alphabeta':
move = alphabeta_player(self.ttt, self.state)
else:
move = random_player(self.ttt, self.state)
self.state = self.ttt.result(self.state, move)
self.turn ^= 1
self.draw_board()
def draw_board(self):
self.clear()
self.stroke(0, 0, 0)
offset = 1/20
self.line_n(0 + offset, 1/3, 1 - offset, 1/3)
self.line_n(0 + offset, 2/3, 1 - offset, 2/3)
self.line_n(1/3, 0 + offset, 1/3, 1 - offset)
self.line_n(2/3, 0 + offset, 2/3, 1 - offset)
board = self.state.board
for mark in board:
if board[mark] == 'X':
self.draw_x(mark)
elif board[mark] == 'O':
self.draw_o(mark)
if self.ttt.terminal_test(self.state):
# End game message
utility = self.ttt.utility(self.state, self.ttt.to_move(self.ttt.initial))
if utility == 0:
self.text_n('Game Draw!', 0.1, 0.1)
else:
self.text_n('Player {} wins!'.format(1 if utility > 0 else 2), 0.1, 0.1)
else: # Print which player's turn it is
self.text_n("Player {}'s move({})".format(self.turn+1, self.players[self.turn]), 0.1, 0.1)
self.update()
def draw_x(self, position):
self.stroke(0, 255, 0)
x, y = [i-1 for i in position]
offset = 1/15
self.line_n(x/3 + offset, y/3 + offset, x/3 + 1/3 - offset, y/3 + 1/3 - offset)
self.line_n(x/3 + 1/3 - offset, y/3 + offset, x/3 + offset, y/3 + 1/3 - offset)
def draw_o(self, position):
self.stroke(255, 0, 0)
x, y = [i-1 for i in position]
self.arc_n(x/3 + 1/6, y/3 + 1/6, 1/9, 0, 360)
| mit | -5,513,437,764,375,890,000 | 33.243655 | 102 | 0.549585 | false |
mnjy/critters | CrittersProto/generator/boxworm_algorithm.py | 1 | 1821 | #####################################################################
#
# hummingloop_algorithm.py
#
# Copyright (c) 2015, Nick Benson
# Modifications by benchan
#
# Released under the MIT License (http://opensource.org/licenses/MIT)
#
#####################################################################
import random as r
def choose_notes():
notes = []
pattern = r.choice(PATTERNS)
subpat_dict = {}
# Generate subpattern dictionary
for mapping in pattern[1]:
# Generate subpattern
new_subpat = []
subpat_probs = r.choice(HIT_PROBS)
for i in range(mapping[1]):
if r.random() < subpat_probs[i]:
new_subpat.append(r.choice(HITS))
else:
new_subpat.append(-1)
subpat_dict[mapping[0]] = new_subpat
# Generate notes based on pattern
for char in pattern[0]:
notes += subpat_dict[char]
# Late-pass mutation: Ensure first-note hit
notes[0] = r.choice(HITS)
# Late-pass mutation: Alternate rapid sequence hits
cur_hit = -1
for i in range(len(notes)):
if notes[i] == cur_hit:
notes[i] = ALT_HITS[notes[i]]
cur_hit = notes[i]
print "Notes: " + str(notes)
return notes
# Rhythm patterns
PATTERN_1 = ("ABABABAC", [("A", 8), ("B", 8), ("C", 8)])
PATTERN_2 = ("AABAABAABAAC", [("A", 4), ("B", 8), ("C", 8)])
PATTERN_3 = ("ABABABACC", [("A", 8), ("B", 8), ("C", 4)])
PATTERNS = [PATTERN_1, PATTERN_2, PATTERN_3]
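# Each pattern is (label sequence, [(label, subpattern length in 16th-note
# slots), ...]); choose_notes() builds one random subpattern per label and
# concatenates them in the order given by the label string.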
# 16th slot hit probabilities
HIT_PROB_1 = [0.6, 0.4, 0.5, 0.4]*4
HIT_PROB_2 = [0.8, 0.3, 0.7, 0.3]*4
HIT_PROB_3 = [0.3, 0.8, 0.5, 0.6]*4
HIT_PROBS = [HIT_PROB_1, HIT_PROB_2, HIT_PROB_3]
# Possible hits
HITS = [48, 45, 42, 35]
ALT_HITS = {-1:-1, 48:50, 45:47, 42:-1, 35:36}
# Interaction configuration
NOTE_VELOCITY_MULT = 0.5
| mit | -1,726,495,276,093,130,500 | 27.46875 | 69 | 0.533773 | false |
warner/magic-wormhole | src/wormhole/journal.py | 1 | 1179 | from __future__ import absolute_import, print_function, unicode_literals
import contextlib
from zope.interface import implementer
from ._interfaces import IJournal
@implementer(IJournal)
class Journal(object):
def __init__(self, save_checkpoint):
self._save_checkpoint = save_checkpoint
self._outbound_queue = []
self._processing = False
def queue_outbound(self, fn, *args, **kwargs):
assert self._processing
self._outbound_queue.append((fn, args, kwargs))
@contextlib.contextmanager
def process(self):
assert not self._processing
assert not self._outbound_queue
self._processing = True
yield # process inbound messages, change state, queue outbound
self._save_checkpoint()
for (fn, args, kwargs) in self._outbound_queue:
fn(*args, **kwargs)
self._outbound_queue[:] = []
self._processing = False
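# A minimal sketch (not from the original source) of the intended call
# pattern; the checkpoint callable and message sender are hypothetical
# stand-ins and the helper is never invoked by the library.
def _example_journal_usage(send_message):  # pragma: no cover
    j = Journal(save_checkpoint=lambda: None)
    with j.process():
        # inbound processing / state changes would happen here; outbound
        # sends are only queued, not executed yet
        j.queue_outbound(send_message, "hello")
    # leaving the block saves the checkpoint, then runs the queued sends
    return j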
@implementer(IJournal)
class ImmediateJournal(object):
def __init__(self):
pass
def queue_outbound(self, fn, *args, **kwargs):
fn(*args, **kwargs)
@contextlib.contextmanager
def process(self):
yield
| mit | -7,589,996,619,548,008,000 | 25.795455 | 72 | 0.64207 | false |
jhogsett/linkit | python/simon5.py | 1 | 5584 | #!/usr/bin/python
import serial
import time
import random
import sys
s = None
num_leds = 93
play_time = 0.0
def flush_input():
s.flushInput()
def wait_for_ack():
while s.inWaiting() <= 0:
pass
s.read(s.inWaiting())
def command(cmd_text):
# print cmd_text
s.write((cmd_text + ':').encode())
wait_for_ack()
def setup():
global s, ticks, play_time
s = serial.Serial("/dev/ttyS0", 115200)
flush_input()
choose_colors()
command(":::pau")
command("rst:clr:pau")
if len(sys.argv) > 1:
command(sys.argv[1])
if len(sys.argv) > 2:
play_time = float(sys.argv[2])
command("6:zon:red:7:rep:grn:7:rep:org:7:rep:blu:7:rep")
command("5:zon:red:5:rep:grn:5:rep:org:5:rep:blu:5:rep")
command("4:zon:red:3:rep:grn:3:rep:org:3:rep:blu:3:rep")
command("3:zon:red:2:rep:grn:2:rep:org:2:rep:blu:2:rep")
command("2:zon:red:1:rep:grn:1:rep:org:1:rep:blu:1:rep")
command("1:zon:wht")
num_colors = 12
colors = [ "red", "orange", "yellow", "ltgreen", "green", "seafoam", "cyan", "ltblue", "blue", "purple", "magenta", "pink", "black", "white", "gray", "dkgray" ]
effects = ['blink1','blink2','blink3','blink4','blink5','blink6']
effect_index = 0
chosen_colors = [0,1,2,3,4,5]
def random_color():
r = random.randrange(0, num_colors)
return colors[r]
def choose_colors():
global chosen_colors
for i in range(0, 6):
chosen_colors[i] = random_color()
def shift_colors():
global chosen_colors
for i in xrange(5, 0, -1):
chosen_colors[i] = chosen_colors[i-1]
def clear_colors():
for j in range(0,6):
chosen_colors[j] = "black"
def place_color(zone, color):
command(str(zone) + ":zone:" + color + ":blink" + str(zone) + ":flood")
def place_colors():
place_color(6, chosen_colors[0])
place_color(5, chosen_colors[1])
place_color(4, chosen_colors[2])
place_color(3, chosen_colors[3])
place_color(2, chosen_colors[4])
place_color(1, chosen_colors[5])
def display():
place_colors()
command("flush")
def do_zone(zone):
command(str(zone) + ":zone:rotate")
def do_zones():
for i in range(2, 7):
do_zone(i)
command("flush")
global idx, color, rotation_count, change_count
idx = -1
color = 'pink'
rotation_count = 0
past_colors = ['', '', '', '', '', '', 'red', 'green', 'orange', 'blue']
change_count = 0
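# loop() repaints each zone with the current colour at a staggered period
# (zone 6 every 3 ticks, zone 5 every 4, zone 4 every 6, zone 3 every 8,
# zone 2 every 12), so a new colour spreads gradually across the zones.
# Every 24 repaints the colour alternates between black and a fresh random
# colour that has not been used recently.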
def loop():
global idx, rotation_count, color, change_count
do_flush = False
idx = idx + 1
if (idx % 3 == 0):
command("6:zon:" + color)
do_flush = True
if (idx % 4 == 0):
command("5:zon:" + color)
do_flush = True
if (idx % 6 == 0):
command("4:zon:" + color)
do_flush = True
if (idx % 8 == 0):
command("3:zon:" + color)
do_flush = True
if (idx % 12 == 0):
command("2:zon:" + color)
do_flush = True
if do_flush == True:
command("flu")
rotation_count += 1
if(rotation_count == 24):
change_count = (change_count + 1) % 2
if(change_count == 0):
color = "black"
else:
color = random_color()
while(color in past_colors):
color = random_color()
past_colors.pop(0)
past_colors.append(color)
rotation_count = 0
time.sleep(play_time)
if __name__ == '__main__':
setup()
while True:
loop()
| mit | 4,013,220,484,216,460 | 37.777778 | 179 | 0.344914 | false |
h4ck3rm1k3/pywikibot-core | tests/tools_tests.py | 1 | 21092 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Test tools package alone which don't fit into other tests."""
#
# (C) Pywikibot team, 2016
#
# Distributed under the terms of the MIT license.
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import collections
import decimal
import inspect
import os.path
import subprocess
import tempfile
import warnings
from pywikibot import tools
from tests import join_xml_data_path
from tests.aspects import (
unittest, require_modules, DeprecationTestCase, TestCase, MetaTestCaseClass
)
from tests.utils import expected_failure_if, add_metaclass
class ContextManagerWrapperTestCase(TestCase):
"""Test that ContextManagerWrapper is working correctly."""
class DummyClass(object):
"""A dummy class which has some values and a close method."""
class_var = 42
def __init__(self):
"""Create instance with dummy values."""
self.instance_var = 1337
self.closed = False
def close(self):
"""Just store that it has been closed."""
self.closed = True
net = False
def test_wrapper(self):
"""Create a test instance and verify the wrapper redirects."""
obj = self.DummyClass()
wrapped = tools.ContextManagerWrapper(obj)
self.assertIs(wrapped.class_var, obj.class_var)
self.assertIs(wrapped.instance_var, obj.instance_var)
self.assertIs(wrapped._wrapped, obj)
self.assertFalse(obj.closed)
with wrapped as unwrapped:
self.assertFalse(obj.closed)
self.assertIs(unwrapped, obj)
unwrapped.class_var = 47
self.assertTrue(obj.closed)
self.assertEqual(wrapped.class_var, 47)
def test_exec_wrapper(self):
"""Check that the wrapper permits exceptions."""
wrapper = tools.ContextManagerWrapper(self.DummyClass())
self.assertFalse(wrapper.closed)
with self.assertRaises(ZeroDivisionError):
with wrapper:
1 / 0
self.assertTrue(wrapper.closed)
class OpenArchiveTestCase(TestCase):
"""
Unit test class for tools.
    The tests for open_archive require that article-pyrus.xml* contain all
the same content after extraction. The content itself is not important.
The file article-pyrus.xml_invalid.7z is not a valid 7z file and
open_archive will fail extracting it using 7za.
"""
net = False
@classmethod
def setUpClass(cls):
"""Define base_file and original_content."""
super(OpenArchiveTestCase, cls).setUpClass()
cls.base_file = join_xml_data_path('article-pyrus.xml')
with open(cls.base_file, 'rb') as f:
cls.original_content = f.read()
def _get_content(self, *args, **kwargs):
"""Use open_archive and return content using a with-statement."""
with tools.open_archive(*args, **kwargs) as f:
return f.read()
def test_open_archive_normal(self):
"""Test open_archive with no compression in the standard library."""
self.assertEqual(self._get_content(self.base_file), self.original_content)
def test_open_archive_bz2(self):
"""Test open_archive with bz2 compressor in the standard library."""
self.assertEqual(self._get_content(self.base_file + '.bz2'), self.original_content)
self.assertEqual(self._get_content(self.base_file + '.bz2', use_extension=False),
self.original_content)
@require_modules('bz2file')
def test_open_archive_with_bz2file(self):
"""Test open_archive when bz2file library."""
old_bz2 = tools.bz2
try:
tools.bz2 = __import__('bz2file')
self.assertEqual(self._get_content(self.base_file + '.bz2'),
self.original_content)
self.assertEqual(self._get_content(self.base_file + '.bz2',
use_extension=False),
self.original_content)
finally:
tools.bz2 = old_bz2
def test_open_archive_without_bz2(self):
"""Test open_archive when bz2 and bz2file are not available."""
old_bz2 = tools.bz2
try:
tools.bz2 = ImportError()
self.assertRaises(ImportError, self._get_content, self.base_file + '.bz2')
finally:
tools.bz2 = old_bz2
def test_open_archive_gz(self):
"""Test open_archive with gz compressor in the standard library."""
self.assertEqual(self._get_content(self.base_file + '.gz'), self.original_content)
def test_open_archive_7z(self):
"""Test open_archive with 7za if installed."""
try:
subprocess.Popen(['7za'], stdout=subprocess.PIPE).stdout.close()
except OSError:
raise unittest.SkipTest('7za not installed')
self.assertEqual(self._get_content(self.base_file + '.7z'), self.original_content)
self.assertRaises(OSError, self._get_content, self.base_file + '_invalid.7z',
use_extension=True)
class OpenCompressedTestCase(OpenArchiveTestCase, DeprecationTestCase):
"""Test opening files with the deprecated open_compressed."""
net = False
def _get_content(self, *args, **kwargs):
"""Use open_compressed and return content using a with-statement."""
# open_archive default is True, so if it's False it's not the default
# so use the non-default of open_compressed (which is True)
if kwargs.get('use_extension') is False:
kwargs['use_extension'] = True
with tools.open_compressed(*args, **kwargs) as f:
content = f.read()
self.assertOneDeprecation(self.INSTEAD)
return content
class OpenArchiveWriteTestCase(TestCase):
"""Test writing with open_archive."""
net = False
@classmethod
def setUpClass(cls):
"""Define base_file and original_content."""
super(OpenArchiveWriteTestCase, cls).setUpClass()
cls.base_file = join_xml_data_path('article-pyrus.xml')
with open(cls.base_file, 'rb') as f:
cls.original_content = f.read()
def _write_content(self, suffix):
try:
fh, fn = tempfile.mkstemp(suffix)
with tools.open_archive(fn, 'wb') as f:
f.write(self.original_content)
with tools.open_archive(fn, 'rb') as f:
self.assertEqual(f.read(), self.original_content)
with open(fn, 'rb') as f:
return f.read()
finally:
os.close(fh)
os.remove(fn)
def test_invalid_modes(self):
"""Test various invalid mode configurations."""
self.assertRaises(ValueError, tools.open_archive,
'/dev/null', 'ra') # two modes besides
self.assertRaises(ValueError, tools.open_archive,
'/dev/null', 'rt') # text mode
self.assertRaises(ValueError, tools.open_archive,
'/dev/null', 'br') # binary at front
self.assertRaises(ValueError, tools.open_archive,
'/dev/null', 'wb', False) # writing without extension
def test_binary_mode(self):
"""Test that it uses binary mode."""
with tools.open_archive(self.base_file, 'r') as f:
self.assertEqual(f.mode, 'rb')
self.assertIsInstance(f.read(), bytes)
def test_write_archive_bz2(self):
"""Test writing a bz2 archive."""
content = self._write_content('.bz2')
with open(self.base_file + '.bz2', 'rb') as f:
self.assertEqual(content, f.read())
def test_write_archive_gz(self):
"""Test writing a gz archive."""
content = self._write_content('.gz')
self.assertEqual(content[:3], b'\x1F\x8B\x08')
def test_write_archive_7z(self):
"""Test writing an archive as a 7z archive."""
self.assertRaises(NotImplementedError, tools.open_archive,
'/dev/null.7z', mode='wb')
class MergeUniqueDicts(TestCase):
"""Test merge_unique_dicts."""
net = False
dct1 = {'foo': 'bar', '42': 'answer'}
dct2 = {47: 'Star', 74: 'Trek'}
dct_both = dct1.copy()
dct_both.update(dct2)
def test_single(self):
"""Test that it returns the dict itself when there is only one."""
self.assertEqual(tools.merge_unique_dicts(self.dct1), self.dct1)
self.assertEqual(tools.merge_unique_dicts(**self.dct1), self.dct1)
def test_multiple(self):
"""Test that it actually merges dicts."""
self.assertEqual(tools.merge_unique_dicts(self.dct1, self.dct2),
self.dct_both)
self.assertEqual(tools.merge_unique_dicts(self.dct2, **self.dct1),
self.dct_both)
def test_different_type(self):
"""Test that the keys can be different types."""
self.assertEqual(tools.merge_unique_dicts({'1': 'str'}, {1: 'int'}),
{'1': 'str', 1: 'int'})
def test_conflict(self):
"""Test that it detects conflicts."""
self.assertRaisesRegex(
ValueError, '42', tools.merge_unique_dicts, self.dct1, **{'42': 'bad'})
self.assertRaisesRegex(
ValueError, '42', tools.merge_unique_dicts, self.dct1, self.dct1)
self.assertRaisesRegex(
ValueError, '42', tools.merge_unique_dicts, self.dct1, **self.dct1)
def passthrough(x):
"""Return x."""
return x
class SkipList(set):
"""Container that ignores items."""
skip_list = [1, 3]
def __contains__(self, item):
"""Override to not process some items."""
if item in self.skip_list:
return True
else:
return super(SkipList, self).__contains__(item)
class ProcessAgainList(set):
"""Container that keeps processing certain items."""
process_again_list = [1, 3]
def add(self, item):
"""Override to not add some items."""
if item in self.process_again_list:
return
else:
return super(ProcessAgainList, self).add(item)
class ContainsStopList(set):
"""Container that stops when encountering items."""
stop_list = []
def __contains__(self, item):
"""Override to stop on encountering items."""
if item in self.stop_list:
raise StopIteration
else:
return super(ContainsStopList, self).__contains__(item)
class AddStopList(set):
"""Container that stops when encountering items."""
stop_list = []
def add(self, item):
"""Override to not continue on encountering items."""
if item in self.stop_list:
raise StopIteration
else:
super(AddStopList, self).add(item)
class TestFilterUnique(TestCase):
"""Test filter_unique."""
net = False
ints = [1, 3, 2, 1, 2, 1, 2, 4, 2]
strs = [str(i) for i in ints]
decs = [decimal.Decimal(i) for i in ints]
def _test_dedup_int(self, deduped, deduper, key=None):
"""Test filter_unique results for int."""
if not key:
key = passthrough
self.assertEqual(len(deduped), 0)
self.assertEqual(next(deduper), 1)
self.assertEqual(next(deduper), 3)
if key in (hash, passthrough):
if isinstance(deduped, tools.OrderedDict):
self.assertEqual(list(deduped.keys()), [1, 3])
elif isinstance(deduped, collections.Mapping):
self.assertCountEqual(list(deduped.keys()), [1, 3])
else:
self.assertEqual(deduped, set([1, 3]))
self.assertEqual(next(deduper), 2)
self.assertEqual(next(deduper), 4)
if key in (hash, passthrough):
if isinstance(deduped, tools.OrderedDict):
self.assertEqual(list(deduped.keys()), [1, 3, 2, 4])
elif isinstance(deduped, collections.Mapping):
self.assertCountEqual(list(deduped.keys()), [1, 2, 3, 4])
else:
self.assertEqual(deduped, set([1, 2, 3, 4]))
self.assertRaises(StopIteration, next, deduper)
def _test_dedup_str(self, deduped, deduper, key=None):
"""Test filter_unique results for str."""
if not key:
key = passthrough
self.assertEqual(len(deduped), 0)
self.assertEqual(next(deduper), '1')
self.assertEqual(next(deduper), '3')
if key in (hash, passthrough):
if isinstance(deduped, collections.Mapping):
self.assertEqual(deduped.keys(), [key('1'), key('3')])
else:
self.assertEqual(deduped, set([key('1'), key('3')]))
self.assertEqual(next(deduper), '2')
self.assertEqual(next(deduper), '4')
if key in (hash, passthrough):
if isinstance(deduped, collections.Mapping):
self.assertEqual(deduped.keys(), [key(i) for i in self.strs])
else:
self.assertEqual(deduped, set(key(i) for i in self.strs))
self.assertRaises(StopIteration, next, deduper)
def test_set(self):
"""Test filter_unique with a set."""
deduped = set()
deduper = tools.filter_unique(self.ints, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_dict(self):
"""Test filter_unique with a dict."""
deduped = dict()
deduper = tools.filter_unique(self.ints, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_OrderedDict(self):
"""Test filter_unique with a OrderedDict."""
deduped = tools.OrderedDict()
deduper = tools.filter_unique(self.ints, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_int_hash(self):
"""Test filter_unique with ints using hash as key."""
deduped = set()
deduper = tools.filter_unique(self.ints, container=deduped, key=hash)
self._test_dedup_int(deduped, deduper, hash)
def test_int_id(self):
"""Test filter_unique with ints using id as key."""
deduped = set()
deduper = tools.filter_unique(self.ints, container=deduped, key=id)
self._test_dedup_int(deduped, deduper, id)
def test_obj(self):
"""Test filter_unique with objects."""
deduped = set()
deduper = tools.filter_unique(self.decs, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_obj_hash(self):
"""Test filter_unique with objects using hash as key."""
deduped = set()
deduper = tools.filter_unique(self.decs, container=deduped, key=hash)
self._test_dedup_int(deduped, deduper, hash)
def test_obj_id(self):
"""Test filter_unique with objects using id as key, which fails."""
# Two objects which may be equal do not necessary have the same id.
deduped = set()
deduper = tools.filter_unique(self.decs, container=deduped, key=id)
self.assertEqual(len(deduped), 0)
for _ in self.decs:
self.assertEqual(id(next(deduper)), deduped.pop())
self.assertRaises(StopIteration, next, deduper)
# No. of Decimal with distinct ids != no. of Decimal with distinct value.
deduper_ids = list(tools.filter_unique(self.decs, key=id))
self.assertNotEqual(len(deduper_ids), len(set(deduper_ids)))
def test_str(self):
"""Test filter_unique with str."""
deduped = set()
deduper = tools.filter_unique(self.strs, container=deduped)
self._test_dedup_str(deduped, deduper)
def test_str_hash(self):
"""Test filter_unique with str using hash as key."""
deduped = set()
deduper = tools.filter_unique(self.strs, container=deduped, key=hash)
self._test_dedup_str(deduped, deduper, hash)
@expected_failure_if(not tools.PY2)
def test_str_id(self):
"""Test str using id as key fails on Python 3."""
# str in Python 3 behave like objects.
deduped = set()
deduper = tools.filter_unique(self.strs, container=deduped, key=id)
self._test_dedup_str(deduped, deduper, id)
def test_for_resumable(self):
"""Test filter_unique is resumable after a for loop."""
gen2 = tools.filter_unique(self.ints)
deduped = []
for item in gen2:
deduped.append(item)
if len(deduped) == 3:
break
self.assertEqual(deduped, [1, 3, 2])
last = next(gen2)
self.assertEqual(last, 4)
self.assertRaises(StopIteration, next, gen2)
def test_skip(self):
"""Test filter_unique with a container that skips items."""
deduped = SkipList()
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertCountEqual(deduped, deduped_out)
self.assertEqual(deduped, set([2, 4]))
def test_process_again(self):
"""Test filter_unique with an ignoring container."""
deduped = ProcessAgainList()
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertEqual(deduped_out, [1, 3, 2, 1, 1, 4])
self.assertEqual(deduped, set([2, 4]))
def test_stop(self):
"""Test filter_unique with an ignoring container."""
deduped = ContainsStopList()
deduped.stop_list = [2]
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertCountEqual(deduped, deduped_out)
self.assertEqual(deduped, set([1, 3]))
# And it should not resume
self.assertRaises(StopIteration, next, deduper)
deduped = AddStopList()
deduped.stop_list = [4]
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertCountEqual(deduped, deduped_out)
self.assertEqual(deduped, set([1, 2, 3]))
# And it should not resume
self.assertRaises(StopIteration, next, deduper)
class MetaTestArgSpec(MetaTestCaseClass):
"""Metaclass to create dynamically the tests. Set the net flag to false."""
def __new__(cls, name, bases, dct):
"""Create a new test case class."""
def create_test(method):
def test_method(self):
"""Test getargspec."""
# all expect at least self and param
expected = method(1, 2)
returned = self.getargspec(method)
self.assertEqual(returned, expected)
self.assertIsInstance(returned, self.expected_class)
self.assertNoDeprecation()
return test_method
for attr, tested_method in list(dct.items()):
if attr.startswith('_method_test_'):
suffix = attr[len('_method_test_'):]
cls.add_method(dct, 'test_method_' + suffix,
create_test(tested_method),
doc_suffix='on {0}'.format(suffix))
dct['net'] = False
return super(MetaTestArgSpec, cls).__new__(cls, name, bases, dct)
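# For every ``_method_test_<suffix>`` attribute on a class built with this
# metaclass, a ``test_method_<suffix>`` test is generated (e.g.
# ``_method_test_args`` yields ``test_method_args``) which checks that
# self.getargspec(method) matches the expected tuple the method itself returns.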
@add_metaclass
class TestArgSpec(DeprecationTestCase):
"""Test getargspec and ArgSpec from tools."""
__metaclass__ = MetaTestArgSpec
expected_class = tools.ArgSpec
def _method_test_args(self, param):
"""Test method with two positional arguments."""
return (['self', 'param'], None, None, None)
def _method_test_kwargs(self, param=42):
"""Test method with one positional and one keyword argument."""
return (['self', 'param'], None, None, (42,))
def _method_test_varargs(self, param, *var):
"""Test method with two positional arguments and var args."""
return (['self', 'param'], 'var', None, None)
def _method_test_varkwargs(self, param, **var):
"""Test method with two positional arguments and var kwargs."""
return (['self', 'param'], None, 'var', None)
def _method_test_vars(self, param, *args, **kwargs):
"""Test method with two positional arguments and both var args."""
return (['self', 'param'], 'args', 'kwargs', None)
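# Convention used by the generated tests: each _method_test_* helper returns the
# (args, varargs, keywords, defaults) tuple that getargspec() is expected to
# report for that helper's own signature, so calling the helper inside the
# generated test produces the expected value to compare against.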
def getargspec(self, method):
"""Call tested getargspec function."""
return tools.getargspec(method)
@unittest.skipIf(tools.PYTHON_VERSION >= (3, 6), 'removed in Python 3.6')
class TestPythonArgSpec(TestArgSpec):
"""Test the same tests using Python's implementation."""
expected_class = inspect.ArgSpec
def getargspec(self, method):
"""Call inspect's getargspec function."""
with warnings.catch_warnings():
if tools.PYTHON_VERSION >= (3, 5):
warnings.simplefilter('ignore', DeprecationWarning)
return inspect.getargspec(method)
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
| mit | -5,386,138,548,688,300,000 | 33.978441 | 91 | 0.601555 | false |
emccode/HeliosBurn | heliosburn/proxy/modules/server_overload.py | 1 | 4841 | import datetime
import random
from injectors import ExponentialInjector
from injectors import PlateauInjector
from module import AbstractModule
from twisted.python import log
from module_decorators import SkipHandler
from models import SOProfileModel
# ultimately pull from settings file
injector_map = {
"exponential": ExponentialInjector,
"plateau": PlateauInjector
}
class ResponseTrigger(object):
def __init__(self, min_load, max_load, probability):
self.min_load = min_load
self.max_load = max_load
self.probability = probability
self.metrics = {}
self.delay = 0
def match(self, load):
matched = False
if load >= self.min_load and load <= self.max_load:
if random.random() <= self.probability / 100.0:  # float division: probability is a percentage
matched = True
if matched:
# .get() avoids a KeyError on the first match; self.metrics starts out empty
self.metrics[self.__class__.__name__] = self.metrics.get(self.__class__.__name__, 0) + 1
return matched
def get_response(self):
pass
class SimulatedResponseTrigger(ResponseTrigger):
def __init__(self, min_load, max_load, probability, response):
ResponseTrigger.__init__(self, min_load, max_load, probability)
self.response = response
def get_response(self):
return self.response
class DelayedResponseTrigger(ResponseTrigger):
def __init__(self, min_load, max_load, probability, delay):
ResponseTrigger.__init__(self, min_load, max_load, probability)
self.delay = delay
def get_response(self):
return None
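# Illustrative sketch only (not part of the original module); the numbers are
# made up. A trigger that, for loads between 50 and 100, delays matching
# requests by 2 seconds roughly 25% of the time could be built as:
#
#   trigger = DelayedResponseTrigger(50, 100, 25, 2)
#   if trigger.match(current_load):
#       request_delay = trigger.delay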
class ServerOverload(AbstractModule):
triggers = []
injectors = []
response = None
def __init__(self):
AbstractModule.__init__(self)
self.redis_host = '127.0.0.1'
self.redis_port = 6379
self.redis_db = 0
self.mongo_host = 'heliosburn.traffic'
self.mongo_port = '127.0.0.1'
self.mongo_db = 'heliosburn'
self.stats['ServerOverload'] = []
self.response_code = None
def configure(self, **configs):
pass
@SkipHandler
def handle_request(self, request):
for injector in self.injectors:
load = injector.execute()
log.msg("Load:" + str(load))
self.stats['ServerOverload'].append(injector.metrics)
log.msg("about to trigger:")
for trigger in self.triggers:
log.msg("checking triggers:")
if trigger.match(load):
self.stats['ServerOverload'].append(trigger.metrics)
self.response_code = trigger.get_response()
request.delay += trigger.delay
log.msg("ServerOverload request: " + str(request))
return request
@SkipHandler
def handle_response(self, response):
if self.response_code:
response.code = self.response_code
return response
def _set_triggers(self):
for trigger in self.profile['response_triggers']:
min_load = trigger['fromLoad']
max_load = trigger['toLoad']
for action in trigger['actions']:
if action['type'] == "response":
response = action['value']
prob = action['percentage']
sr_trigger = SimulatedResponseTrigger(min_load,
max_load,
prob,
response
)
self.triggers.append(sr_trigger)
if action['type'] == "delay":
response = action['value']
prob = action['percentage']
d_trigger = DelayedResponseTrigger(min_load,
max_load,
prob,
response
)
self.triggers.append(d_trigger)
def start(self, **params):
self.session_id = params['session_id']
self.profile_id = params['profile_id']
self.profile = SOProfileModel({"_id": self.profile_id})
self.state = "running"
self.status = str(datetime.datetime.now())
self._set_triggers()
injector_type = self.profile['function']['type']
self.injectors.append(injector_map[injector_type](self.profile))
log.msg("Server Overload module started at: " + self.status)
def stop(self, **params):
self.state = "stopped"
self.profile = None
self.status = str(datetime.datetime.now())
self.injectors = []
log.msg("Server Overload module stopped at: " + self.status)
so = ServerOverload()
| mit | -6,316,699,127,163,408,000 | 31.059603 | 72 | 0.543689 | false |
robofab-developers/fontParts | Lib/fontParts/test/test_bPoint.py | 1 | 33013 | import unittest
import collections
from fontTools.misc.py23 import basestring
from fontParts.base import FontPartsError
class TestBPoint(unittest.TestCase):
def getBPoint_corner(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[1]
return bPoint
def getBPoint_corner_with_bcpOut(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((133, 212), "offcurve")
contour.appendPoint((0, 0), "offcurve")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[1]
return bPoint
def getBPoint_corner_with_bcpIn(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((0, 0), "offcurve")
contour.appendPoint((61, 190), "offcurve")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[1]
return bPoint
def getContour(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((19, 121), "offcurve")
contour.appendPoint((61, 190), "offcurve")
contour.appendPoint((101, 202), "curve", smooth=True)
contour.appendPoint((133, 212), "offcurve")
contour.appendPoint((155, 147), "offcurve")
contour.appendPoint((255, 147), "curve")
return contour
def getBPoint_curve(self):
contour = self.getContour()
bPoint = contour.bPoints[1]
return bPoint
def getBPoint_curve_firstPointOpenContour(self):
contour = self.getContour()
bPoint = contour.bPoints[0]
return bPoint
def getBPoint_curve_lastPointOpenContour(self):
contour = self.getContour()
bPoint = contour.bPoints[-1]
return bPoint
def getBPoint_withName(self):
bPoint = self.getBPoint_corner()
bPoint.name = "BP"
return bPoint
# ----
# repr
# ----
def test_reprContents(self):
bPoint = self.getBPoint_corner()
value = bPoint._reprContents()
self.assertIsInstance(value, list)
for i in value:
self.assertIsInstance(i, basestring)
def test_reprContents_noContour(self):
point, _ = self.objectGenerator("point")
value = point._reprContents()
self.assertIsInstance(value, list)
for i in value:
self.assertIsInstance(i, basestring)
# -------
# Parents
# -------
def test_get_parent_font(self):
font, _ = self.objectGenerator("font")
layer = font.newLayer("L")
glyph = layer.newGlyph("X")
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
glyph.appendContour(contour)
contour = glyph.contours[0]
bPoint = contour.bPoints[1]
self.assertIsNotNone(bPoint.font)
self.assertEqual(
bPoint.font,
font
)
def test_get_parent_noFont(self):
layer, _ = self.objectGenerator("layer")
glyph = layer.newGlyph("X")
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
glyph.appendContour(contour)
contour = glyph.contours[0]
bPoint = contour.bPoints[1]
self.assertIsNone(bPoint.font)
def test_get_parent_layer(self):
layer, _ = self.objectGenerator("layer")
glyph = layer.newGlyph("X")
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
glyph.appendContour(contour)
contour = glyph.contours[0]
bPoint = contour.bPoints[1]
self.assertIsNotNone(bPoint.layer)
self.assertEqual(
bPoint.layer,
layer
)
def test_get_parent_noLayer(self):
glyph, _ = self.objectGenerator("glyph")
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
glyph.appendContour(contour)
contour = glyph.contours[0]
bPoint = contour.bPoints[1]
self.assertIsNone(bPoint.font)
self.assertIsNone(bPoint.layer)
def test_get_parent_glyph(self):
glyph, _ = self.objectGenerator("glyph")
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
glyph.appendContour(contour)
contour = glyph.contours[0]
bPoint = contour.bPoints[1]
self.assertIsNotNone(bPoint.glyph)
self.assertEqual(
bPoint.glyph,
glyph
)
def test_get_parent_noGlyph(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[1]
self.assertIsNone(bPoint.glyph)
def test_get_parent_contour(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[1]
self.assertIsNotNone(bPoint.contour)
self.assertEqual(
bPoint.contour,
contour
)
def test_get_parent_noContour(self):
bPoint, _ = self.objectGenerator("bPoint")
self.assertIsNone(bPoint.contour)
def test_get_parent_segment(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[1]
self.assertIsNotNone(bPoint._segment)
# def test_get_parent_noSegment(self):
# bPoint, _ = self.objectGenerator("bPoint")
# self.assertIsNone(bPoint._segment)
def test_get_parent_nextSegment(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[2]
self.assertIsNotNone(bPoint._nextSegment)
def test_get_parent_noNextSegment(self):
bPoint, _ = self.objectGenerator("bPoint")
self.assertIsNone(bPoint._nextSegment)
# get segment/nosegment
def test_set_parent_contour(self):
contour, _ = self.objectGenerator("contour")
bPoint, _ = self.objectGenerator("bPoint")
bPoint.contour = contour
self.assertIsNotNone(bPoint.contour)
self.assertEqual(
bPoint.contour,
contour
)
def test_set_already_set_parent_contour(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[1]
contourOther, _ = self.objectGenerator("contour")
with self.assertRaises(AssertionError):
bPoint.contour = contourOther
def test_set_parent_contour_none(self):
bPoint, _ = self.objectGenerator("bPoint")
bPoint.contour = None
self.assertIsNone(bPoint.contour)
def test_get_parent_glyph_noContour(self):
bPoint, _ = self.objectGenerator("bPoint")
self.assertIsNone(bPoint.glyph)
def test_get_parent_layer_noContour(self):
bPoint, _ = self.objectGenerator("bPoint")
self.assertIsNone(bPoint.layer)
def test_get_parent_font_noContour(self):
bPoint, _ = self.objectGenerator("bPoint")
self.assertIsNone(bPoint.font)
# ----
# Attributes
# ----
# type
def test_get_type_corner(self):
bPoint = self.getBPoint_corner()
self.assertEqual(
bPoint.type,
"corner"
)
def test_get_type_curve(self):
bPoint = self.getBPoint_curve()
self.assertEqual(
bPoint.type,
"curve"
)
def test_set_type_corner(self):
bPoint = self.getBPoint_curve()
bPoint.type = "corner"
self.assertEqual(
bPoint.type,
"corner"
)
def test_set_type_curve(self):
bPoint = self.getBPoint_corner()
bPoint.type = "curve"
self.assertEqual(
bPoint.type,
"curve"
)
def test_type_not_equal(self):
bPoint = self.getBPoint_corner()
self.assertNotEqual(
bPoint.type,
"curve"
)
def test_set_bcpOutIn_type_change(self):
bPoint = self.getBPoint_curve()
bPoint.bcpOut = (0, 0)
bPoint.bcpIn = (0, 0)
self.assertEqual(
bPoint.type,
"corner"
)
def test_set_bcpInOut_type_change(self):
bPoint = self.getBPoint_curve()
bPoint.bcpIn = (0, 0)
bPoint.bcpOut = (0, 0)
self.assertEqual(
bPoint.type,
"corner"
)
# anchor
def test_get_anchor(self):
bPoint = self.getBPoint_corner()
self.assertEqual(
bPoint.anchor,
(101, 202)
)
def test_set_anchor_valid_tuple(self):
bPoint = self.getBPoint_corner()
bPoint.anchor = (51, 45)
self.assertEqual(
bPoint.anchor,
(51, 45)
)
def test_set_anchor_valid_list(self):
bPoint = self.getBPoint_corner()
bPoint.anchor = [51, 45]
self.assertEqual(
bPoint.anchor,
(51, 45)
)
def test_set_anchor_invalid_too_many_items(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(ValueError):
bPoint.anchor = (51, 45, 67)
def test_set_anchor_invalid_single_item_list(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(ValueError):
bPoint.anchor = [51]
def test_set_anchor_invalid_single_item_tuple(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(ValueError):
bPoint.anchor = (51,)
def test_set_anchor_invalidType_int(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.anchor = 51
def test_set_anchor_invalidType_None(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.anchor = None
# bcp in
def test_get_bcpIn_corner(self):
bPoint = self.getBPoint_corner()
self.assertEqual(
bPoint.bcpIn,
(0, 0)
)
def test_get_bcpIn_curve(self):
bPoint = self.getBPoint_curve()
self.assertEqual(
bPoint.bcpIn,
(-40, -12)
)
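# Note: bcpIn/bcpOut are expressed relative to the anchor. In getContour() the
# off-curve before the (101, 202) anchor is (61, 190), so bcpIn is
# (61 - 101, 190 - 202) == (-40, -12); the off-curve after it, (133, 212),
# gives bcpOut == (32, 10), which test_get_bcpOut_curve checks below.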
def test_set_bcpIn_corner_valid_tuple(self):
bPoint = self.getBPoint_corner()
bPoint.bcpIn = (51, 45)
self.assertEqual(
bPoint.bcpIn,
(51, 45)
)
def test_set_bcpIn_corner_with_bcpOut(self):
bPoint = self.getBPoint_corner_with_bcpOut()
bPoint.bcpIn = (51, 45)
self.assertEqual(
bPoint.bcpIn,
(51, 45)
)
def test_set_bcpIn_curve_valid_tuple(self):
bPoint = self.getBPoint_curve()
bPoint.bcpIn = (51, 45)
self.assertEqual(
bPoint.bcpIn,
(51, 45)
)
def test_set_bcpIn_curve_firstPointOpenContour(self):
bPoint = self.getBPoint_curve_firstPointOpenContour()
with self.assertRaises(FontPartsError):
bPoint.bcpIn = (10, 20)
def test_set_bcpIn_valid_list(self):
bPoint = self.getBPoint_corner()
bPoint.bcpIn = [51, 45]
self.assertEqual(
bPoint.bcpIn,
(51, 45)
)
def test_set_bcpIn_invalid_too_many_items(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(ValueError):
bPoint.bcpIn = [51, 45, 67]
def test_set_bcpIn_invalid_single_item_list(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(ValueError):
bPoint.bcpIn = [51]
def test_set_bcpIn_invalid_single_item_tuple(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.bcpIn = (51)
def test_set_bcpIn_invalidType_int(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.bcpIn = 51
def test_set_bcpIn_invalidType_None(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.bcpIn = None
# bcp out
def test_get_bcpOut_corner(self):
bPoint = self.getBPoint_corner()
self.assertEqual(
bPoint.bcpOut,
(0, 0)
)
def test_get_bcpOut_curve(self):
bPoint = self.getBPoint_curve()
self.assertEqual(
bPoint.bcpOut,
(32, 10)
)
def test_set_bcpOut_corner_valid_tuple(self):
bPoint = self.getBPoint_corner()
bPoint.bcpOut = (51, 45)
self.assertEqual(
bPoint.bcpOut,
(51, 45)
)
def test_set_bcpOut_corner_with_bcpIn(self):
bPoint = self.getBPoint_corner_with_bcpIn()
bPoint.bcpOut = (51, 45)
self.assertEqual(
bPoint.bcpOut,
(51, 45)
)
def test_set_bcpOut_curve_valid_tuple(self):
bPoint = self.getBPoint_curve()
bPoint.bcpOut = (51, 45)
self.assertEqual(
bPoint.bcpOut,
(51, 45)
)
def test_set_bcpOut_valid_list(self):
bPoint = self.getBPoint_curve()
bPoint.bcpOut = [51, 45]
self.assertEqual(
bPoint.bcpOut,
(51, 45)
)
def test_set_bcpOut_curve_lastPointOpenContour(self):
bPoint = self.getBPoint_curve_lastPointOpenContour()
with self.assertRaises(FontPartsError):
bPoint.bcpOut = (10, 20)
def test_set_bcpOut_invalid_too_many_items(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(ValueError):
bPoint.bcpOut = [51, 45, 67]
def test_set_bcpOut_invalid_single_item_list(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(ValueError):
bPoint.bcpOut = [51]
def test_set_bcpOut_invalid_single_item_tuple(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.bcpOut = (51)
def test_set_bcpOut_invalidType_int(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.bcpOut = 51
def test_set_bcpOut_invalidType_None(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.bcpOut = None
# --------------
# Identification
# --------------
# index
def getBPoint_noParentContour(self):
bPoint, _ = self.objectGenerator("bPoint")
bPoint.anchor = (101, 202)
bPoint.bcpIn = (-40, 0)
bPoint.bcpOut = (50, 0)
bPoint.type = "curve"
return bPoint
def test_get_index(self):
bPoint = self.getBPoint_corner()
self.assertEqual(
bPoint.index,
1
)
# def test_get_index_noParentContour(self):
# bPoint = self.getBPoint_noParentContour()
# self.assertEqual(
# bPoint.index,
# None
# )
def test_set_index(self):
point = self.getBPoint_corner()
with self.assertRaises(FontPartsError):
point.index = 0
# identifier
def test_identifier_get_none(self):
bPoint = self.getBPoint_corner()
self.assertIsNone(bPoint.identifier)
def test_identifier_generated_type(self):
bPoint = self.getBPoint_corner()
bPoint.generateIdentifier()
self.assertIsInstance(bPoint.identifier, basestring)
def test_identifier_consistency(self):
bPoint = self.getBPoint_corner()
bPoint.generateIdentifier()
# get: twice to test consistency
self.assertEqual(bPoint.identifier, bPoint.identifier)
def test_identifier_cannot_set(self):
# identifier is a read-only property
bPoint = self.getBPoint_corner()
with self.assertRaises(FontPartsError):
bPoint.identifier = "ABC"
# def test_getIdentifer_no_contour(self):
# bPoint, _ = self.objectGenerator("bPoint")
# with self.assertRaises(FontPartsError):
# bPoint.getIdentifier()
def test_getIdentifer_consistency(self):
bPoint = self.getBPoint_corner()
bPoint.getIdentifier()
self.assertEqual(bPoint.identifier, bPoint.getIdentifier())
# ----
# Hash
# ----
def test_hash(self):
bPoint = self.getBPoint_corner()
self.assertEqual(
isinstance(bPoint, collections.Hashable),
False
)
# --------
# Equality
# --------
def test_object_equal_self(self):
bPoint_one = self.getBPoint_corner()
self.assertEqual(
bPoint_one,
bPoint_one
)
def test_object_not_equal_other(self):
bPoint_one = self.getBPoint_corner()
bPoint_two = self.getBPoint_corner()
self.assertNotEqual(
bPoint_one,
bPoint_two
)
def test_object_equal_self_variable_assignment(self):
bPoint_one = self.getBPoint_corner()
a = bPoint_one
a.anchor = (51, 45)
self.assertEqual(
bPoint_one,
a
)
def test_object_not_equal_other_variable_assignment(self):
bPoint_one = self.getBPoint_corner()
bPoint_two = self.getBPoint_corner()
a = bPoint_one
self.assertNotEqual(
bPoint_two,
a
)
# ---------
# Selection
# ---------
def test_selected_true(self):
bPoint = self.getBPoint_corner()
try:
bPoint.selected = False
except NotImplementedError:
return
bPoint.selected = True
self.assertEqual(
bPoint.selected,
True
)
def test_selected_false(self):
bPoint = self.getBPoint_corner()
try:
bPoint.selected = False
except NotImplementedError:
return
bPoint.selected = False
self.assertEqual(
bPoint.selected,
False
)
# ----
# Copy
# ----
def test_copy_seperate_objects(self):
bPoint = self.getBPoint_corner()
copied = bPoint.copy()
self.assertIsNot(
bPoint,
copied
)
def test_copy_different_contour(self):
bPoint = self.getBPoint_corner()
copied = bPoint.copy()
self.assertIsNot(
bPoint.contour,
copied.contour
)
def test_copy_none_contour(self):
bPoint = self.getBPoint_corner()
copied = bPoint.copy()
self.assertEqual(
copied.contour,
None
)
# def test_copy_same_type(self):
# bPoint = self.getBPoint_corner()
# copied = bPoint.copy()
# self.assertEqual(
# bPoint.type,
# copied.type
# )
# def test_copy_same_anchor(self):
# bPoint = self.getBPoint_corner()
# copied = bPoint.copy()
# self.assertEqual(
# bPoint.anchor,
# copied.anchor
# )
# def test_copy_same_bcpIn(self):
# bPoint = self.getBPoint_corner()
# copied = bPoint.copy()
# self.assertEqual(
# bPoint.bcpIn,
# copied.bcpIn
# )
# def test_copy_same_bcpOut(self):
# bPoint = self.getBPoint_corner()
# copied = bPoint.copy()
# self.assertEqual(
# bPoint.bcpOut,
# copied.bcpOut
# )
# def test_copy_same_identifier_None(self):
# bPoint = self.getBPoint_corner()
# bPoint.identifer = None
# copied = bPoint.copy()
# self.assertEqual(
# bPoint.identifier,
# copied.identifier,
# )
# def test_copy_different_identifier(self):
# bPoint = self.getBPoint_corner()
# bPoint.generateIdentifier()
# copied = bPoint.copy()
# self.assertNotEqual(
# bPoint.identifier,
# copied.identifier,
# )
# def test_copy_generated_identifier_different(self):
# otherContour, _ = self.objectGenerator("contour")
# bPoint = self.getBPoint_corner()
# copied = bPoint.copy()
# copied.contour = otherContour
# bPoint.generateIdentifier()
# copied.generateIdentifier()
# self.assertNotEqual(
# bPoint.identifier,
# copied.identifier
# )
# def test_copyData_type(self):
# bPoint = self.getBPoint_corner()
# bPointOther, _ = self.objectGenerator("bPoint")
# bPointOther.copyData(bPoint)
# self.assertEqual(
# bPoint.type,
# bPointOther.type,
# )
# def test_copyData_anchor(self):
# bPoint = self.getBPoint_corner()
# bPointOther, _ = self.objectGenerator("bPoint")
# bPointOther.copyData(bPoint)
# self.assertEqual(
# bPoint.anchor,
# bPointOther.anchor,
# )
# def test_copyData_bcpIn(self):
# bPoint = self.getBPoint_corner()
# bPointOther, _ = self.objectGenerator("bPoint")
# bPointOther.copyData(bPoint)
# self.assertEqual(
# bPoint.bcpIn,
# bPointOther.bcpIn,
# )
# def test_copyData_bcpOut(self):
# bPoint = self.getBPoint_corner()
# bPointOther, _ = self.objectGenerator("bPoint")
# bPointOther.copyData(bPoint)
# self.assertEqual(
# bPoint.bcpOut,
# bPointOther.bcpOut,
# )
# --------------
# Transformation
# --------------
# transformBy
def test_transformBy_valid_no_origin_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.transformBy((2, 0, 0, 3, -3, 2))
self.assertEqual(
bPoint.anchor,
(199.0, 608.0)
)
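# The 6-tuple given to transformBy() is an affine transformation
# (xx, xy, yx, yy, dx, dy). With the off-diagonal terms zero here, the
# (101, 202) anchor maps to x' = 2*101 - 3 = 199 and y' = 3*202 + 2 = 608.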
def test_transformBy_valid_no_origin_bcpIn(self):
bPoint = self.getBPoint_curve()
bPoint.transformBy((2, 0, 0, 3, -3, 2))
self.assertEqual(
bPoint.bcpIn,
(-80.0, -36.0)
)
def test_transformBy_valid_no_origin_bcpOut(self):
bPoint = self.getBPoint_curve()
bPoint.transformBy((2, 0, 0, 3, -3, 2))
self.assertEqual(
bPoint.bcpOut,
(64.0, 30.0)
)
def test_transformBy_valid_origin_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.transformBy((2, 0, 0, 2, 0, 0), origin=(1, 2))
self.assertEqual(
bPoint.anchor,
(201.0, 402.0)
)
def test_transformBy_valid_origin_bcpIn(self):
bPoint = self.getBPoint_curve()
bPoint.transformBy((2, 0, 0, 2, 0, 0), origin=(1, 2))
self.assertEqual(
bPoint.bcpIn,
(-80.0, -24.0)
)
def test_transformBy_valid_origin_bcpOut(self):
bPoint = self.getBPoint_curve()
bPoint.transformBy((2, 0, 0, 2, 0, 0), origin=(1, 2))
self.assertEqual(
bPoint.bcpOut,
(64.0, 20.0)
)
def test_transformBy_invalid_one_string_value(self):
point = self.getBPoint_curve()
with self.assertRaises(TypeError):
point.transformBy((1, 0, 0, 1, 0, "0"))
def test_transformBy_invalid_all_string_values(self):
point = self.getBPoint_curve()
with self.assertRaises(TypeError):
point.transformBy("1, 0, 0, 1, 0, 0")
def test_transformBy_invalid_int_value(self):
point = self.getBPoint_curve()
with self.assertRaises(TypeError):
point.transformBy(123)
# moveBy
def test_moveBy_valid_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.moveBy((-1, 2))
self.assertEqual(
bPoint.anchor,
(100.0, 204.0)
)
def test_moveBy_noChange_bcpIn(self):
bPoint = self.getBPoint_curve()
bPoint.moveBy((-1, 2))
otherBPoint = self.getBPoint_curve()
self.assertEqual(
bPoint.bcpIn,
otherBPoint.bcpIn
)
def test_moveBy_noChange_bcpOut(self):
bPoint = self.getBPoint_curve()
bPoint.moveBy((-1, 2))
otherBPoint = self.getBPoint_curve()
self.assertEqual(
bPoint.bcpOut,
otherBPoint.bcpOut
)
def test_moveBy_invalid_one_string_value(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(TypeError):
bPoint.moveBy((-1, "2"))
def test_moveBy_invalid_all_strings_value(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(TypeError):
bPoint.moveBy("-1, 2")
def test_moveBy_invalid_int_value(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(TypeError):
bPoint.moveBy(1)
# scaleBy
def test_scaleBy_valid_one_value_no_origin_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.scaleBy((-2))
self.assertEqual(
bPoint.anchor,
(-202.0, -404.0)
)
def test_scaleBy_valid_two_values_no_origin_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.scaleBy((-2, 3))
self.assertEqual(
bPoint.anchor,
(-202.0, 606.0)
)
def test_scaleBy_valid_two_values_origin_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.scaleBy((-2, 3), origin=(1, 2))
self.assertEqual(
bPoint.anchor,
(-199.0, 602.0)
)
def test_scaleBy_valid_two_values_origin_bcpIn(self):
bPoint = self.getBPoint_curve()
bPoint.scaleBy((-2, 3), origin=(1, 2))
self.assertEqual(
bPoint.bcpIn,
(80.0, -36.0)
)
def test_scaleBy_valid_two_values_origin_bcpOut(self):
bPoint = self.getBPoint_curve()
bPoint.scaleBy((-2, 3), origin=(1, 2))
self.assertEqual(
bPoint.bcpOut,
(-64.0, 30.0)
)
def test_invalid_one_string_value_scaleBy(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(TypeError):
bPoint.scaleBy((-1, "2"))
def test_invalid_two_string_values_scaleBy(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(TypeError):
bPoint.scaleBy("-1, 2")
def test_invalid_tuple_too_many_values_scaleBy(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(ValueError):
bPoint.scaleBy((-1, 2, -3))
# rotateBy
def test_rotateBy_valid_no_origin_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.rotateBy(45)
self.assertEqual(
[(round(bPoint.anchor[0], 3)), (round(bPoint.anchor[1], 3))],
[-71.418, 214.253]
)
def test_rotateBy_valid_origin_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.rotateBy(45, origin=(1, 2))
self.assertEqual(
[(round(bPoint.anchor[0], 3)), (round(bPoint.anchor[1], 3))],
[-69.711, 214.132]
)
def test_rotateBy_valid_origin_bcpIn(self):
bPoint = self.getBPoint_curve()
bPoint.rotateBy(45, origin=(1, 2))
self.assertEqual(
[(round(bPoint.bcpIn[0], 3)), (round(bPoint.bcpIn[1], 3))],
[-19.799, -36.77]
)
def test_rotateBy_valid_origin_bcpOut(self):
bPoint = self.getBPoint_curve()
bPoint.rotateBy(45, origin=(1, 2))
self.assertEqual(
[(round(bPoint.bcpOut[0], 3)), (round(bPoint.bcpOut[1], 3))],
[15.556, 29.698]
)
def test_rotateBy_invalid_string_value(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(TypeError):
bPoint.rotateBy("45")
def test_rotateBy_invalid_too_large_value_positive(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(ValueError):
bPoint.rotateBy(361)
def test_rotateBy_invalid_too_large_value_negative(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(ValueError):
bPoint.rotateBy(-361)
# skewBy
def test_skewBy_valid_no_origin_one_value_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.skewBy(100)
self.assertEqual(
[(round(bPoint.anchor[0], 3)), (round(bPoint.anchor[1], 3))],
[-1044.599, 202.0]
)
def test_skewBy_valid_no_origin_two_values_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.skewBy((100, 200))
self.assertEqual(
[(round(bPoint.anchor[0], 3)), (round(bPoint.anchor[1], 3))],
[-1044.599, 238.761]
)
def test_skewBy_valid_origin_one_value_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.skewBy(100, origin=(1, 2))
self.assertEqual(
[(round(bPoint.anchor[0], 3)), (round(bPoint.anchor[1], 3))],
[-1033.256, 202.0]
)
def test_skewBy_valid_origin_two_values_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.skewBy((100, 200), origin=(1, 2))
self.assertEqual(
[(round(bPoint.anchor[0], 3)), (round(bPoint.anchor[1], 3))],
[-1033.256, 238.397]
)
def test_skewBy_valid_origin_two_values_bcpIn(self):
bPoint = self.getBPoint_curve()
bPoint.skewBy((100, 200), origin=(1, 2))
self.assertEqual(
[(round(bPoint.bcpIn[0], 3)), (round(bPoint.bcpIn[1], 3))],
[28.055, -26.559]
)
def test_skewBy_valid_origin_two_values_bcpOut(self):
bPoint = self.getBPoint_curve()
bPoint.skewBy((100, 200), origin=(1, 2))
self.assertEqual(
[(round(bPoint.bcpOut[0], 3)), (round(bPoint.bcpOut[1], 3))],
[-24.713, 21.647]
)
def test_skewBy_invalid_string_value(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(TypeError):
bPoint.skewBy("45")
def test_skewBy_invalid_too_large_value_positive(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(ValueError):
bPoint.skewBy(361)
def test_skewBy_invalid_too_large_value_negative(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(ValueError):
bPoint.skewBy(-361)
# -------------
# Normalization
# -------------
# round
def getBPoint_curve_float(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((19.231, 121.291), "offcurve")
contour.appendPoint((61.193, 190.942), "offcurve")
contour.appendPoint((101.529, 202.249), "curve", smooth=True)
contour.appendPoint((133.948, 212.193), "offcurve")
contour.appendPoint((155.491, 147.314), "offcurve")
contour.appendPoint((255.295, 147.314), "curve")
bPoint = contour.bPoints[1]
return bPoint
def test_round_anchor(self):
bPoint = self.getBPoint_curve_float()
bPoint.round()
self.assertEqual(
bPoint.anchor,
(102.0, 202.0)
)
def test_round_bcpIn(self):
bPoint = self.getBPoint_curve_float()
bPoint.round()
self.assertEqual(
bPoint.bcpIn,
(-40.0, -11.0)
)
def test_round_bcpOut(self):
bPoint = self.getBPoint_curve_float()
bPoint.round()
self.assertEqual(
bPoint.bcpOut,
(32.0, 10.0)
)
| mit | -2,513,112,687,653,909,500 | 28.822042 | 73 | 0.565111 | false |
ayepezv/GAD_ERP | addons/account/wizard/account_invoice_refund.py | 1 | 6614 | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.tools.safe_eval import safe_eval
from odoo.exceptions import UserError
class AccountInvoiceRefund(models.TransientModel):
"""Refunds invoice"""
_name = "account.invoice.refund"
_description = "Invoice Refund"
@api.model
def _get_reason(self):
context = dict(self._context or {})
active_id = context.get('active_id', False)
if active_id:
inv = self.env['account.invoice'].browse(active_id)
return inv.name
return ''
date_invoice = fields.Date(string='Refund Date', default=fields.Date.context_today, required=True)
date = fields.Date(string='Accounting Date')
description = fields.Char(string='Reason', required=True, default=_get_reason)
refund_only = fields.Boolean(string='Technical field to hide filter_refund in case invoice is partially paid', compute='_get_refund_only')
filter_refund = fields.Selection([('refund', 'Create a draft refund'), ('cancel', 'Cancel: create refund and reconcile'), ('modify', 'Modify: create refund, reconcile and create a new draft invoice')],
default='refund', string='Refund Method', required=True, help='Refund base on this type. You can not Modify and Cancel if the invoice is already reconciled')
@api.depends('date_invoice')
@api.one
def _get_refund_only(self):
invoice_id = self.env['account.invoice'].browse(self._context.get('active_id',False))
if len(invoice_id.payment_move_line_ids) != 0 and invoice_id.state != 'paid':
self.refund_only = True
else:
self.refund_only = False
@api.multi
def compute_refund(self, mode='refund'):
inv_obj = self.env['account.invoice']
inv_tax_obj = self.env['account.invoice.tax']
inv_line_obj = self.env['account.invoice.line']
context = dict(self._context or {})
xml_id = False
for form in self:
created_inv = []
date = False
description = False
for inv in inv_obj.browse(context.get('active_ids')):
if inv.state in ['draft', 'proforma2', 'cancel']:
raise UserError(_('Cannot refund draft/proforma/cancelled invoice.'))
if inv.reconciled and mode in ('cancel', 'modify'):
raise UserError(_('Cannot refund invoice which is already reconciled, invoice should be unreconciled first. You can only refund this invoice.'))
date = form.date or False
description = form.description or inv.name
refund = inv.refund(form.date_invoice, date, description, inv.journal_id.id)
refund.compute_taxes()
created_inv.append(refund.id)
if mode in ('cancel', 'modify'):
movelines = inv.move_id.line_ids
to_reconcile_ids = {}
to_reconcile_lines = self.env['account.move.line']
for line in movelines:
if line.account_id.id == inv.account_id.id:
to_reconcile_lines += line
to_reconcile_ids.setdefault(line.account_id.id, []).append(line.id)
if line.reconciled:
line.remove_move_reconcile()
refund.signal_workflow('invoice_open')
for tmpline in refund.move_id.line_ids:
if tmpline.account_id.id == inv.account_id.id:
to_reconcile_lines += tmpline
to_reconcile_lines.reconcile()
if mode == 'modify':
invoice = inv.read(
['name', 'type', 'number', 'reference',
'comment', 'date_due', 'partner_id',
'partner_insite', 'partner_contact',
'partner_ref', 'payment_term_id', 'account_id',
'currency_id', 'invoice_line_ids', 'tax_line_ids',
'journal_id', 'date'])
invoice = invoice[0]
del invoice['id']
invoice_lines = inv_line_obj.browse(invoice['invoice_line_ids'])
invoice_lines = inv_obj.with_context(mode='modify')._refund_cleanup_lines(invoice_lines)
tax_lines = inv_tax_obj.browse(invoice['tax_line_ids'])
tax_lines = inv_obj._refund_cleanup_lines(tax_lines)
invoice.update({
'type': inv.type,
'date_invoice': form.date_invoice,
'state': 'draft',
'number': False,
'invoice_line_ids': invoice_lines,
'tax_line_ids': tax_lines,
'date': date,
'name': description,
'origin': inv.origin,
'fiscal_position': inv.fiscal_position.id,
})
for field in ('partner_id', 'account_id', 'currency_id',
'payment_term_id', 'journal_id'):
invoice[field] = invoice[field] and invoice[field][0]
inv_refund = inv_obj.create(invoice)
if inv_refund.payment_term_id.id:
inv_refund._onchange_payment_term_date_invoice()
created_inv.append(inv_refund.id)
xml_id = (inv.type in ['out_refund', 'out_invoice']) and 'action_invoice_tree1' or \
(inv.type in ['in_refund', 'in_invoice']) and 'action_invoice_tree2'
# Put the reason in the chatter
subject = _("Invoice refund")
body = description
refund.message_post(body=body, subject=subject)
if xml_id:
result = self.env.ref('account.%s' % (xml_id)).read()[0]
invoice_domain = safe_eval(result['domain'])
invoice_domain.append(('id', 'in', created_inv))
result['domain'] = invoice_domain
return result
return True
@api.multi
def invoice_refund(self):
data_refund = self.read(['filter_refund'])[0]['filter_refund']
return self.compute_refund(data_refund)
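# Illustrative sketch only (not part of the original wizard): the wizard is
# normally launched from the invoice form, but programmatically it could be
# driven along these lines, with `invoice` a posted account.invoice record.
# The field values below are assumptions for the example.
#
#   wizard = self.env['account.invoice.refund'].with_context(
#       active_ids=invoice.ids, active_id=invoice.id,
#   ).create({'description': 'Damaged goods', 'filter_refund': 'refund'})
#   wizard.invoice_refund()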
| gpl-3.0 | -2,647,333,404,067,818,500 | 50.271318 | 205 | 0.518597 | false |
akx/shoop | shoop/core/fields/tagged_json.py | 1 | 3243 | # This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
"""
"Tagged JSON" encoder/decoder.
Objects that are normally not unambiguously representable via JSON
are encoded into special objects of the form `{tag: val}`; the encoding
and decoding process can be customized however necessary.
"""
from __future__ import unicode_literals
import datetime
import decimal
from enum import Enum
import django.utils.dateparse as dateparse
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from jsonfield.encoder import JSONEncoder
from six import text_type
from shoop.utils.importing import load
from shoop.utils.iterables import first
def isoformat(obj):
return obj.isoformat()
def encode_enum(enum_val):
enum_cls = enum_val.__class__
spec = "%s:%s" % (enum_cls.__module__, enum_cls.__name__)
try:
if load(spec) != enum_cls:
raise ImproperlyConfigured("That's not the same class!")
except ImproperlyConfigured: # Also raised by `load`
return enum_val.value # Fall back to the bare value.
return [spec, enum_val.value]
def decode_enum(val):
spec, value = val
cls = load(spec)
if issubclass(cls, Enum):
return cls(value)
return value # Fall back to the bare value. Not optimal, I know.
class TagRegistry(object):
def __init__(self):
self.tags = {}
def register(self, tag, classes, encoder=text_type, decoder=None):
if decoder is None:
if isinstance(classes, (list, tuple)):
decoder = classes[0]
else:
decoder = classes
if not callable(decoder):
raise ValueError("Decoder %r for tag %r is not callable" % (decoder, tag))
if not callable(encoder):
raise ValueError("Encoder %r for tag %r is not callable" % (encoder, tag))
self.tags[tag] = {
"classes": classes,
"encoder": encoder,
"decoder": decoder
}
def encode(self, obj, default):
for tag, info in six.iteritems(self.tags):
if isinstance(obj, info["classes"]):
return {tag: info["encoder"](obj)}
return default(obj)
def decode(self, obj):
if len(obj) == 1:
tag, val = first(obj.items())
info = self.tags.get(tag)
if info:
return info["decoder"](val)
return obj
#: The default tag registry.
tag_registry = TagRegistry()
tag_registry.register("$datetime", datetime.datetime, encoder=isoformat, decoder=dateparse.parse_datetime)
tag_registry.register("$date", datetime.date, encoder=isoformat, decoder=dateparse.parse_date)
tag_registry.register("$time", datetime.time, encoder=isoformat, decoder=dateparse.parse_time)
tag_registry.register("$dec", decimal.Decimal)
tag_registry.register("$enum", Enum, encoder=encode_enum, decoder=decode_enum)
class TaggedJSONEncoder(JSONEncoder):
registry = tag_registry
def default(self, obj):
return self.registry.encode(obj, super(JSONEncoder, self).default)
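# Minimal usage sketch (not part of the original module). It assumes only the
# standard-library json module; importing this file may still require the
# package's Django environment. The object_hook wiring is just one way to plug
# tag_registry.decode into decoding.
if __name__ == "__main__":
    import json
    payload = {"price": decimal.Decimal("42.50"), "when": datetime.datetime(2016, 1, 1, 12, 0)}
    encoded = json.dumps(payload, cls=TaggedJSONEncoder)
    # e.g. {"price": {"$dec": "42.50"}, "when": {"$datetime": "2016-01-01T12:00:00"}}
    decoded = json.loads(encoded, object_hook=tag_registry.decode)
    assert decoded["price"] == decimal.Decimal("42.50")
    assert decoded["when"] == datetime.datetime(2016, 1, 1, 12, 0)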
| agpl-3.0 | -1,473,840,330,059,314,200 | 30.485437 | 106 | 0.659574 | false |
victorduan/ds_usage-collector | collector.py | 1 | 5672 | #!/usr/bin/python
import sys
import logging
import config
import time
import calendar
from env import Env
from env import MySqlHelper
if config.logLevel == "info":
logging.basicConfig(format='%(asctime)s | %(levelname)s | %(filename)s | %(message)s', level=logging.INFO, filename=config.logFile)
else:
logging.basicConfig(format='%(asctime)s | %(levelname)s | %(filename)s | %(message)s', level=logging.DEBUG, filename=config.logFile)
if __name__ == "__main__":
table_name = config.mysql_table
# Create object for internal database methods (mySQL)
mysql = MySqlHelper(config.mysql_username, config.mysql_password, config.mysql_host, config.mysql_database)
retry = 3
while retry:
try:
logging.info("Getting valid columns from MySQL database.")
valid_sources = mysql.return_columns(table_name)
valid_sources = [ col for col in valid_sources if col not in ('intID', 'username', 'start', 'end', 'stream_type', 'stream_hash', 'seconds') ]
logging.debug("Columns found: {0}".format(valid_sources))
retry = 0
except Exception, err:
logging.exception(err)
retry -= 1
logging.warning("Retries left: {0}".format(retry))
time.sleep(5) # Sleep 5 seconds before retrying
# Open a MySQL connection
mysql.connect()
# Set up the environment
env = Env(sys.argv)
username = sys.argv[1]
retry = 12
while retry:
try:
usage = env.get_usage(config.usage_interval)
logging.info("Getting /usage for user: {0}".format(username))
print "Getting /usage for user: {0}".format(username)
break
except Exception, err:
logging.exception("Encountered getting /usage for user: {0}. Error message: {1}".format(username, err))
if err[1] == 403:
logging.info("Exceeded DataSift API Rate Limit. Sleeping for 5 minutes before next retry")
time.sleep(300)
else:
logging.info("Unable to get usage from DataSift. Sleeping for 15 seconds before next retry")
time.sleep(15) # Sleep 15 seconds before retrying
retry -= 1
logging.info("Retries left: {0}".format(retry))
if retry == 0:
logging.error("Unable to get usage from DataSift. Exiting after all retries")
sys.exit()
date_format = "%a, %d %b %Y %H:%M:%S +0000"
start = time.strptime(usage['start'], date_format)
end = time.strptime(usage['end'], date_format)
unix_start = calendar.timegm(start)
unix_end = calendar.timegm(end)
insert_string = ''
if len(usage['streams']):
for stream in usage['streams']:
if len(stream) == 32:
stream_type = "stream"
else:
stream_type = "historic"
seconds = usage['streams'][stream]['seconds']
data = {
'username' : username,
'start' : unix_start,
'end' : unix_end,
'stream_type' : stream_type,
'stream_hash' : str(stream),
'seconds' : seconds
}
licenses = usage['streams'][stream]['licenses']
if len(licenses):
headers = []
for license_type, license_value in licenses.items():
# Only add licenses for columns that exist in the database
if any(str(license_type) in x for x in valid_sources):
data[str(license_type)] = license_value
headers.append(str(license_type))
fields_string = ", ".join([ "`{0}`".format(k) for k in headers ])
values_string = ", ".join([ "%({0})s".format(k) for k in headers ])
insert_query = ("""
INSERT INTO {0}
(`username`, `start`, `end`, `stream_type`, `stream_hash`, `seconds`, {1})
VALUES ('%(username)s', %(start)s, %(end)s, '%(stream_type)s', '%(stream_hash)s', %(seconds)s, {2});
""").format(table_name, fields_string, values_string)
# Different MySQL Query if there is no license consumption
else:
insert_query = ("""
INSERT INTO {0}
(`username`, `start`, `end`, `stream_type`, `stream_hash`, `seconds`)
VALUES ('%(username)s', %(start)s, %(end)s, '%(stream_type)s', '%(stream_hash)s', %(seconds)s);
""").format(table_name)
# Concatenate all the INSERT statements
insert_string += " ".join(insert_query.split()) % data
try:
insert_count= 0
cursor = mysql.execute_many(insert_string)
for insert in cursor:
insert_count += 1
# Commit the inserts for the user (if there are results)
if insert_count: mysql.commit()
else: mysql.close()
except Exception, err:
logging.exception(err)
sys.exit()
else:
logging.info("No streams consumed in the period: {0} for user: {1}".format(config.usage_interval, username))
logging.info("Tasks completed.")
| mit | 7,862,495,477,375,088,000 | 37.849315 | 153 | 0.515339 | false |
Gr1N/rainbowrunners | rainbowrunners/result.py | 1 | 5763 | # -*- coding: utf-8 -*-
import math
import unittest
from rainbowrunners.utils import Colors, get_terminal_size
__all__ = (
'NyanCatResult',
)
class BaseRainbowResult(unittest.TestResult):
"""A test result class that can print rainbow and awesome pet to a stream.
"""
separator1 = '\033[{0}m{1:*^70}\033[0m'.format(Colors.SEPARATOR1, '')
separator2 = '\033[{0}m{1:-^70}\033[0m'.format(Colors.SEPARATOR2, '')
def __init__(self, stream=None, descriptions=None, verbosity=None):
super(BaseRainbowResult, self).__init__(
stream=stream, descriptions=descriptions, verbosity=verbosity
)
self.stream = stream
self.descriptions = descriptions
self.width = get_terminal_size()[0] * 0.8
self.tick = False
self.number_of_lines = 4
self.trajectories = [[], [], [], []]
self.pet_width = 11
self.scoreboard_width = 5
self.trajectory_width_max = self.width - self.pet_width
self.rainbow_colors = self.generate_colors()
self.color_index = 0
self.success = 0
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
return str(test)
def addSuccess(self, test):
super(BaseRainbowResult, self).addSuccess(test)
self.success += 1
self.draw()
def addError(self, test, err):
super(BaseRainbowResult, self).addError(test, err)
self.draw()
def addFailure(self, test, err):
super(BaseRainbowResult, self).addFailure(test, err)
self.draw()
def addSkip(self, test, reason):
super(BaseRainbowResult, self).addSkip(test, reason)
self.draw()
def addExpectedFailure(self, test, err):
super(BaseRainbowResult, self).addExpectedFailure(test, err)
self.draw()
def addUnexpectedSuccess(self, test):
super(BaseRainbowResult, self).addUnexpectedSuccess(test)
self.draw()
def printErrors(self):
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln('{0}: {1}'.format(flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln(err)
def startTestRun(self):
self.cursor_hide()
def stopTestRun(self):
for i in range(self.number_of_lines):
self.stream.write('\n')
self.cursor_show()
def draw(self):
self.append_rainbow()
draw_methods = (
self.draw_scoreboard,
self.draw_rainbow,
self.draw_pet,
)
for method in draw_methods:
method()
self.cursor_up()
self.tick = not self.tick
def append_rainbow(self):
segment = '_' if self.tick else '-'
rainbowified = self.rainbowify(segment)
for index in range(self.number_of_lines):
trajectory = self.trajectories[index]
if len(trajectory) >= self.trajectory_width_max:
trajectory.pop(0)
trajectory.append(rainbowified)
def draw_scoreboard(self):
self.draw_score(self.success, Colors.SUCCESS)
self.draw_score(len(self.errors), Colors.ERROR)
self.draw_score(len(self.failures), Colors.FAIL)
self.stream.writeln()
def draw_score(self, score, color):
self.stream.write(' ')
self.stream.writeln('\033[{0}m{1}\033[0m'.format(color, score))
def draw_rainbow(self):
for trajectory in self.trajectories:
self.stream.write('\033[{0}C'.format(self.scoreboard_width))
self.stream.writeln(''.join(trajectory))
def draw_pet(self):
raise NotImplementedError
def rainbowify(self, string):
color = self.rainbow_colors[self.color_index % len(self.rainbow_colors)]
self.color_index += 1
return '\033[38;5;{0}m{1}\033[0m'.format(color, string)
def generate_colors(self):
pi3 = math.floor(math.pi / 3)
n = lambda i: i * (1.0 / 6)
r = lambda i: math.floor(3 * math.sin(n(i)) + 3)
g = lambda i: math.floor(3 * math.sin(n(i) + 2 + pi3) + 3)
b = lambda i: math.floor(3 * math.sin(n(i) + 4 + pi3) + 3)
colors = [int(36 * r(i) + 6 * g(i) + b(i) + 16) for i in range(42)]
return colors
def cursor_up(self):
self.stream.write('\033[{0}A'.format(self.number_of_lines))
def cursor_hide(self):
self.stream.write('\033[?25l')
def cursor_show(self):
self.stream.write('\033[?25h')
class NyanCatResult(BaseRainbowResult):
def draw_pet(self):
start_width = self.scoreboard_width + len(self.trajectories[0])
color = '\033[{0}C'.format(start_width)
self.stream.write(color)
self.stream.writeln('_,------,')
self.stream.write(color)
padding = ' ' if self.tick else ' '
self.stream.writeln('_|{0}/\\_/\\ '.format(padding))
self.stream.write(color)
padding = '_' if self.tick else '__'
tail = '~' if self.tick else '^'
self.stream.write('{0}|{1}{2} '.format(tail, padding, self.face()))
self.stream.writeln()
self.stream.write(color)
padding = ' ' if self.tick else ' '
self.stream.writeln('{0}"" "" '.format(padding))
def face(self):
if self.errors:
return '( x .x)'
elif self.failures:
return '( o .o)'
else:
return '( ^ .^)'
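# Minimal wiring sketch (not part of the original module): any unittest runner
# that accepts a resultclass can use the reporter. The test case below is a
# throwaway placeholder for illustration.
if __name__ == "__main__":
    class _DemoTest(unittest.TestCase):
        def test_ok(self):
            self.assertTrue(True)

    suite = unittest.TestLoader().loadTestsFromTestCase(_DemoTest)
    unittest.TextTestRunner(resultclass=NyanCatResult, verbosity=0).run(suite)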
| mit | 8,010,086,037,771,533,000 | 30.151351 | 86 | 0.585459 | false |
alsgregory/quasi_geostrophic_model | demos/demo_two_level_variance_reduction.py | 1 | 2299 | """ sample variance decay of two level QG system """
from __future__ import division
from __future__ import absolute_import
from firedrake import *
from quasi_geostrophic_model import *
import numpy as np
import matplotlib.pyplot as plot
# define mesh hierarchy
mesh = UnitSquareMesh(5, 5)
L = 4
mesh_hierarchy = MeshHierarchy(mesh, L)
# define sample size
n = 10
# define variance
variance = 0.125
# define initial condition function
def ic(mesh, xp):
x = SpatialCoordinate(mesh)
ufl_expression = (exp(-(pow(x[0] - 0.5 + xp, 2) / (2 * pow(0.25, 2)) +
pow(x[1] - 0.7, 2) / (2 * pow(0.1, 2)))) -
exp(-(pow(x[0] - 0.5 + xp, 2) / (2 * pow(0.25, 2)) +
pow(x[1] - 0.3, 2) / (2 * pow(0.1, 2)))))
return ufl_expression
sample_variances_difference = np.zeros(L)
finest_fs = FunctionSpace(mesh_hierarchy[-1], 'CG', 1)
for l in range(L):
print 'level: ', l
meshc = mesh_hierarchy[l]
meshf = mesh_hierarchy[l + 1]
# define fs
dg_fs_c = FunctionSpace(meshc, 'DG', 1)
cg_fs_c = FunctionSpace(meshc, 'CG', 1)
dg_fs_f = FunctionSpace(meshf, 'DG', 1)
cg_fs_f = FunctionSpace(meshf, 'CG', 1)
m = Function(finest_fs)
sq = Function(finest_fs)
for j in range(n):
print 'sample: ', j
# set-up system
QG = two_level_quasi_geostrophic(dg_fs_c, cg_fs_c, dg_fs_f, cg_fs_f, variance)
# fixed ic
xp = 0
QG.initial_condition(ic(meshc, xp), ic(meshf, xp))
# time-step
QG.timestepper(3.0)
# prolong coarse and fine
comp_c = Function(finest_fs)
comp_f = Function(finest_fs)
prolong(QG.psi_[0], comp_c)
if l < L - 1:
prolong(QG.psi_[1], comp_f)
else:
comp_f.assign(QG.psi_[1])
m += assemble((comp_f - comp_c) * (1.0 / n))
sq += assemble(((comp_f - comp_c) ** 2) * (1.0 / n))
ff = Function(finest_fs).assign((sq - (m ** 2)))
sample_variances_difference[l] = assemble(ff * dx)
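# sq - m**2 is the plug-in variance estimator E[X^2] - (E[X])^2 of the level
# difference X = psi_fine - psi_coarse; integrating it over the unit square
# gives the scalar recorded for this level.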
dxf = 1.0 / 2 ** (np.linspace(1, L, L))
plot.loglog(dxf, sample_variances_difference)
plot.loglog(dxf, 1e-9 * dxf ** (4), 'k--')
plot.xlabel('normalized dx of coarse level')
plot.ylabel('sample variance difference')
plot.show()
| mit | 1,268,599,031,481,539,800 | 22.701031 | 86 | 0.564158 | false |
alphagov/notifications-admin | app/navigation.py | 1 | 10999 | from itertools import chain
from flask import request
class Navigation:
mapping = {}
selected_class = "selected"
def __init__(self):
self.mapping = {
navigation: {
# if not specified, assume endpoints are all in the `main` blueprint.
self.get_endpoint_with_blueprint(endpoint) for endpoint in endpoints
} for navigation, endpoints in self.mapping.items()
}
@property
def endpoints_with_navigation(self):
return tuple(chain.from_iterable((
endpoints
for navigation_item, endpoints in self.mapping.items()
)))
def is_selected(self, navigation_item):
if request.endpoint in self.mapping[navigation_item]:
return " " + self.selected_class
return ''
@staticmethod
def get_endpoint_with_blueprint(endpoint):
return endpoint if '.' in endpoint else 'main.{}'.format(endpoint)
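# Illustrative sketch (not part of the app): a subclass only needs to provide
# `mapping`; `is_selected` can then be called from a template to mark the
# active item. The endpoint and item names below are made up.
#
#   class SidebarNavigation(Navigation):
#       mapping = {'reports': {'reports', 'report_detail'}}
#
#   navigation = SidebarNavigation()
#   # in a Jinja template: <li class="nav{{ navigation.is_selected('reports') }}">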
class HeaderNavigation(Navigation):
mapping = {
'support': {
'bat_phone',
'feedback',
'support',
'support_public',
'thanks',
'triage',
},
'features': {
'features',
'features_email',
'features_letters',
'features_sms',
'message_status',
'roadmap',
'security',
'terms',
'trial_mode_new',
'using_notify',
},
'pricing': {
'pricing',
'how_to_pay',
},
'documentation': {
'documentation',
'integration_testing',
},
'user-profile': {
'user_profile',
'user_profile_email',
'user_profile_email_authenticate',
'user_profile_email_confirm',
'user_profile_mobile_number',
'user_profile_mobile_number_authenticate',
'user_profile_mobile_number_confirm',
'user_profile_name',
'user_profile_password',
'user_profile_disable_platform_admin_view',
},
'platform-admin': {
'archive_user',
'clear_cache',
'create_email_branding',
'create_letter_branding',
'edit_sms_provider_ratio',
'email_branding',
'find_services_by_name',
'find_users_by_email',
'letter_branding',
'live_services',
'live_services_csv',
'notifications_sent_by_service',
'get_billing_report',
'organisations',
'platform_admin',
'platform_admin_list_complaints',
'platform_admin_reports',
'platform_admin_returned_letters',
'platform_admin_splash_page',
'suspend_service',
'trial_services',
'update_email_branding',
'update_letter_branding',
'user_information',
'view_provider',
'view_providers',
},
'sign-in': {
'revalidate_email_sent',
'sign_in',
'two_factor_sms',
'two_factor_email',
'two_factor_email_sent',
'two_factor_email_interstitial',
'two_factor_webauthn',
'verify',
'verify_email',
},
}
# header HTML now comes from GOVUK Frontend so requires a boolean, not an attribute
def is_selected(self, navigation_item):
return request.endpoint in self.mapping[navigation_item]
class MainNavigation(Navigation):
mapping = {
'dashboard': {
'broadcast_tour',
'conversation',
'inbox',
'monthly',
'returned_letter_summary',
'returned_letters',
'service_dashboard',
'template_usage',
'view_notification',
'view_notifications',
},
'current-broadcasts': {
'broadcast_dashboard',
'broadcast_dashboard_updates',
'view_current_broadcast',
'new_broadcast',
'write_new_broadcast',
},
'previous-broadcasts': {
'broadcast_dashboard_previous',
'view_previous_broadcast',
},
'rejected-broadcasts': {
'broadcast_dashboard_rejected',
'view_rejected_broadcast',
},
'templates': {
'action_blocked',
'add_service_template',
'check_messages',
'check_notification',
'choose_from_contact_list',
'choose_template',
'choose_template_to_copy',
'confirm_redact_template',
'conversation_reply',
'copy_template',
'delete_service_template',
'edit_service_template',
'edit_template_postage',
'manage_template_folder',
'send_messages',
'send_one_off',
'send_one_off_letter_address',
'send_one_off_step',
'send_one_off_to_myself',
'no_cookie.send_test_preview',
'set_sender',
'set_template_sender',
'view_template',
'view_template_version',
'view_template_versions',
'broadcast',
'preview_broadcast_areas',
'choose_broadcast_library',
'choose_broadcast_area',
'choose_broadcast_sub_area',
'remove_broadcast_area',
'preview_broadcast_message',
'approve_broadcast_message',
'reject_broadcast_message',
'cancel_broadcast_message',
},
'uploads': {
'upload_contact_list',
'check_contact_list',
'save_contact_list',
'contact_list',
'delete_contact_list',
'upload_letter',
'uploaded_letter_preview',
'uploaded_letters',
'uploads',
'view_job',
'view_jobs',
},
'team-members': {
'confirm_edit_user_email',
'confirm_edit_user_mobile_number',
'edit_user_email',
'edit_user_mobile_number',
'edit_user_permissions',
'invite_user',
'manage_users',
'remove_user_from_service',
},
'usage': {
'usage',
},
'settings': {
'add_organisation_from_gp_service',
'add_organisation_from_nhs_local_service',
'branding_request',
'estimate_usage',
'link_service_to_organisation',
'request_to_go_live',
'service_add_email_reply_to',
'service_add_letter_contact',
'service_add_sms_sender',
'service_agreement',
'service_accept_agreement',
'service_confirm_agreement',
'service_confirm_delete_email_reply_to',
'service_confirm_delete_letter_contact',
'service_confirm_delete_sms_sender',
'service_edit_email_reply_to',
'service_edit_letter_contact',
'service_edit_sms_sender',
'service_email_reply_to',
'service_letter_contact_details',
'service_make_blank_default_letter_contact',
'service_name_change',
'service_name_change_confirm',
'service_preview_email_branding',
'service_preview_letter_branding',
'service_set_auth_type',
'service_set_channel',
'send_files_by_email_contact_details',
'service_confirm_broadcast_account_type',
'service_set_broadcast_channel',
'service_set_broadcast_network',
'service_set_email_branding',
'service_set_inbound_number',
'service_set_inbound_sms',
'service_set_international_letters',
'service_set_international_sms',
'service_set_letters',
'service_set_reply_to_email',
'service_set_sms_prefix',
'service_verify_reply_to_address',
'service_verify_reply_to_address_updates',
'service_settings',
'service_sms_senders',
'set_free_sms_allowance',
'set_message_limit',
'set_rate_limit',
'service_set_letter_branding',
'submit_request_to_go_live',
},
'api-integration': {
'api_callbacks',
'api_documentation',
'api_integration',
'api_keys',
'create_api_key',
'delivery_status_callback',
'received_text_messages_callback',
'revoke_api_key',
'guest_list',
'old_guest_list',
},
}
class CaseworkNavigation(Navigation):
mapping = {
'dashboard': {
'broadcast_tour',
'broadcast_dashboard',
'broadcast_dashboard_previous',
'broadcast_dashboard_updates',
},
'send-one-off': {
'choose_from_contact_list',
'choose_template',
'send_one_off',
'send_one_off_letter_address',
'send_one_off_step',
'send_one_off_to_myself',
},
'sent-messages': {
'view_notifications',
'view_notification',
},
'uploads': {
'view_jobs',
'view_job',
'upload_contact_list',
'check_contact_list',
'save_contact_list',
'contact_list',
'delete_contact_list',
'upload_letter',
'uploaded_letter_preview',
'uploaded_letters',
'uploads',
},
}
class OrgNavigation(Navigation):
mapping = {
'dashboard': {
'organisation_dashboard',
},
'settings': {
'confirm_edit_organisation_name',
'edit_organisation_agreement',
'edit_organisation_billing_details',
'edit_organisation_crown_status',
'edit_organisation_domains',
'edit_organisation_email_branding',
'edit_organisation_letter_branding',
'edit_organisation_domains',
'edit_organisation_go_live_notes',
'edit_organisation_name',
'edit_organisation_notes',
'edit_organisation_type',
'organisation_preview_email_branding',
'organisation_preview_letter_branding',
'organisation_settings',
},
'team-members': {
'edit_user_org_permissions',
'invite_org_user',
'manage_org_users',
'remove_user_from_organisation',
},
'trial-services': {
'organisation_trial_mode_services',
}
}
| mit | -5,444,905,363,476,817,000 | 30.15864 | 87 | 0.500864 | false |
adowaconan/Spindle_by_Graphical_Features | duplicate/Generate_Features (adowaconan).py | 1 | 5970 | # -*- coding: utf-8 -*-
"""
Created on Wed May 17 12:35:28 2017
@author: ning
"""
#import mne
import numpy as np
import pandas as pd
import os
from time import time
#import networkx as nx
from collections import Counter
os.chdir('D:\\NING - spindle\\Spindle_by_Graphical_Features')
channelList = ['F3','F4','C3','C4','O1','O2']
import eegPipelineFunctions
raw_dir = 'D:\\NING - spindle\\training set\\'
# get EEG files that have corresponding annotations
raw_files = []
for file in [f for f in os.listdir(raw_dir) if ('txt' in f)]:
sub = int(file.split('_')[0][3:])
if sub < 11:
day = file.split('_')[1][1]
day_for_load = file.split('_')[1][:2]
else:
day = file.split('_')[2][-1]
day_for_load = file.split('_')[2]
raw_file = [f for f in os.listdir(raw_dir) if (file.split('_')[0] in f) and (day_for_load in f) and ('fif' in f)]
if len(raw_file) != 0:
raw_files.append([raw_dir + raw_file[0],raw_dir + file])
# directory for storing all the feature files
raw_dir = 'D:\\NING - spindle\\training set\\road_trip\\'
if not os.path.exists(raw_dir):
os.makedirs(raw_dir)
# initialize the range of the parameters we want to compute based on
epoch_lengths = np.arange(1.,5.,0.2) # 1. to 5 seconds with 0.2 stepsize
plv_thresholds = np.arange(0.6, 0.85, 0.05) # 0.6 to 0.8 with .05
pli_thresholds = np.arange(0.05,0.30, 0.05) # 0.05 to 0.25 with 0.05
cc_thresholds = np.arange(0.7, 0.95,0.05) # 0.7 to 0.9 with 0.05
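# For reference, the grids above expand to:
#   epoch_lengths  -> 1.0, 1.2, 1.4, ..., 4.8 (20 window lengths, in seconds)
#   plv_thresholds -> 0.60, 0.65, 0.70, 0.75, 0.80
#   pli_thresholds -> 0.05, 0.10, 0.15, 0.20, 0.25
#   cc_thresholds  -> 0.70 ... 0.90 in steps of 0.05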
# make sub-directories based on epoch length
first_level_directory = []
for epoch_length in epoch_lengths:
directory_1 = raw_dir + 'epoch_length '+str(epoch_length)+'\\'
if not os.path.exists(directory_1):
os.makedirs(directory_1)
first_level_directory.append(directory_1)
os.chdir(directory_1)
#print(os.getcwd())
for files in raw_files:
raw_file, annotation_file = files
temp_anno = annotation_file.split('\\')[-1]
sub = int(temp_anno.split('_')[0][3:])
if sub < 11:
day = temp_anno.split('_')[1][1]
day_for_load = temp_anno.split('_')[1][:2]
else:
day = temp_anno.split('_')[2][-1]
day_for_load = temp_anno.split('_')[2]
directory_2 = directory_1 + 'sub' + str(sub) + 'day' + day + '\\'
if not os.path.exists(directory_2):
#print(directory_2)
os.makedirs(directory_2)
os.chdir(directory_2)
# epoch the data
epochs,label,_ = eegPipelineFunctions.get_data_ready(raw_file,channelList,
annotation_file,
epoch_length=epoch_length)
print(Counter(label))
# extract signal features
ssssss = time()
print('extracting signal features ......')
epochFeature = eegPipelineFunctions.featureExtraction(epochs,)
epochFeature = pd.DataFrame(epochFeature)
epochFeature['label']=label
epochFeature.to_csv('sub'+str(sub)+'day'+day+'_'+str(epoch_length)+'_'+'epoch_features.csv',index=False)
# compute adjasency matrices based on epochs
connectivity = eegPipelineFunctions.connectivity(epochs)
connectivity = np.array(connectivity)
plv, pli, cc = connectivity[0,:,:,:],connectivity[1,:,:,:],connectivity[2,:,:,:]
# pre-thresholding graph features
print('extracting graph features of plv ........')
plv_pre_threshold = eegPipelineFunctions.extractGraphFeatures(plv)
plv_pre_threshold['label']=label
print('extracting graph features of pli ........')
pli_pre_threshold = eegPipelineFunctions.extractGraphFeatures(pli)
pli_pre_threshold['label']=label
print('extracting graph features of cc .........')
cc_pre_threshold = eegPipelineFunctions.extractGraphFeatures(cc )
cc_pre_threshold['label']=label
plv_pre_threshold.to_csv('sub'+str(sub)+'day'+day+'plv_features.csv',index=False)
pli_pre_threshold.to_csv('sub'+str(sub)+'day'+day+'pli_features.csv',index=False)
cc_pre_threshold.to_csv('sub'+str(sub)+'day'+day+'cc_features.csv',index=False)
eeeeee = time()
print('done signal, plv, pli, and cc, cost time: %d s'%(eeeeee - ssssss))
# print('start thresholding')
# # extract graph features
# for t_plv,t_pli,t_cc in zip(plv_thresholds,pli_thresholds,cc_thresholds):
# # convert adjasency matrices to binary adjasency matrices
# adj_plv = eegPipelineFunctions.thresholding(t_plv,plv)
# adj_pli = eegPipelineFunctions.thresholding(t_pli,pli)
# adj_cc = eegPipelineFunctions.thresholding(t_cc, cc )
# # this is how we extract graph features
# graphFeature_plv = eegPipelineFunctions.extractGraphFeatures(adj_plv)
# graphFeature_pli = eegPipelineFunctions.extractGraphFeatures(adj_pli)
# graphFeature_cc = eegPipelineFunctions.extractGraphFeatures(adj_cc )
# # prepare the sub-directories for storing feature files
# plv_dir = directory_2 + 'plv_' + str(t_plv) + '\\'
# pli_dir = directory_2 + 'pli_' + str(t_pli) + '\\'
# cc_dir = directory_2 + 'cc_' + str(t_cc ) + '\\'
# if not os.path.exists(plv_dir):
# os.makedirs(plv_dir)
# if not os.path.exists(pli_dir):
# os.makedirs(pli_dir)
# if not os.path.exists(cc_dir):
# os.makedirs(cc_dir)
# # saving csvs
# pd.concat([epochFeature,graphFeature_plv],axis=1).to_csv(plv_dir + 'plv_' + str(t_plv) + '.csv',index=False)
# pd.concat([epochFeature,graphFeature_pli],axis=1).to_csv(pli_dir + 'pli_' + str(t_pli) + '.csv',index=False)
# pd.concat([epochFeature,graphFeature_cc ],axis=1).to_csv(cc_dir + 'cc_' + str(t_cc ) + '.csv',index=False)
| mit | 4,144,034,402,099,314,700 | 46.388889 | 121 | 0.597655 | false |
zeekay/elemental | elemental/js.py | 1 | 3175 | from sys import modules
from core import Element as Element
class js(Element):
tag = 'js'
def __init__(self, script='', url=''):
if script:
self.format = '<script type="text/javascript">%s</script>' % script
elif url:
self.format = '<script type="text/javascript" src="%s"></script>' % url
super(js, self).__init__()
def render_this(self):
return self.format
class js_lib(Element):
tag = 'js_lib'
url = '/js/{version}/app.js'
version = '0.1'
def __init__(self, url='', version=''):
if url:
self.url = url
if version:
self.version = version
super(js_lib, self).__init__()
@property
def format(self):
return ''.join(['<script src="', self.url, '"></script>'])
def render_this(self):
return self.format.format(version=self.version)
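# A minimal usage sketch (illustrative values only; assumes the base Element
# class from the core module needs no extra constructor arguments):
#   js_lib(url='/js/{version}/app.js', version='0.2').render_this()
#   # -> '<script src="/js/0.2/app.js"></script>'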
class jquery(js_lib):
tag = 'jquery'
url = '//ajax.googleapis.com/ajax/libs/jquery/{version}/jquery.min.js'
version = '1.6.2'
_cdnjs = [x.split() for x in """
xuijs 2.0.0 xui.min.js
css3finalize 1.43 jquery.css3finalize.min.js
processing.js 1.2.1 processing-api.min.js
prototype 1.7.0.0 prototype.js
camanjs 2.2 caman.full.min.js
noisy 1.0 jquery.noisy.min.js
modernizr 2.0.6 modernizr.min.js
string_score 0.1.10 string_score.min.js
mustache.js 0.3.0 mustache.min.js
dojo 1.6.0 dojo.xd.js
ext-core 3.1.0 ext-core.js
sizzle 1.4.4 sizzle.min.js
graphael 0.4.1 g.raphael-min.js
ocanvas 1.0 ocanvas.min.js
jqueryui 1.8.13 jquery-ui.min.js
spinejs 0.0.4 spine.min.js
galleria 1.2.3 galleria.min.js
less.js 1.1.3 less-1.1.3.min.js
underscore.js 1.1.7 underscore-min.js
highcharts 2.1.6 highcharts.js
flexie 1.0.0 flexie.min.js
waypoints 1.1 waypoints.min.js
yepnope 1.0.1 yepnope.min.js
mootools 1.3.2 mootools-yui-compressed.js
script.js 1.3 script.min.js
handlebars.js 1.0.0.beta2 handlebars.min.js
json2 20110223 json2.js
cufon 1.09i cufon-yui.js
zepto 0.6 zepto.min.js
chrome-frame 1.0.2 CFInstall.min.js
selectivizr 1.0.2 selectivizr-min.js
sammy.js 0.6.3 sammy.min.js
es5-shim 1.2.4 es5-shim.min.js
js-signals 0.6.1 js-signals.min.js
raphael 1.5.2 raphael-min.js
yui 3.3.0 yui-min.js
underscore.string 1.1.4 underscore.string.min.js
labjs 2.0 LAB.min.js
pubnub 3.1.2 pubnub.min.js
backbone.js 0.5.1 backbone-min.js
twitterlib.js 0.9.0 twitterlib.min.js
scriptaculous 1.8.3 scriptaculous.js
headjs 0.96 head.min.js
webfont 1.0.19 webfont.js
require.js 0.24.0 require.min.js
socket.io 0.7.0 socket.io.min.js
knockout 1.2.1 knockout-min.js
""".splitlines() if x]
_cdnjs_url = '//cdnjs.cloudflare.com/ajax/libs/%s/{version}/%s'
for _name, _version, _filename in _cdnjs:
_tag = _name.replace('.','')
_dict = {'tag': _tag,
'url': _cdnjs_url % (_name, _filename),
'version': _version}
setattr(modules[__name__], _tag, type(_tag, (js_lib,), _dict))
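# The loop above generates one js_lib subclass per cdnjs entry at import time,
# e.g. `jqueryui`, `backbonejs`, `underscorejs` (dots are stripped from the
# package name), each pinned to the version listed in _cdnjs.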
def _get_latest_cdnjs():
import requests
import json
data = requests.get('http://www.cdnjs.com/packages.json').read()
packages = json.loads(data)['packages']
for n, v, f in [(x['name'], x['version'], x['filename']) for x in packages if x]:
print n, v, f
| mit | -3,511,169,699,096,084,000 | 28.95283 | 85 | 0.654488 | false |
visipedia/tf_classification | preprocessing/inputs.py | 1 | 29376 | # Some of this code came from the https://github.com/tensorflow/models/tree/master/slim
# directory, so lets keep the Google license around for now.
#
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images for the Inception networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from easydict import EasyDict
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from preprocessing.decode_example import decode_serialized_example
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
fast_mode: Avoids slower ops (random_hue and random_contrast)
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.name_scope(scope, 'distort_color', [image]):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.clip_by_value(image, 0.0, 1.0)
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
aspect_ratio_range: An optional list of `floats`. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `floats`. The cropped area of the image
must contain a fraction of the supplied image within in this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional scope for name_scope.
Returns:
A tuple, a 3-D Tensor cropped_image and the distorted bbox
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return tf.tuple([cropped_image, distort_bbox])
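# A minimal call sketch (argument values are illustrative only):
#   cropped, crop_box = distorted_bounding_box_crop(
#       image, bbox, min_object_covered=0.5, area_range=(0.2, 1.0))
# crop_box holds the sampled window as normalized [ymin, xmin, ymax, xmax]
# with shape [1, 1, 4], ready for tf.image.draw_bounding_boxes.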
def _largest_size_at_most(height, width, largest_side):
"""Computes new shape with the largest side equal to `largest_side`.
Computes new shape with the largest side equal to `largest_side` while
preserving the original aspect ratio.
Args:
height: an int32 scalar tensor indicating the current height.
width: an int32 scalar tensor indicating the current width.
largest_side: A python integer or scalar `Tensor` indicating the size of
the largest side after resize.
Returns:
new_height: an int32 scalar tensor indicating the new height.
new_width: and int32 scalar tensor indicating the new width.
"""
largest_side = tf.convert_to_tensor(largest_side, dtype=tf.int32)
height = tf.to_float(height)
width = tf.to_float(width)
largest_side = tf.to_float(largest_side)
scale = tf.cond(tf.greater(height, width),
lambda: largest_side / height,
lambda: largest_side / width)
new_height = tf.to_int32(height * scale)
new_width = tf.to_int32(width * scale)
return new_height, new_width
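# Worked example: for a 480 (height) x 640 (width) image and largest_side=299,
# scale = 299 / 640 ~= 0.467, giving new_height=224 and new_width=299; the
# longer side is pinned to largest_side and the aspect ratio is preserved.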
class DistortedInputs():
def __init__(self, cfg, add_summaries):
self.cfg = cfg
self.add_summaries = add_summaries
def apply(self, original_image, bboxes, distorted_inputs, image_summaries, current_index):
cfg = self.cfg
add_summaries = self.add_summaries
image_shape = tf.shape(original_image)
image_height = tf.cast(image_shape[0], dtype=tf.float32) # cast so that we can multiply them by the bbox coords
image_width = tf.cast(image_shape[1], dtype=tf.float32)
# First thing we need to do is crop out the bbox region from the image
bbox = bboxes[current_index]
xmin = tf.cast(bbox[0] * image_width, tf.int32)
ymin = tf.cast(bbox[1] * image_height, tf.int32)
xmax = tf.cast(bbox[2] * image_width, tf.int32)
ymax = tf.cast(bbox[3] * image_height, tf.int32)
bbox_width = xmax - xmin
bbox_height = ymax - ymin
image = tf.image.crop_to_bounding_box(
image=original_image,
offset_height=ymin,
offset_width=xmin,
target_height=bbox_height,
target_width=bbox_width
)
image_height = bbox_height
image_width = bbox_width
# Convert the pixel values to be in the range [0,1]
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Add a summary of the original data
if add_summaries:
new_height, new_width = _largest_size_at_most(image_height, image_width, cfg.INPUT_SIZE)
resized_original_image = tf.image.resize_bilinear(tf.expand_dims(image, 0), [new_height, new_width])
resized_original_image = tf.squeeze(resized_original_image)
resized_original_image = tf.image.pad_to_bounding_box(resized_original_image, 0, 0, cfg.INPUT_SIZE, cfg.INPUT_SIZE)
# If there are multiple boxes for an image, we only want to write to the TensorArray once.
#image_summaries = image_summaries.write(0, tf.expand_dims(resized_original_image, 0))
image_summaries = tf.cond(tf.equal(current_index, 0),
lambda: image_summaries.write(0, tf.expand_dims(resized_original_image, 0)),
lambda: image_summaries.identity()
)
# Extract a distorted bbox
if cfg.DO_RANDOM_CROP > 0:
r = tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32)
do_crop = tf.less(r, cfg.DO_RANDOM_CROP)
rc_cfg = cfg.RANDOM_CROP_CFG
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
distorted_image, distorted_bbox = tf.cond(do_crop,
lambda: distorted_bounding_box_crop(image, bbox,
aspect_ratio_range=(rc_cfg.MIN_ASPECT_RATIO, rc_cfg.MAX_ASPECT_RATIO),
area_range=(rc_cfg.MIN_AREA, rc_cfg.MAX_AREA),
max_attempts=rc_cfg.MAX_ATTEMPTS),
lambda: tf.tuple([image, bbox])
)
else:
distorted_image = tf.identity(image)
distorted_bbox = tf.constant([[[0.0, 0.0, 1.0, 1.0]]]) # ymin, xmin, ymax, xmax
if cfg.DO_CENTRAL_CROP > 0:
r = tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32)
do_crop = tf.less(r, cfg.DO_CENTRAL_CROP)
distorted_image = tf.cond(do_crop,
lambda: tf.image.central_crop(distorted_image, cfg.CENTRAL_CROP_FRACTION),
lambda: tf.identity(distorted_image)
)
distorted_image.set_shape([None, None, 3])
# Add a summary
if add_summaries:
image_with_bbox = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), distorted_bbox)
new_height, new_width = _largest_size_at_most(image_height, image_width, cfg.INPUT_SIZE)
resized_image_with_bbox = tf.image.resize_bilinear(image_with_bbox, [new_height, new_width])
resized_image_with_bbox = tf.squeeze(resized_image_with_bbox)
resized_image_with_bbox = tf.image.pad_to_bounding_box(resized_image_with_bbox, 0, 0, cfg.INPUT_SIZE, cfg.INPUT_SIZE)
#image_summaries = image_summaries.write(1, tf.expand_dims(resized_image_with_bbox, 0))
image_summaries = tf.cond(tf.equal(current_index, 0),
lambda: image_summaries.write(1, tf.expand_dims(resized_image_with_bbox, 0)),
lambda: image_summaries.identity()
)
# Resize the distorted image to the correct dimensions for the network
if cfg.MAINTAIN_ASPECT_RATIO:
shape = tf.shape(distorted_image)
height = shape[0]
width = shape[1]
new_height, new_width = _largest_size_at_most(height, width, cfg.INPUT_SIZE)
else:
new_height = cfg.INPUT_SIZE
new_width = cfg.INPUT_SIZE
num_resize_cases = 1 if cfg.RESIZE_FAST else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, method: tf.image.resize_images(x, [new_height, new_width], method=method),
num_cases=num_resize_cases)
distorted_image = tf.image.pad_to_bounding_box(distorted_image, 0, 0, cfg.INPUT_SIZE, cfg.INPUT_SIZE)
if add_summaries:
#image_summaries = image_summaries.write(2, tf.expand_dims(distorted_image, 0))
image_summaries = tf.cond(tf.equal(current_index, 0),
lambda: image_summaries.write(2, tf.expand_dims(distorted_image, 0)),
lambda: image_summaries.identity()
)
# Randomly flip the image:
if cfg.DO_RANDOM_FLIP_LEFT_RIGHT > 0:
r = tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32)
do_flip = tf.less(r, 0.5)
distorted_image = tf.cond(do_flip, lambda: tf.image.flip_left_right(distorted_image), lambda: tf.identity(distorted_image))
# TODO: Can this be changed so that we don't always distort the colors?
# Distort the colors
if cfg.DO_COLOR_DISTORTION > 0:
r = tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32)
do_color_distortion = tf.less(r, cfg.DO_COLOR_DISTORTION)
num_color_cases = 1 if cfg.COLOR_DISTORT_FAST else 4
distorted_color_image = apply_with_random_selector(
distorted_image,
lambda x, ordering: distort_color(x, ordering, fast_mode=cfg.COLOR_DISTORT_FAST),
num_cases=num_color_cases)
distorted_image = tf.cond(do_color_distortion, lambda: tf.identity(distorted_color_image), lambda: tf.identity(distorted_image))
distorted_image.set_shape([cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
# Add a summary
if add_summaries:
#image_summaries = image_summaries.write(3, tf.expand_dims(distorted_image, 0))
image_summaries = tf.cond(tf.equal(current_index, 0),
lambda: image_summaries.write(3, tf.expand_dims(distorted_image, 0)),
lambda: image_summaries.identity()
)
# Add the distorted image to the TensorArray
distorted_inputs = distorted_inputs.write(current_index, tf.expand_dims(distorted_image, 0))
return [original_image, bboxes, distorted_inputs, image_summaries, current_index + 1]
def check_normalized_box_values(xmin, ymin, xmax, ymax, maximum_normalized_coordinate=1.01, prefix=""):
""" Make sure the normalized coordinates are less than 1
"""
xmin_maximum = tf.reduce_max(xmin)
xmin_assert = tf.Assert(
tf.greater_equal(1.01, xmin_maximum),
['%s, maximum xmin coordinate value is larger '
'than %f: ' % (prefix, maximum_normalized_coordinate), xmin_maximum])
with tf.control_dependencies([xmin_assert]):
xmin = tf.identity(xmin)
ymin_maximum = tf.reduce_max(ymin)
ymin_assert = tf.Assert(
tf.greater_equal(1.01, ymin_maximum),
['%s, maximum ymin coordinate value is larger '
'than %f: ' % (prefix, maximum_normalized_coordinate), ymin_maximum])
with tf.control_dependencies([ymin_assert]):
ymin = tf.identity(ymin)
xmax_maximum = tf.reduce_max(xmax)
xmax_assert = tf.Assert(
tf.greater_equal(1.01, xmax_maximum),
['%s, maximum xmax coordinate value is larger '
'than %f: ' % (prefix, maximum_normalized_coordinate), xmax_maximum])
with tf.control_dependencies([xmax_assert]):
xmax = tf.identity(xmax)
ymax_maximum = tf.reduce_max(ymax)
ymax_assert = tf.Assert(
tf.greater_equal(1.01, ymax_maximum),
['%s, maximum ymax coordinate value is larger '
'than %f: ' % (prefix, maximum_normalized_coordinate), ymax_maximum])
with tf.control_dependencies([ymax_assert]):
ymax = tf.identity(ymax)
return xmin, ymin, xmax, ymax
def expand_bboxes(xmin, xmax, ymin, ymax, cfg):
"""
Expand the bboxes.
"""
w = xmax - xmin
h = ymax - ymin
w = w * cfg.WIDTH_EXPANSION_FACTOR
h = h * cfg.HEIGHT_EXPANSION_FACTOR
half_w = w / 2.
half_h = h / 2.
xmin = tf.clip_by_value(xmin - half_w, 0, 1)
xmax = tf.clip_by_value(xmax + half_w, 0, 1)
ymin = tf.clip_by_value(ymin - half_h, 0, 1)
ymax = tf.clip_by_value(ymax + half_h, 0, 1)
return tf.tuple([xmin, xmax, ymin, ymax])
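# Worked example (hypothetical config values): with WIDTH_EXPANSION_FACTOR and
# HEIGHT_EXPANSION_FACTOR both set to 2.0, a box spanning x in [0.4, 0.6] has
# w = 0.2 -> 0.4, so half_w = 0.2 and the expanded x-range is [0.2, 0.8],
# clipped to the normalized [0, 1] image bounds.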
def get_region_data(serialized_example, cfg, fetch_ids=True, fetch_labels=True, fetch_text_labels=True, read_filename=False):
"""
Return the image, an array of bounding boxes, and an array of ids.
"""
feature_dict = {}
if cfg.REGION_TYPE == 'bbox':
bbox_cfg = cfg.BBOX_CFG
features_to_extract = [('image/object/bbox/xmin', 'xmin'),
('image/object/bbox/xmax', 'xmax'),
('image/object/bbox/ymin', 'ymin'),
('image/object/bbox/ymax', 'ymax')]
if read_filename:
features_to_extract.append(('image/filename', 'filename'))
else:
features_to_extract.append(('image/encoded', 'image'))
if fetch_ids:
features_to_extract.append(('image/object/id', 'id'))
if fetch_labels:
features_to_extract.append(('image/object/bbox/label', 'label'))
if fetch_text_labels:
features_to_extract.append(('image/object/bbox/text', 'text'))
features = decode_serialized_example(serialized_example, features_to_extract)
if read_filename:
image_buffer = tf.read_file(features['filename'])
image = tf.image.decode_jpeg(image_buffer, channels=3)
else:
image = features['image']
feature_dict['image'] = image
xmin = tf.expand_dims(features['xmin'], 0)
ymin = tf.expand_dims(features['ymin'], 0)
xmax = tf.expand_dims(features['xmax'], 0)
ymax = tf.expand_dims(features['ymax'], 0)
xmin, ymin, xmax, ymax = check_normalized_box_values(xmin, ymin, xmax, ymax, prefix="From tfrecords ")
if 'DO_EXPANSION' in bbox_cfg and bbox_cfg.DO_EXPANSION > 0:
r = tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32)
do_expansion = tf.less(r, bbox_cfg.DO_EXPANSION)
xmin, xmax, ymin, ymax = tf.cond(do_expansion,
lambda: expand_bboxes(xmin, xmax, ymin, ymax, bbox_cfg.EXPANSION_CFG),
lambda: tf.tuple([xmin, xmax, ymin, ymax])
)
xmin, ymin, xmax, ymax = check_normalized_box_values(xmin, ymin, xmax, ymax, prefix="After expansion ")
# combine the bounding boxes
bboxes = tf.concat(values=[xmin, ymin, xmax, ymax], axis=0)
# order the bboxes so that they have the shape: [num_bboxes, bbox_coords]
bboxes = tf.transpose(bboxes, [1, 0])
feature_dict['bboxes'] = bboxes
if fetch_ids:
ids = features['id']
feature_dict['ids'] = ids
if fetch_labels:
labels = features['label']
feature_dict['labels'] = labels
if fetch_text_labels:
text = features['text']
feature_dict['text'] = text
elif cfg.REGION_TYPE == 'image':
features_to_extract = []
if read_filename:
features_to_extract.append(('image/filename', 'filename'))
else:
features_to_extract.append(('image/encoded', 'image'))
if fetch_ids:
features_to_extract.append(('image/id', 'id'))
if fetch_labels:
features_to_extract.append(('image/class/label', 'label'))
if fetch_text_labels:
features_to_extract.append(('image/class/text', 'text'))
features = decode_serialized_example(serialized_example, features_to_extract)
if read_filename:
image_buffer = tf.read_file(features['filename'])
image = tf.image.decode_jpeg(image_buffer, channels=3)
else:
image = features['image']
feature_dict['image'] = image
bboxes = tf.constant([[0.0, 0.0, 1.0, 1.0]])
feature_dict['bboxes'] = bboxes
if fetch_ids:
ids = [features['id']]
feature_dict['ids'] = ids
if fetch_labels:
labels = [features['label']]
feature_dict['labels'] = labels
if fetch_text_labels:
text = [features['text']]
feature_dict['text'] = text
else:
raise ValueError("Unknown REGION_TYPE: %s" % (cfg.REGION_TYPE,))
return feature_dict
def bbox_crop_loop_cond(original_image, bboxes, distorted_inputs, image_summaries, current_index):
num_bboxes = tf.shape(bboxes)[0]
return current_index < num_bboxes
def get_distorted_inputs(original_image, bboxes, cfg, add_summaries):
distorter = DistortedInputs(cfg, add_summaries)
num_bboxes = tf.shape(bboxes)[0]
distorted_inputs = tf.TensorArray(
dtype=tf.float32,
size=num_bboxes,
element_shape=tf.TensorShape([1, cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
)
if add_summaries:
image_summaries = tf.TensorArray(
dtype=tf.float32,
size=4,
element_shape=tf.TensorShape([1, cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
)
else:
image_summaries = tf.constant([])
current_index = tf.constant(0, dtype=tf.int32)
loop_vars = [original_image, bboxes, distorted_inputs, image_summaries, current_index]
original_image, bboxes, distorted_inputs, image_summaries, current_index = tf.while_loop(
cond=bbox_crop_loop_cond,
body=distorter.apply,
loop_vars=loop_vars,
parallel_iterations=10, back_prop=False, swap_memory=False
)
distorted_inputs = distorted_inputs.concat()
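# concat() stacks the per-box TensorArray elements (each [1, INPUT_SIZE,
# INPUT_SIZE, 3]) into a single tensor of shape
# [num_bboxes, INPUT_SIZE, INPUT_SIZE, 3].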
if add_summaries:
tf.summary.image('0.original_image', image_summaries.read(0))
tf.summary.image('1.image_with_random_crop', image_summaries.read(1))
tf.summary.image('2.cropped_resized_image', image_summaries.read(2))
tf.summary.image('3.final_distorted_image', image_summaries.read(3))
return distorted_inputs
def create_training_batch(serialized_example, cfg, add_summaries, read_filenames=False):
features = get_region_data(serialized_example, cfg, fetch_ids=False,
fetch_labels=True, fetch_text_labels=False, read_filename=read_filenames)
original_image = features['image']
bboxes = features['bboxes']
labels = features['labels']
distorted_inputs = get_distorted_inputs(original_image, bboxes, cfg, add_summaries)
distorted_inputs = tf.subtract(distorted_inputs, 0.5)
distorted_inputs = tf.multiply(distorted_inputs, 2.0)
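# The subtract/multiply pair rescales pixel values from [0, 1] to [-1, 1],
# the input range used by the Inception-style networks this file targets.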
names = ('inputs', 'labels')
tensors = [distorted_inputs, labels]
return [names, tensors]
def create_visualization_batch(serialized_example, cfg, add_summaries, fetch_text_labels=False, read_filenames=False):
features = get_region_data(serialized_example, cfg, fetch_ids=True,
fetch_labels=True, fetch_text_labels=fetch_text_labels, read_filename=read_filenames)
original_image = features['image']
ids = features['ids']
bboxes = features['bboxes']
labels = features['labels']
if fetch_text_labels:
text_labels = features['text']
cpy_original_image = tf.identity(original_image)
distorted_inputs = get_distorted_inputs(original_image, bboxes, cfg, add_summaries)
original_image = cpy_original_image
# Resize the original image
if original_image.dtype != tf.float32:
original_image = tf.image.convert_image_dtype(original_image, dtype=tf.float32)
shape = tf.shape(original_image)
height = shape[0]
width = shape[1]
new_height, new_width = _largest_size_at_most(height, width, cfg.INPUT_SIZE)
original_image = tf.image.resize_images(original_image, [new_height, new_width], method=0)
original_image = tf.image.pad_to_bounding_box(original_image, 0, 0, cfg.INPUT_SIZE, cfg.INPUT_SIZE)
original_image = tf.image.convert_image_dtype(original_image, dtype=tf.uint8)
# make a copy of the original image for each bounding box
num_bboxes = tf.shape(bboxes)[0]
expanded_original_image = tf.expand_dims(original_image, 0)
concatenated_original_images = tf.tile(expanded_original_image, [num_bboxes, 1, 1, 1])
names = ['original_inputs', 'inputs', 'ids', 'labels']
tensors = [concatenated_original_images, distorted_inputs, ids, labels]
if fetch_text_labels:
names.append('text_labels')
tensors.append(text_labels)
return [names, tensors]
def create_classification_batch(serialized_example, cfg, add_summaries, read_filenames=False):
features = get_region_data(serialized_example, cfg, fetch_ids=True,
fetch_labels=False, fetch_text_labels=False, read_filename=read_filenames)
original_image = features['image']
bboxes = features['bboxes']
ids = features['ids']
distorted_inputs = get_distorted_inputs(original_image, bboxes, cfg, add_summaries)
distorted_inputs = tf.subtract(distorted_inputs, 0.5)
distorted_inputs = tf.multiply(distorted_inputs, 2.0)
names = ('inputs', 'ids')
tensors = [distorted_inputs, ids]
return [names, tensors]
def input_nodes(tfrecords, cfg, num_epochs=None, batch_size=32, num_threads=2,
shuffle_batch = True, random_seed=1, capacity = 1000, min_after_dequeue = 96,
add_summaries=True, input_type='train', fetch_text_labels=False,
read_filenames=False):
"""
Args:
tfrecords:
cfg:
num_epochs: number of times to read the tfrecords
batch_size:
num_threads:
shuffle_batch:
capacity:
min_after_dequeue:
add_summaries: Add tensorboard summaries of the images
input_type: 'train', 'visualize', 'test', 'classification'
"""
with tf.name_scope('inputs'):
# A producer to generate tfrecord file paths
filename_queue = tf.train.string_input_producer(
tfrecords,
num_epochs=num_epochs
)
# Construct a Reader to read examples from the tfrecords file
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
if input_type=='train' or input_type=='test':
batch_keys, data_to_batch = create_training_batch(serialized_example, cfg, add_summaries, read_filenames)
elif input_type=='visualize':
batch_keys, data_to_batch = create_visualization_batch(serialized_example, cfg, add_summaries, fetch_text_labels, read_filenames)
elif input_type=='classification':
batch_keys, data_to_batch = create_classification_batch(serialized_example, cfg, add_summaries, read_filenames)
else:
raise ValueError("Unknown input type: %s. Options are `train`, `test`, " \
"`visualize`, and `classification`." % (input_type,))
if shuffle_batch:
batch = tf.train.shuffle_batch(
data_to_batch,
batch_size=batch_size,
num_threads=num_threads,
capacity= capacity,
min_after_dequeue= min_after_dequeue,
seed = random_seed,
enqueue_many=True
)
else:
batch = tf.train.batch(
data_to_batch,
batch_size=batch_size,
num_threads=num_threads,
capacity= capacity,
enqueue_many=True
)
batch_dict = {k : v for k, v in zip(batch_keys, batch)}
return batch_dict | mit | 5,512,424,368,205,709,000 | 41.027182 | 141 | 0.625919 | false |
sunqm/pyscf | pyscf/lo/ibo.py | 1 | 16558 | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Paul J. Robinson <[email protected]>
# Qiming Sun <[email protected]>
#
'''
Intrinsic Bonding Orbitals
ref. JCTC, 9, 4834
Below here is work done by Paul Robinson.
Much of the code below is adapted from code published freely on the website of Gerald Knizia
Ref: JCTC, 2013, 9, 4834-4843
'''
from functools import reduce
import numpy
from pyscf.lib import logger
from pyscf.lo import iao
from pyscf.lo import orth, pipek
from pyscf import __config__
MINAO = getattr(__config__, 'lo_iao_minao', 'minao')
def ibo(mol, orbocc, locmethod='IBO', iaos=None, s=None,
exponent=4, grad_tol=1e-8, max_iter=200, minao=MINAO, verbose=logger.NOTE):
'''Intrinsic Bonding Orbitals
This function serves as a wrapper to the underlying localization functions
ibo_loc and PipekMezey to create IBOs.
Args:
mol : the molecule or cell object
orbocc : occupied molecular orbital coefficients
Kwargs:
locmethod : string
the localization method 'PM' for Pipek Mezey localization or 'IBO' for the IBO localization
iaos : 2D array
the array of IAOs
s : 2D array
the overlap array in the ao basis
Returns:
IBOs in the basis defined in mol object.
'''
if s is None:
if getattr(mol, 'pbc_intor', None): # whether mol object is a cell
if isinstance(orbocc, numpy.ndarray) and orbocc.ndim == 2:
s = mol.pbc_intor('int1e_ovlp', hermi=1)
else:
raise NotImplementedError('k-points crystal orbitals')
else:
s = mol.intor_symmetric('int1e_ovlp')
if iaos is None:
iaos = iao.iao(mol, orbocc)
locmethod = locmethod.strip().upper()
if locmethod == 'PM':
EXPONENT = getattr(__config__, 'lo_ibo_PipekMezey_exponent', exponent)
ibos = PipekMezey(mol, orbocc, iaos, s, exponent=EXPONENT, minao=minao)
del(EXPONENT)
else:
ibos = ibo_loc(mol, orbocc, iaos, s, exponent=exponent,
grad_tol=grad_tol, max_iter=max_iter,
minao=minao, verbose=verbose)
return ibos
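# A minimal usage sketch (assumes a converged mean-field calculation):
#   from pyscf import gto, scf
#   from pyscf.lo.ibo import ibo
#   mol = gto.M(atom='H 0 0 0; F 0 0 1.1', basis='cc-pvdz')
#   mf = scf.RHF(mol).run()
#   ibos = ibo(mol, mf.mo_coeff[:, mf.mo_occ > 0], locmethod='IBO')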
def ibo_loc(mol, orbocc, iaos, s, exponent, grad_tol, max_iter,
minao=MINAO, verbose=logger.NOTE):
'''Intrinsic Bonding Orbitals. [Ref. JCTC, 9, 4834]
This implementation follows Knizia's implementation except that the
resultant IBOs are symmetrically orthogonalized. Note the IBOs of this
implementation do not strictly maximize the IAO Mulliken charges.
IBOs can also be generated by another implementation (see function
pyscf.lo.ibo.PM). In that function, PySCF builtin Pipek-Mezey localization
module was used to maximize the IAO Mulliken charges.
Args:
mol : the molecule or cell object
orbocc : 2D array or a list of 2D array
occupied molecular orbitals or crystal orbitals for each k-point
Kwargs:
iaos : 2D array
the array of IAOs
exponent : integer
Localization power in PM scheme
grad_tol : float
convergence tolerance for norm of gradients
Returns:
IBOs in the big basis (the basis defined in mol object).
'''
log = logger.new_logger(mol, verbose)
assert(exponent in (2, 4))
# Symmetrically orthogonalization of the IAO orbitals as Knizia's
# implementation. The IAO returned by iao.iao function is not orthogonal.
iaos = orth.vec_lowdin(iaos, s)
#static variables
StartTime = logger.perf_counter()
L = 0 # initialize a value of the localization function for safety
#max_iter = 20000 #for some reason the convergence of solid is slower
#fGradConv = 1e-10 #this ought to be pumped up to about 1e-8 but for testing purposes it's fine
swapGradTolerance = 1e-12
#dynamic variables
Converged = False
# render Atoms list without ghost atoms
iao_mol = iao.reference_mol(mol, minao=minao)
Atoms = [iao_mol.atom_pure_symbol(i) for i in range(iao_mol.natm)]
#generates the parameters we need about the atomic structure
nAtoms = len(Atoms)
AtomOffsets = MakeAtomIbOffsets(Atoms)[0]
iAtSl = [slice(AtomOffsets[A],AtomOffsets[A+1]) for A in range(nAtoms)]
#converts the occupied MOs to the IAO basis
CIb = reduce(numpy.dot, (iaos.T, s , orbocc))
numOccOrbitals = CIb.shape[1]
log.debug(" {0:^5s} {1:^14s} {2:^11s} {3:^8s}"
.format("ITER.","LOC(Orbital)","GRADIENT", "TIME"))
for it in range(max_iter):
fGrad = 0.00
#calculate L for convergence checking
L = 0.
for A in range(nAtoms):
for i in range(numOccOrbitals):
CAi = CIb[iAtSl[A],i]
L += numpy.dot(CAi,CAi)**exponent
# loop over the occupied orbitals pairs i,j
for i in range(numOccOrbitals):
for j in range(i):
# I experimented with exponentially falling off random noise
Aij = 0.0 #numpy.random.random() * numpy.exp(-1*it)
Bij = 0.0 #numpy.random.random() * numpy.exp(-1*it)
for k in range(nAtoms):
CIbA = CIb[iAtSl[k],:]
Cii = numpy.dot(CIbA[:,i], CIbA[:,i])
Cij = numpy.dot(CIbA[:,i], CIbA[:,j])
Cjj = numpy.dot(CIbA[:,j], CIbA[:,j])
#now I calculate Aij and Bij for the gradient search
if exponent == 2:
Aij += 4.*Cij**2 - (Cii - Cjj)**2
Bij += 4.*Cij*(Cii - Cjj)
else:
Bij += 4.*Cij*(Cii**3-Cjj**3)
Aij += -Cii**4 - Cjj**4 + 6*(Cii**2 + Cjj**2)*Cij**2 + Cii**3 * Cjj + Cii*Cjj**3
if (Aij**2 + Bij**2 < swapGradTolerance) and False:
continue
#this saves us from replacing already fine orbitals
else:
#THE BELOW IS TAKEN DIRECTLY FROM KNIZIA's FREE CODE
# Calculate 2x2 rotation angle phi.
# This correspond to [2] (12)-(15), re-arranged and simplified.
phi = .25*numpy.arctan2(Bij,-Aij)
fGrad += Bij**2
# ^- Bij is the actual gradient. Aij is effectively
# the second derivative at phi=0.
# 2x2 rotation form; that's what PM suggest. it works
# fine, but I don't like the asymmetry.
cs = numpy.cos(phi)
ss = numpy.sin(phi)
Ci = 1. * CIb[:,i]
Cj = 1. * CIb[:,j]
CIb[:,i] = cs * Ci + ss * Cj
CIb[:,j] = -ss * Ci + cs * Cj
fGrad = fGrad**.5
log.debug(" {0:5d} {1:12.8f} {2:11.2e} {3:8.2f}"
.format(it+1, L**(1./exponent), fGrad, logger.perf_counter()-StartTime))
if fGrad < grad_tol:
Converged = True
break
Note = "IB/P%i/2x2, %i iter; Final gradient %.2e" % (exponent, it+1, fGrad)
if not Converged:
log.note("\nWARNING: Iterative localization failed to converge!"
"\n %s", Note)
else:
log.note(" Iterative localization: %s", Note)
log.debug(" Localized orbitals deviation from orthogonality: %8.2e",
numpy.linalg.norm(numpy.dot(CIb.T, CIb) - numpy.eye(numOccOrbitals)))
# Note CIb is not unitary matrix (although very close to unitary matrix)
# because the projection <IAO|OccOrb> does not give unitary matrix.
return numpy.dot(iaos, (orth.vec_lowdin(CIb)))
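# Summary of the optimization above: it maximizes the Pipek-Mezey-style
# functional L = sum_A sum_i (sum_{mu on A} C_{mu,i}**2)**p with p = exponent,
# where C is the occupied-orbital coefficient matrix in the orthogonalized IAO
# basis. Each orbital pair (i, j) is rotated by the 2x2 Jacobi angle
# phi = 0.25 * atan2(Bij, -Aij), with Bij the gradient and Aij the effective
# second derivative of L along that rotation.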
def PipekMezey(mol, orbocc, iaos, s, exponent, minao=MINAO):
'''
Note this localization is slightly different to Knizia's implementation.
The localization here preserves orthonormality during optimization.
Orbitals are projected to IAO basis first and the Mulliken pop is
calculated based on IAO basis (in function atomic_pops). A series of
unitary matrices are generated and applied on the input orbitals. The
intermediate orbitals in the optimization and the finally localized orbitals
are all orthonormal.
Examples:
>>> from pyscf import gto, scf
>>> from pyscf.lo import ibo
>>> mol = gto.M(atom='H 0 0 0; F 0 0 1', basis='unc-sto3g')
>>> mf = scf.RHF(mol).run()
>>> pm = ibo.PM(mol, mf.mo_coeff[:,mf.mo_occ>0])
>>> loc_orb = pm.kernel()
'''
# Note: PM with Lowdin-orth IAOs is implemented in pipek.PM class
# TODO: Merge the implemenation here to pipek.PM
cs = numpy.dot(iaos.T.conj(), s)
s_iao = numpy.dot(cs, iaos)
iao_inv = numpy.linalg.solve(s_iao, cs)
iao_mol = iao.reference_mol(mol, minao=minao)
# Define the mulliken population of each atom based on IAO basis.
# proj[i].trace is the mulliken population of atom i.
def atomic_pops(mol, mo_coeff, method=None):
nmo = mo_coeff.shape[1]
proj = numpy.empty((mol.natm,nmo,nmo))
orb_in_iao = reduce(numpy.dot, (iao_inv, mo_coeff))
for i, (b0, b1, p0, p1) in enumerate(iao_mol.offset_nr_by_atom()):
csc = reduce(numpy.dot, (orb_in_iao[p0:p1].T, s_iao[p0:p1],
orb_in_iao))
proj[i] = (csc + csc.T) * .5
return proj
pm = pipek.PM(mol, orbocc)
pm.atomic_pops = atomic_pops
pm.exponent = exponent
return pm
PM = Pipek = PipekMezey
def shell_str(l, n_cor, n_val):
'''
Helper function to define core and valence shells for shells with different l
'''
cor_shell = [
"[{n}s]", "[{n}px] [{n}py] [{n}pz]",
"[{n}d0] [{n}d2-] [{n}d1+] [{n}d2+] [{n}d1-]",
"[{n}f1+] [{n}f1-] [{n}f0] [{n}f3+] [{n}f2-] [{n}f3-] [{n}f2+]"]
val_shell = [
l_str.replace('[', '').replace(']', '') for l_str in cor_shell]
l_str = ' '.join(
[cor_shell[l].format(n=i) for i in range(l + 1, l + 1 + n_cor)] +
[val_shell[l].format(n=i) for i in range(l + 1 + n_cor,
l + 1 + n_cor + n_val)])
return l_str
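# Example: shell_str(0, 4, 1) -> "[1s] [2s] [3s] [4s] 5s", i.e. four core s
# shells (bracketed) plus one valence s shell, as used for Rb and Sr below.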
'''
These are parameters for selecting the valence space correctly.
The parameters are taken from in G. Knizia's free code
https://sites.psu.edu/knizia/software/
'''
def MakeAtomInfos():
nCoreX = {"H": 0, "He": 0}
for At in "Li Be B C O N F Ne".split(): nCoreX[At] = 1
for At in "Na Mg Al Si P S Cl Ar".split(): nCoreX[At] = 5
for At in "Na Mg Al Si P S Cl Ar".split(): nCoreX[At] = 5
for At in "K Ca".split(): nCoreX[At] = 18/2
for At in "Sc Ti V Cr Mn Fe Co Ni Cu Zn".split(): nCoreX[At] = 18/2
for At in "Ga Ge As Se Br Kr".split(): nCoreX[At] = 18/2+5 # [Ar] and the 5 d orbitals.
nAoX = {"H": 1, "He": 1}
for At in "Li Be".split(): nAoX[At] = 2
for At in "B C O N F Ne".split(): nAoX[At] = 5
for At in "Na Mg".split(): nAoX[At] = 3*1 + 1*3
for At in "Al Si P S Cl Ar".split(): nAoX[At] = 3*1 + 2*3
for At in "K Ca".split(): nAoX[At] = 18/2+1
for At in "Sc Ti V Cr Mn Fe Co Ni Cu Zn".split(): nAoX[At] = 18/2+1+5 # 4s, 3d
for At in "Ga Ge As Se Br Kr".split(): nAoX[At] = 18/2+1+5+3
AoLabels = {}
def SetAo(At, AoDecl):
Labels = AoDecl.split()
AoLabels[At] = Labels
assert(len(Labels) == nAoX[At])
nCore = len([o for o in Labels if o.startswith('[')])
assert(nCore == nCoreX[At])
# atomic orbitals in the MINAO basis: [xx] denotes core orbitals.
for At in "H He".split(): SetAo(At, "1s")
for At in "Li Be".split(): SetAo(At, "[1s] 2s")
for At in "B C O N F Ne".split(): SetAo(At, "[1s] 2s 2px 2py 2pz")
for At in "Na Mg".split(): SetAo(At, "[1s] [2s] 3s [2px] [2py] [2pz]")
for At in "Al Si P S Cl Ar".split(): SetAo(At, "[1s] [2s] 3s [2px] [2py] [2pz] 3px 3py 3pz")
for At in "K Ca".split(): SetAo(At, "[1s] [2s] [3s] 4s [2px] [2py] [2pz] [3px] [3py] [3pz]")
for At in "Sc Ti V Cr Mn Fe Co Ni Cu Zn".split(): SetAo(At, "[1s] [2s] [3s] 4s [2px] [2py] [2pz] [3px] [3py] [3pz] 3d0 3d2- 3d1+ 3d2+ 3d1-")
for At in "Ga Ge As Se Br Kr".split(): SetAo(At, "[1s] [2s] [3s] 4s [2px] [2py] [2pz] [3px] [3py] [3pz] 4px 4py 4pz [3d0] [3d2-] [3d1+] [3d2+] [3d1-]")
for At in "Rb Sr".split():
nCoreX[At] = 36/2
nAoX[At] = nCoreX[At] + 1
SetAo(At, ' '.join ([shell_str(0,4,1),
shell_str(1,3,0),
shell_str(2,1,0)]))
for At in "Y Zr Nb Mo Tc Ru Rh Pd Ag Cd".split():
nCoreX[At] = 36/2
nAoX[At] = nCoreX[At] + 1 + 5
SetAo(At, ' '.join ([shell_str(0,4,1),
shell_str(1,3,0),
shell_str(2,1,1)]))
for At in "In Sn Sb Te I Xe".split():
nCoreX[At] = 36/2 + 5
nAoX[At] = nCoreX[At] + 1 + 3
SetAo(At, ' '.join ([shell_str(0,4,1),
shell_str(1,3,1),
shell_str(2,2,0)]))
for At in "Cs Ba".split():
nCoreX[At] = 54/2
nAoX[At] = nCoreX[At] + 1
SetAo(At, ' '.join ([shell_str(0,5,1),
shell_str(1,4,0),
shell_str(2,2,0)]))
for At in "Ce Pr Nd Pm Sm Eu Gd Tb Dy Ho Er Tm Yb Lu".split():
nCoreX[At] = 54/2
nAoX[At] = nCoreX[At] + 1 + 5 + 7
SetAo(At, ' '.join ([shell_str(0,5,1),
shell_str(1,4,0),
shell_str(2,2,1),
shell_str(3,0,1)]))
for At in "La Hf Ta W Re Os Ir Pt Au Hg".split():
nCoreX[At] = 54/2 + 7
nAoX[At] = nCoreX[At] + 1 + 5
SetAo(At, ' '.join ([shell_str(0,5,1),
shell_str(1,4,0),
shell_str(2,2,1),
shell_str(3,1,0)]))
for At in "Tl Pb Bi Po At Rn".split():
nCoreX[At] = 54/2 + 7 + 5
nAoX[At] = nCoreX[At] + 1 + 3
SetAo(At, ' '.join ([shell_str(0,5,1),
shell_str(1,4,1),
shell_str(2,3,0),
shell_str(3,1,0)]))
for At in "Fr Ra".split():
nCoreX[At] = 86/2
nAoX[At] = nCoreX[At] + 1
SetAo(At, ' '.join ([shell_str(0,6,1),
shell_str(1,5,0),
shell_str(2,3,0),
shell_str(3,1,0)]))
for At in "Th Pa U Np Pu Am Cm Bk Cf Es Fm Md No".split():
nCoreX[At] = 86/2
nAoX[At] = nCoreX[At] + 1 + 5 + 7
SetAo(At, ' '.join ([shell_str(0,6,1),
shell_str(1,5,0),
shell_str(2,3,1),
shell_str(3,1,1)]))
for At in "Ac Lr Rf Db Sg Bh Hs Mt Ds Rg Cn".split():
nCoreX[At] = 86/2 + 7
nAoX[At] = nCoreX[At] + 1 + 5
SetAo(At, ' '.join ([shell_str(0,6,1),
shell_str(1,5,0),
shell_str(2,3,1),
shell_str(3,2,0)]))
for At in "Nh Fl Mc Lv Ts Og".split():
nCoreX[At] = 86/2 + 7 + 5
nAoX[At] = nCoreX[At] + 1 + 3
SetAo(At, ' '.join ([shell_str(0,6,1),
shell_str(1,5,1),
shell_str(2,4,0),
shell_str(3,2,0)]))
# note: f order is '4f1+','4f1-','4f0','4f3+','4f2-','4f3-','4f2+',
return nCoreX, nAoX, AoLabels
def MakeAtomIbOffsets(Atoms):
"""calcualte offset of first orbital of individual atoms
in the valence minimal basis (IB)"""
nCoreX, nAoX, AoLabels = MakeAtomInfos()
iBfAt = [0]
for Atom in Atoms:
Atom = ''.join(char for char in Atom if char.isalpha())
iBfAt.append(iBfAt[-1] + nAoX[Atom])
return iBfAt, nCoreX, nAoX, AoLabels
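# Example: for Atoms = ['O', 'H', 'H'] the minimal-basis sizes are 5, 1, 1,
# so the first return value is [0, 5, 6, 7]; ibo_loc builds
# slice(offsets[A], offsets[A+1]) from it to select the IAO rows of atom A.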
del(MINAO)
| apache-2.0 | 5,620,724,558,578,377,000 | 39.583333 | 155 | 0.543967 | false |
raxod502/straight.el | watcher/straight_watch_callback.py | 1 | 2215 | #!/usr/bin/env -S python3 -u
import os
import pathlib
import sys
WATCHEXEC_VAR_COMMON = "WATCHEXEC_COMMON_PATH"
WATCHEXEC_VARS = [
"WATCHEXEC_CREATED_PATH",
"WATCHEXEC_REMOVED_PATH",
"WATCHEXEC_RENAMED_PATH",
"WATCHEXEC_WRITTEN_PATH",
"WATCHEXEC_META_CHANGED_PATH",
]
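# watchexec sets these variables for each batch of file-system events; when
# present, each holds one or more affected paths joined with the platform
# path separator (split with os.pathsep in main below).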
def die(message):
print(message, file=sys.stderr)
sys.exit(1)
def usage():
return "usage: python -m straight_watch_callback <repos-dir> <modified-dir>"
def path_contains(parent, child):
parent = pathlib.Path(parent).resolve()
child = pathlib.Path(child).resolve()
return parent in child.parents
def path_strip(parent, child):
parent = pathlib.Path(parent).parts
child = pathlib.Path(child).parts
return child[len(parent)]
def main(args):
if len(args) != 2:
die(usage())
repos_dir, modified_dir = args
repos_dir = pathlib.Path(repos_dir).resolve()
modified_dir = pathlib.Path(modified_dir).resolve()
paths = []
for var in WATCHEXEC_VARS:
if var in os.environ:
for path in os.environ[var].split(os.pathsep):
paths.append(path)
if not paths:
die("straight_watch_callback.py: watchexec gave no modified files")
if WATCHEXEC_VAR_COMMON in os.environ:
common = os.environ[WATCHEXEC_VAR_COMMON]
# Yes, string concatenation. For some reason when a common
# prefix is used, the individual paths start with a slash even
# though they're actually relative to the prefix.
paths = [common + path for path in paths]
paths = [pathlib.Path(path).resolve() for path in paths]
paths = sorted(set(paths))
repos = set()
for path in paths:
print("detect modification: {}".format(path), file=sys.stderr)
if repos_dir in path.parents:
repo = path.relative_to(repos_dir).parts[0]
repos.add(repo)
if repos:
modified_dir.mkdir(parents=True, exist_ok=True)
repos = sorted(repos)
for repo in repos:
print("--> mark for rebuild: {}".format(repo), file=sys.stderr)
with open(modified_dir / repo, "w"):
pass
if __name__ == "__main__":
main(sys.argv[1:])
| mit | -3,666,085,764,388,796,400 | 28.144737 | 80 | 0.628894 | false |
Onager/plaso | plaso/cli/status_view.py | 1 | 18865 | # -*- coding: utf-8 -*-
"""The status view."""
import ctypes
import sys
import time
try:
import win32api
import win32console
except ImportError:
win32console = None
from dfvfs.lib import definitions as dfvfs_definitions
import plaso
from plaso.cli import tools
from plaso.cli import views
from plaso.lib import definitions
class StatusView(object):
"""Processing status view."""
MODE_LINEAR = 'linear'
MODE_WINDOW = 'window'
_SOURCE_TYPES = {
definitions.SOURCE_TYPE_ARCHIVE: 'archive',
dfvfs_definitions.SOURCE_TYPE_DIRECTORY: 'directory',
dfvfs_definitions.SOURCE_TYPE_FILE: 'single file',
dfvfs_definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE: (
'storage media device'),
dfvfs_definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE: (
'storage media image')}
_UNITS_1024 = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
_WINAPI_STD_OUTPUT_HANDLE = -11
_WINAPI_ENABLE_PROCESSED_INPUT = 1
_WINAPI_ENABLE_LINE_INPUT = 2
_WINAPI_ENABLE_ECHO_INPUT = 4
_WINAPI_ANSI_CONSOLE_MODE = (
_WINAPI_ENABLE_PROCESSED_INPUT | _WINAPI_ENABLE_LINE_INPUT |
_WINAPI_ENABLE_ECHO_INPUT)
def __init__(self, output_writer, tool_name):
"""Initializes a status view.
Args:
output_writer (OutputWriter): output writer.
tool_name (str): name of the tool.
"""
super(StatusView, self).__init__()
self._artifact_filters = None
self._filter_file = None
self._have_ansi_support = not win32console
self._mode = self.MODE_WINDOW
self._output_writer = output_writer
self._source_path = None
self._source_type = None
self._stdout_output_writer = isinstance(
output_writer, tools.StdoutOutputWriter)
self._storage_file_path = None
self._tool_name = tool_name
if win32console:
kernel32 = ctypes.windll.kernel32
stdout_handle = kernel32.GetStdHandle(self._WINAPI_STD_OUTPUT_HANDLE)
result = kernel32.SetConsoleMode(
stdout_handle, self._WINAPI_ANSI_CONSOLE_MODE)
self._have_ansi_support = result != 0
def _AddsAnalysisProcessStatusTableRow(self, process_status, table_view):
"""Adds an analysis process status table row.
Args:
process_status (ProcessStatus): processing status.
table_view (CLITabularTableView): table view.
"""
used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory)
events = ''
if (process_status.number_of_consumed_events is not None and
process_status.number_of_consumed_events_delta is not None):
events = '{0:d} ({1:d})'.format(
process_status.number_of_consumed_events,
process_status.number_of_consumed_events_delta)
event_tags = ''
if (process_status.number_of_produced_event_tags is not None and
process_status.number_of_produced_event_tags_delta is not None):
event_tags = '{0:d} ({1:d})'.format(
process_status.number_of_produced_event_tags,
process_status.number_of_produced_event_tags_delta)
reports = ''
if (process_status.number_of_produced_reports is not None and
process_status.number_of_produced_reports_delta is not None):
reports = '{0:d} ({1:d})'.format(
process_status.number_of_produced_reports,
process_status.number_of_produced_reports_delta)
table_view.AddRow([
process_status.identifier, process_status.pid, process_status.status,
used_memory, events, event_tags, reports])
def _AddExtractionProcessStatusTableRow(self, process_status, table_view):
"""Adds an extraction process status table row.
Args:
process_status (ProcessStatus): processing status.
table_view (CLITabularTableView): table view.
"""
used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory)
sources = ''
if (process_status.number_of_produced_sources is not None and
process_status.number_of_produced_sources_delta is not None):
sources = '{0:d} ({1:d})'.format(
process_status.number_of_produced_sources,
process_status.number_of_produced_sources_delta)
events = ''
if (process_status.number_of_produced_events is not None and
process_status.number_of_produced_events_delta is not None):
events = '{0:d} ({1:d})'.format(
process_status.number_of_produced_events,
process_status.number_of_produced_events_delta)
# TODO: shorten display name to fit in 80 chars and show the filename.
table_view.AddRow([
process_status.identifier, process_status.pid, process_status.status,
used_memory, sources, events, process_status.display_name])
def _ClearScreen(self):
"""Clears the terminal/console screen."""
if self._have_ansi_support:
# ANSI escape sequence to clear screen.
self._output_writer.Write('\033[2J')
# ANSI escape sequence to move cursor to top left.
self._output_writer.Write('\033[H')
elif win32console:
# This version of Windows cmd.exe does not support ANSI escape codes, thus
# instead we fill the console screen buffer with spaces. The downside of
# this approach is an annoying flicker.
top_left_coordinate = win32console.PyCOORDType(0, 0)
screen_buffer = win32console.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
screen_buffer_information = screen_buffer.GetConsoleScreenBufferInfo()
screen_buffer_attributes = screen_buffer_information['Attributes']
screen_buffer_size = screen_buffer_information['Size']
console_size = screen_buffer_size.X * screen_buffer_size.Y
screen_buffer.FillConsoleOutputCharacter(
' ', console_size, top_left_coordinate)
screen_buffer.FillConsoleOutputAttribute(
screen_buffer_attributes, console_size, top_left_coordinate)
screen_buffer.SetConsoleCursorPosition(top_left_coordinate)
# TODO: remove update flicker. For win32console we could set the cursor
# top left, write the table, clean the remainder of the screen buffer
# and set the cursor at the end of the table.
def _FormatSizeInUnitsOf1024(self, size):
"""Represents a number of bytes in units of 1024.
Args:
size (int): size in bytes.
Returns:
str: human readable string of the size.
"""
magnitude_1024 = 0
used_memory_1024 = float(size)
while used_memory_1024 >= 1024:
used_memory_1024 /= 1024
magnitude_1024 += 1
if 0 < magnitude_1024 <= 7:
return '{0:.1f} {1:s}'.format(
used_memory_1024, self._UNITS_1024[magnitude_1024])
return '{0:d} B'.format(size)
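# Worked example: 3145728 bytes -> 3145728 / 1024 / 1024 = 3.0, so this
# returns '3.0 MiB'.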
def _FormatProcessingTime(self, processing_status):
"""Formats the processing time.
Args:
processing_status (ProcessingStatus): processing status.
Returns:
str: processing time formatted as: "5 days, 12:34:56".
"""
processing_time = 0
if processing_status:
processing_time = time.time() - processing_status.start_time
processing_time, seconds = divmod(int(processing_time), 60)
processing_time, minutes = divmod(processing_time, 60)
days, hours = divmod(processing_time, 24)
if days == 0:
days_string = ''
elif days == 1:
days_string = '1 day, '
else:
days_string = '{0:d} days, '.format(days)
return '{0:s}{1:02d}:{2:02d}:{3:02d}'.format(
days_string, hours, minutes, seconds)
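# Worked example: 93784 seconds of processing time is rendered as
# '1 day, 02:03:04' (1 day, 2 hours, 3 minutes, 4 seconds).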
def _PrintAnalysisStatusHeader(self, processing_status):
"""Prints the analysis status header.
Args:
processing_status (ProcessingStatus): processing status.
"""
self._output_writer.Write(
'Storage file\t\t: {0:s}\n'.format(self._storage_file_path))
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time\t\t: {0:s}\n'.format(processing_time))
if processing_status and processing_status.events_status:
self._PrintEventsStatus(processing_status.events_status)
self._output_writer.Write('\n')
def _PrintAnalysisStatusUpdateLinear(self, processing_status):
"""Prints an analysis status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time: {0:s}\n'.format(processing_time))
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events consumed: {3:d}\n').format(
processing_status.foreman_status.identifier,
processing_status.foreman_status.pid,
processing_status.foreman_status.status,
processing_status.foreman_status.number_of_consumed_events)
self._output_writer.Write(status_line)
for worker_status in processing_status.workers_status:
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events consumed: {3:d}\n').format(
worker_status.identifier, worker_status.pid, worker_status.status,
worker_status.number_of_consumed_events)
self._output_writer.Write(status_line)
self._output_writer.Write('\n')
def _PrintAnalysisStatusUpdateWindow(self, processing_status):
"""Prints an analysis status update in window mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
if self._stdout_output_writer:
self._ClearScreen()
output_text = 'plaso - {0:s} version {1:s}\n\n'.format(
self._tool_name, plaso.__version__)
self._output_writer.Write(output_text)
self._PrintAnalysisStatusHeader(processing_status)
table_view = views.CLITabularTableView(column_names=[
'Identifier', 'PID', 'Status', 'Memory', 'Events', 'Tags',
'Reports'], column_sizes=[23, 7, 15, 15, 15, 15, 0])
self._AddsAnalysisProcessStatusTableRow(
processing_status.foreman_status, table_view)
for worker_status in processing_status.workers_status:
self._AddsAnalysisProcessStatusTableRow(worker_status, table_view)
table_view.Write(self._output_writer)
self._output_writer.Write('\n')
if processing_status.aborted:
self._output_writer.Write(
'Processing aborted - waiting for clean up.\n\n')
if self._stdout_output_writer:
# We need to explicitly flush stdout to prevent partial status updates.
sys.stdout.flush()
def _PrintExtractionStatusUpdateLinear(self, processing_status):
"""Prints an extraction status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time: {0:s}\n'.format(processing_time))
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events produced: {3:d}, file: '
'{4:s}\n').format(
processing_status.foreman_status.identifier,
processing_status.foreman_status.pid,
processing_status.foreman_status.status,
processing_status.foreman_status.number_of_produced_events,
processing_status.foreman_status.display_name)
self._output_writer.Write(status_line)
for worker_status in processing_status.workers_status:
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events produced: {3:d}, file: '
'{4:s}\n').format(
worker_status.identifier, worker_status.pid, worker_status.status,
worker_status.number_of_produced_events,
worker_status.display_name)
self._output_writer.Write(status_line)
self._output_writer.Write('\n')
def _PrintExtractionStatusUpdateWindow(self, processing_status):
"""Prints an extraction status update in window mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
if self._stdout_output_writer:
self._ClearScreen()
output_text = 'plaso - {0:s} version {1:s}\n\n'.format(
self._tool_name, plaso.__version__)
self._output_writer.Write(output_text)
self.PrintExtractionStatusHeader(processing_status)
table_view = views.CLITabularTableView(column_names=[
'Identifier', 'PID', 'Status', 'Memory', 'Sources', 'Events',
'File'], column_sizes=[15, 7, 15, 15, 15, 15, 0])
self._AddExtractionProcessStatusTableRow(
processing_status.foreman_status, table_view)
for worker_status in processing_status.workers_status:
self._AddExtractionProcessStatusTableRow(worker_status, table_view)
table_view.Write(self._output_writer)
self._output_writer.Write('\n')
if processing_status.aborted:
self._output_writer.Write(
'Processing aborted - waiting for clean up.\n\n')
# TODO: remove update flicker. For win32console we could set the cursor
# top left, write the table, clean the remainder of the screen buffer
# and set the cursor at the end of the table.
if self._stdout_output_writer:
# We need to explicitly flush stdout to prevent partial status updates.
sys.stdout.flush()
def _PrintEventsStatus(self, events_status):
"""Prints the status of the events.
Args:
events_status (EventsStatus): events status.
"""
if events_status:
table_view = views.CLITabularTableView(
column_names=['Events:', 'Filtered', 'In time slice', 'Duplicates',
'MACB grouped', 'Total'],
column_sizes=[15, 15, 15, 15, 15, 0])
table_view.AddRow([
'', events_status.number_of_filtered_events,
events_status.number_of_events_from_time_slice,
events_status.number_of_duplicate_events,
events_status.number_of_macb_grouped_events,
events_status.total_number_of_events])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
def _PrintTasksStatus(self, processing_status):
"""Prints the status of the tasks.
Args:
processing_status (ProcessingStatus): processing status.
"""
if processing_status and processing_status.tasks_status:
tasks_status = processing_status.tasks_status
table_view = views.CLITabularTableView(
column_names=['Tasks:', 'Queued', 'Processing', 'Merging',
'Abandoned', 'Total'],
column_sizes=[15, 7, 15, 15, 15, 0])
table_view.AddRow([
'', tasks_status.number_of_queued_tasks,
tasks_status.number_of_tasks_processing,
tasks_status.number_of_tasks_pending_merge,
tasks_status.number_of_abandoned_tasks,
tasks_status.total_number_of_tasks])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
def GetAnalysisStatusUpdateCallback(self):
"""Retrieves the analysis status update callback function.
Returns:
function: status update callback function or None if not available.
"""
if self._mode == self.MODE_LINEAR:
return self._PrintAnalysisStatusUpdateLinear
if self._mode == self.MODE_WINDOW:
return self._PrintAnalysisStatusUpdateWindow
return None
def GetExtractionStatusUpdateCallback(self):
"""Retrieves the extraction status update callback function.
Returns:
function: status update callback function or None if not available.
"""
if self._mode == self.MODE_LINEAR:
return self._PrintExtractionStatusUpdateLinear
if self._mode == self.MODE_WINDOW:
return self._PrintExtractionStatusUpdateWindow
return None
# TODO: refactor to protected method.
def PrintExtractionStatusHeader(self, processing_status):
"""Prints the extraction status header.
Args:
processing_status (ProcessingStatus): processing status.
"""
self._output_writer.Write(
'Source path\t\t: {0:s}\n'.format(self._source_path))
self._output_writer.Write(
'Source type\t\t: {0:s}\n'.format(self._source_type))
if self._artifact_filters:
artifacts_string = ', '.join(self._artifact_filters)
self._output_writer.Write('Artifact filters\t: {0:s}\n'.format(
artifacts_string))
if self._filter_file:
self._output_writer.Write('Filter file\t\t: {0:s}\n'.format(
self._filter_file))
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time\t\t: {0:s}\n'.format(processing_time))
self._PrintTasksStatus(processing_status)
self._output_writer.Write('\n')
def PrintExtractionSummary(self, processing_status):
"""Prints a summary of the extraction.
Args:
processing_status (ProcessingStatus): processing status.
"""
if not processing_status:
self._output_writer.Write(
'WARNING: missing processing status information.\n')
elif not processing_status.aborted:
if processing_status.error_path_specs:
self._output_writer.Write('Processing completed with errors.\n')
else:
self._output_writer.Write('Processing completed.\n')
number_of_warnings = (
processing_status.foreman_status.number_of_produced_warnings)
if number_of_warnings:
output_text = '\n'.join([
'',
('Number of warnings generated while extracting events: '
'{0:d}.').format(number_of_warnings),
'',
'Use pinfo to inspect warnings in more detail.',
''])
self._output_writer.Write(output_text)
if processing_status.error_path_specs:
output_text = '\n'.join([
'',
'Path specifications that could not be processed:',
''])
self._output_writer.Write(output_text)
for path_spec in processing_status.error_path_specs:
self._output_writer.Write(path_spec.comparable)
self._output_writer.Write('\n')
self._output_writer.Write('\n')
def SetMode(self, mode):
"""Sets the mode.
Args:
mode (str): status view mode.
"""
self._mode = mode
def SetSourceInformation(
self, source_path, source_type, artifact_filters=None, filter_file=None):
"""Sets the source information.
Args:
source_path (str): path of the source.
source_type (str): source type.
artifact_filters (Optional[list[str]]): names of artifact definitions to
use as filters.
filter_file (Optional[str]): filter file.
"""
self._artifact_filters = artifact_filters
self._filter_file = filter_file
self._source_path = source_path
self._source_type = self._SOURCE_TYPES.get(source_type, 'UNKNOWN')
def SetStorageFileInformation(self, storage_file_path):
"""Sets the storage file information.
Args:
storage_file_path (str): path to the storage file.
"""
self._storage_file_path = storage_file_path
| apache-2.0 | -6,684,999,783,999,508,000 | 33.935185 | 80 | 0.660906 | false |
kleinfeld/medpy | setup.py | 1 | 4152 | #!/usr/bin/env python
# version: 0.1.2
import os
# setuptools >= 0.7 supports 'python setup.py develop'
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, Extension
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# The maxflow graphcut wrapper using boost.python
maxflow = Extension('medpy.graphcut.maxflow',
define_macros = [('MAJOR_VERSION', '0'),
('MINOR_VERSION', '1')],
sources = ['lib/maxflow/src/maxflow.cpp', 'lib/maxflow/src/wrapper.cpp', 'lib/maxflow/src/graph.cpp'],
libraries = ['boost_python'],
extra_compile_args = ['-O0'])
setup(name='MedPy',
version='0.1.0', # major.minor.micro
description='Medical image processing in Python',
author='Oskar Maier',
author_email='[email protected]',
url='https://github.com/loli/medpy',
license='LICENSE.txt',
keywords='medical image processing dicom itk insight tool kit MRI CT US graph cut max-flow min-cut',
long_description=read('README.txt'),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Other Environment',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Healthcare Industry',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
#'Operating System :: MacOS :: MacOS X',
#'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: C++',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Topic :: Scientific/Engineering :: Image Recognition'
],
install_requires=[
"scipy >= 0.9.0",
"numpy >= 1.6.1",
],
extras_require = {
'Nifti/Analyze': ["nibabel >= 1.3.0", "RXP"],
'Dicom': ["pydicom >= 0.9.7"],
'Additional image formats' : ["itk >= 3.16.0"]
},
packages = [
'medpy',
'medpy.core',
'medpy.features',
'medpy.filter',
'medpy.graphcut',
'medpy.io',
'medpy.itkvtk',
'medpy.itkvtk.filter',
'medpy.itkvtk.utilities',
'medpy.metric',
'medpy.occlusion',
'medpy.utilities'
],
scripts=[
'bin/medpy_anisotropic_diffusion.py',
'bin/medpy_apparent_diffusion_coefficient.py',
'bin/medpy_check_marker_intersection.py',
'bin/medpy_convert.py',
'bin/medpy_count_labels.py',
'bin/medpy_create_empty_volume_by_example.py',
'bin/medpy_dicom_slices_to_volume.py',
'bin/medpy_dicom_to_4D.py',
'bin/medpy_diff.py',
'bin/medpy_evaluate_miccai2007.py',
'bin/medpy_extract_min_max.py',
'bin/medpy_extract_sub_volume_auto.py',
'bin/medpy_extract_sub_volume_by_example.py',
'bin/medpy_extract_sub_volume.py',
'bin/medpy_gradient.py',
'bin/medpy_graphcut_label.py',
'bin/medpy_graphcut_label_bgreduced.py',
'bin/medpy_graphcut_label_w_regional.py',
'bin/medpy_graphcut_label_wsplit.py',
'bin/medpy_graphcut_voxel.py',
'bin/medpy_grid.py',
'bin/medpy_info.py',
'bin/medpy_intensity_range_standardization.py',
'bin/medpy_itk_gradient.py',
'bin/medpy_itk_smoothing.py',
'bin/medpy_itk_watershed.py',
'bin/medpy_join_xd_to_xplus1d.py',
'bin/medpy_merge.py',
'bin/medpy_morphology.py',
'bin/medpy_occlusion.py',
'bin/medpy_reduce.py',
'bin/medpy_resample.py',
'bin/medpy_reslice_3d_to_4d.py',
'bin/medpy_set_pixel_spacing.py',
'bin/medpy_shrink_image.py',
'bin/medpy_split_xd_to_xminus1d.py',
'bin/medpy_stack_sub_volumes.py',
'bin/medpy_superimposition.py',
'bin/medpy_swap_dimensions.py',
'bin/medpy_zoom_image.py'
],
ext_modules = [maxflow],
)
| gpl-3.0 | -4,711,353,523,098,394,000 | 33.31405 | 114 | 0.589355 | false |
CARPEM/GalaxyDocker | data-manager-hegp/analysisManager/analysismanager/STARTUP_Add_Workflows_Information.py | 1 | 8085 | import os
import sys
import json
from datamanagerpkg import ProtonCommunication_data_manager
from datamanagerpkg import GalaxyCommunication_data_manager
from sequencer.models import Experiments, GalaxyUsers
from sequencer.models import GalaxyJobs, ExperimentRawData
from sequencer.models import UserCommonJobs,Supportedfiles
from sequencer.models import Workflows,WorkflowsTools
##########################
#URL SEQUENCER
##########################
from GlobalVariables import sequencer_base_url
from GlobalVariables import sequencer_user
from GlobalVariables import sequencer_password
from GlobalVariables import sequencer_severName
from GlobalVariables import sequencer_ExperimentLimit
from GlobalVariables import toolsInformation
##########################
#URL GALAXY
##########################
from GlobalVariables import galaxy_base_url
from GlobalVariables import apiKey
##########################
#NAs DIr folder
##########################
from GlobalVariables import nasInput
from GlobalVariables import CNVfolderName
from GlobalVariables import plasmaFolderName
from GlobalVariables import nasResults
from GlobalVariables import workflowPath
##########################
#SMTP folder
##########################
from GlobalVariables import smtpServerAphp
from GlobalVariables import smtpPortServer
from GlobalVariables import fromAddrOfficial
from sequencer.views import getDataPath
from pprint import pprint
def uploadAWorkflowToDatabase(pathToWorkflow):
with open(pathToWorkflow) as data_file:
data = json.load(data_file)
pprint(data)
#now I have the key in order
stepkey=data['steps'].keys()
stepkey = [int(x) for x in stepkey]
stepkey.sort()
#create a workflow object
#~ u'annotation': u'plasma workflow to generates all the data',u'name': u'Plasma_mutation',
tryexp = None
try:
tryexp = Workflows.objects.get(name=str(data['name']))
except Workflows.DoesNotExist:
tryexp = None
if (tryexp == None):
workflow_local=Workflows(name=str(data['name']),description=str(data['name']))
workflow_local.save()
workflow_local = Workflows.objects.get(name=str(data['name']))
for step in stepkey:
if data['steps'][str(step)]['tool_id']!=None:
#create a tool
print("find a Tool to add, try to add this new tool to the database")
print(str(data['steps'][str(step)]['tool_id']))
try:
tryexp = WorkflowsTools.objects.get(primary_name=str(data['steps'][str(step)]['tool_id']+"_"+data['steps'][str(step)]['tool_version']+".json"))
except WorkflowsTools.DoesNotExist:
tryexp = None
#~ if tryexp == None:
print("tool found was not added to the DB. We Add now this new tool")
newtool=WorkflowsTools(primary_name=str(data['steps'][str(step)]['tool_id']+"_"+data['steps'][str(step)]['tool_version']+".json"),
name=str(data['steps'][str(step)]['tool_id']),
version=str(data['steps'][str(step)]['tool_version']))
newtool.save()
print("Add the tool definition to the Workflow and link it to the current workflow.")
workflow_local.tools_list.add(newtool)
workflow_local.save()
print("Name of the json file where the tool is define:" +data['steps'][str(step)]['tool_id']+"_"+data['steps'][str(step)]['tool_version']+".json")
#create a tool
with open(toolsInformation+data['steps'][str(step)]['tool_id']+"_"+data['steps'][str(step)]['tool_version']+".json") as data_file_tool:
tool = json.load(data_file_tool)
#~ print(tool['function'][0])
print("#######################input")
#~ print(tool['function'][0]['input'])
for dataInput in tool['function'][0]['input'] :
try:
tryexp = Supportedfiles.objects.get(dataDescription=str(dataInput['dataDescription']))
except Supportedfiles.DoesNotExist:
tryexp = None
newfile=Supportedfiles(dataHandle=str(dataInput['dataHandle']),dataDescription=str(dataInput['dataDescription']),dataFormatEdamOntology=str(dataInput['format'][0]['uri']))
newfile.save()
newtool.inputlist.add(newfile)
newtool.save()
#~ print("#######################dataInpty")
print("#######################output")
for dataInput in tool['function'][0]['input'] :
try:
tryexp = Supportedfiles.objects.get(dataDescription=str(dataInput['dataDescription']))
except Supportedfiles.DoesNotExist:
tryexp = None
#~ if tryexp == None:
newfile=Supportedfiles(dataHandle=str(dataInput['dataHandle']),dataDescription=str(dataInput['dataDescription']),dataFormatEdamOntology=str(dataInput['format'][0]['uri']) )
newfile.save()
newtool.outputlist.add(newfile)
newtool.save()
def AddaWorkflowTool(this_tool):
try:
tryexp = WorkflowsTools.objects.get(primary_name=str(this_tool[0]['id']+"_"+this_tool[0]['version']+".json"))
except WorkflowsTools.DoesNotExist:
tryexp = None
print("tool found was not added to the DB. We Add now this new tool")
newtool=WorkflowsTools(primary_name=str(this_tool[0]['id']+"_"+this_tool[0]['version']+".json"),
name=str(this_tool[0]['id']),
version=str(this_tool[0]['version']))
newtool.save()
print("Add the tool definition to the Workflow and link it to the current workflow.")
print("Name of the json file where the tool is define:" +str(this_tool[0]['id']+"_"+this_tool[0]['version']+".json"))
#create a tool
with open(toolsInformation+str(this_tool[0]['id']+"_"+this_tool[0]['version']+".json")) as data_file_tool:
tool = json.load(data_file_tool)
print("#######################input")
for dataInput in tool['function'][0]['input'] :
try:
tryexp = Supportedfiles.objects.get(dataDescription=str(dataInput['dataDescription']))
except Supportedfiles.DoesNotExist:
tryexp = None
newfile=Supportedfiles(dataHandle=str(dataInput['dataHandle']),dataDescription=str(dataInput['dataDescription']),dataFormatEdamOntology=str(dataInput['format'][0]['uri']))
newfile.save()
newtool.inputlist.add(newfile)
newtool.save()
#~ print("#######################dataInpty")
print("#######################output")
for dataInput in tool['function'][0]['input'] :
try:
tryexp = Supportedfiles.objects.get(dataDescription=str(dataInput['dataDescription']))
except Supportedfiles.DoesNotExist:
tryexp = None
newfile=Supportedfiles(dataHandle=str(dataInput['dataHandle']),dataDescription=str(dataInput['dataDescription']),dataFormatEdamOntology=str(dataInput['format'][0]['uri']) )
newfile.save()
newtool.outputlist.add(newfile)
newtool.save()
if __name__ == "__main__":
print("#######################")
print("#######################")
pathTosamtools='/nas_Dir/workflow/Galaxy-Workflow-demo_samtools.ga'
print("Upload a specific workflow to the database : demo_samtools")
uploadAWorkflowToDatabase(pathTosamtools)
print("#######################")
print("#######################")
pathToWorkflow='/nas_Dir/workflow/Galaxy-Workflow-Plasma_mutation.ga'
print("Upload a specific workflow to the database : Plasma_mutation")
uploadAWorkflowToDatabase(pathToWorkflow)
print("JOB DONE")
| mit | 5,939,821,248,430,112,000 | 48.601227 | 196 | 0.590847 | false |
MMaus/mutils | models/slip_doPri-old.py | 1 | 22110 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 23 14:46:14 2011
@author: moritz
"""
# This file implements the SLIP model
from scipy.integrate.vode import dvode, zvode
from scipy.integrate import odeint, ode
from pylab import (zeros, sin, cos, sqrt, array, linspace,
arange, ones_like, hstack, vstack, argmin,
find, interp,
sign)
from copy import deepcopy
def vHop(t,y,params):
"""
test function
'y': x horizontal position
y vertical position
vx horizontal velocity
vy vertical velocity
'params': damping horizontal
ground elasticity / mass
"""
res = zeros(4)
dx = params[0]
ky = params[1]
res[0] = y[2]
res[1] = y[3]
if y[1] < 0:
res[2] = -dx*y[2]
res[3] = -ky*y[1] - 9.81
else:
res[3] = -9.81
return res
def dk_dL(L0,k,L,dE):
"""
computes the required stiffness change and rest length change
to inject energy without changing the spring force
"""
dL = 2.*dE/(k*(L0 - L))
dk = k*((L-L0)/(L-(L0+dL)) - 1.)
return dk,dL
class SimFailError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def SLIP_step(IC,SLIP_params,sim_params = []):
"""
simulates the SLIP
IC: initial state vector, containing y0, vx0
(x0 is assumed to be 0; vy0 = 0 (apex);
also ground level = 0)
SLIP_params:
k
L0
m
alpha
dE: energy change in "midstance" by changing k and L0
g: gravity (negative! should be ~ -9.81 for SI)
"""
alpha = SLIP_params['alpha']
k = SLIP_params['k']
L0 = SLIP_params['L0']
dE = SLIP_params['dE']
g = SLIP_params['g']
m = SLIP_params['m']
y0 = IC[0]
vx0 = IC[1]
if g >= 0:
raise ValueError, "gravity points into wrong direction!"
# concatenate state vector of four elements:
# (1) time to touchdown
# (2) time to vy = 0
# (3) time to takeoff
# (4) time to apex
# (1) and (4) are analytically described
y_land = L0*sin(alpha)
if y0 < y_land:
raise ValueError, "invalid starting condition"
# before starting, define the model:
def SLIP_ode(y,t,params):
"""
defines the ODE of the SLIP, under stance condition
state:
[x
y
vx
vy]
params:
{'L0' : leg rest length
'x0' : leg touchdown position
'k' : spring stiffness
'm' : mass}
"""
dy0 = y[2]
dy1 = y[3]
L = sqrt((y[0]-params['xF'])**2 + y[1]**2)
F = params['k']*(params['L0']-L)
Fx = F*(y[0]-params['xF'])/L
Fy = F*y[1]/L
dy2 = Fx/m
dy3 = Fy/m + params['g']
return hstack([dy0,dy1,dy2,dy3])
def sim_until(IC, params, stop_fcn, tmax = 2.):
"""
simulated the SLIP_ode until stop_fcn has a zero-crossing
includes a refinement of the time at this instant
stop_fcn must be a function of the system state, e.g.
stop_fcn(IC) must exist
this function is especially adapted to the SLIP state,
so it uses dot(x1) = x3, dot(x2) = x4
tmax: maximal simulation time [s]
"""
init_sign = sign(stop_fcn(IC))
#1st: evaluate a certain fraction
tvec_0 = .0001*arange(50)
sim_results = []
sim_tvecs = []
newIC = IC
sim_results.append (odeint(SLIP_ode,newIC,tvec_0,
args=(params,),rtol=1e-12))
sim_tvecs.append(tvec_0)
check_vec = [init_sign*stop_fcn(x) for x in sim_results[-1]]
t_tot = 0.
while min(check_vec) > 0:
newIC = sim_results[-1][-1,:]
sim_results.append ( odeint(SLIP_ode, newIC, tvec_0,
args=(params,),rtol=1e-12))
sim_tvecs.append(tvec_0)
check_vec = [init_sign*stop_fcn(x) for x in sim_results[-1]]
t_tot += tvec_0[-1]
# time exceeded or ground hit
if t_tot > tmax or min(sim_results[-1][:,1] < 0):
raise SimFailError, "simulation failed"
# now: zero-crossing detected
# -> refine!
minidx = find(array(check_vec) < 0)[0]
if minidx == 0:
# this should not happen because the first value in
# check_vec should be BEFORE the zero_crossing by
# construction
raise ValueError, "ERROR: this should not happen!"
# refine simulation by factor 50, but only for two
# adjacent original time frames
newIC = sim_results[-1][minidx-1,:]
sim_results[-1] = sim_results[-1][:minidx,:]
sim_tvecs[-1] = sim_tvecs[-1][:minidx]
# avoid that last position can be the zero-crossing
n_refine = 10000
tvec_0 = linspace(tvec_0[0], tvec_0[1] + 2./n_refine, n_refine+2)
sim_results.append ( odeint(SLIP_ode, newIC, tvec_0,
args=(params,),rtol=1e-12))
sim_tvecs.append(tvec_0)
# linearly interpolate to zero
check_vec = [init_sign*stop_fcn(x) for x in sim_results[-1]]
minidx = find(array(check_vec) < 0)[0]
if minidx == 0:
# this should not happen because the first value in
# check_vec should be BEFORE the zero_crossing by
# construction
raise ValueError, "ERROR: this should not happen! (2)"
# compute location of zero-crossing
y0 = sim_results[-1][minidx-1,:]
y1 = sim_results[-1][minidx,:]
fcn0 = stop_fcn(y0)
fcn1 = stop_fcn(y1)
t0 = tvec_0[minidx-1]
t1 = tvec_0[minidx]
t_zero = t0 - (t1-t0)*fcn0/(fcn1 - fcn0)
# cut last simulation result and replace last values
# by interpolated values
sim_results[-1] = sim_results[-1][:minidx+1,:]
sim_tvecs[-1] = sim_tvecs[-1][:minidx+1]
for coord in arange(sim_results[-1].shape[1]):
sim_results[-1][-1,coord] = interp(
t_zero, [t0,t1],
[sim_results[-1][-2,coord], sim_results[-1][-1,coord]] )
sim_tvecs[-1][-1] = t_zero
#newIC = sim_results[-1][minidx-1,:]
#sim_results[-1] = sim_results[-1][:minidx,:]
#sim_tvecs[-1] = sim_tvecs[-1][:minidx]
#tvec_0 = linspace(tvec_0[0],tvec_0[1],100)
#sim_results.append ( odeint(SLIP_ode, newIC, tvec_0,
# args=(params,),rtol=1e-9))
#sim_tvecs.append(tvec_0)
# concatenate lists
sim_data = vstack( [x[:-1,:] for x in sim_results[:-1] if x.shape[0] > 1]
+ [sim_results[-1],])
sim_time = [sim_tvecs[0],]
for idx in arange(1,len(sim_tvecs)):
sim_time.append(sim_tvecs[idx] + sim_time[-1][-1])
sim_time = hstack([x[:-1] for x in sim_time[:-1]] + [sim_time[-1],])
return sim_data, sim_time
# Section 1: time to touchdown
# TODO: make sampling frequency regular
t_flight1 = sqrt(-2.*(y0 - y_land)/g)
#t_flight = sqrt()
tvec_flight1 = .01*arange(t_flight1*100.)
vy_flight1 = tvec_flight1*g
y_flight1 = y0 + .5*g*(tvec_flight1**2)
x_flight1 = vx0*tvec_flight1
vx_flight1 = vx0*ones_like(tvec_flight1)
# Section 2: time to vy = 0
# approach: calculate forward -> estimate interval of
# zero position of vy -> refine simulation in that interval
# until a point with vy sufficiently close to zero is in the
# resulting vector
params = {'L0' : L0,
'xF' : t_flight1*vx0 + L0*cos(alpha),
'k' : k,
'm' : m,
'g' : g}
IC = array([t_flight1*vx0, y_land, vx0, t_flight1*g])
# initial guess: L0*cos(alpha)/vx0
#t_sim1 = L0*cos(alpha)/vx0
# TODO: implement sim_fail check!
sim_fail = False
try:
sim_phase2, t_phase2 = sim_until(IC,params,lambda x: x[3])
t_phase2 += t_flight1
except SimFailError:
print 'simulation aborted (phase 2)\n'
sim_fail = True
# Phase 3:
if not sim_fail:
L = sqrt(sim_phase2[-1,1]**2 + (sim_phase2[-1,0]-params['xF'])**2 )
dk, dL = dk_dL(L0,k,L,dE)
params2 = deepcopy(params)
params2['k'] += dk
params2['L0'] += dL
IC = sim_phase2[-1,:]
compression = (lambda x: sqrt(
(x[0]-params2['xF'])**2 + x[1]**2)
- params2['L0'] )
#print ('L:', L, 'dk', dk, 'dL', dL, 'dE', dE, '\ncompression:', compression(IC),
# 'IC', IC)
try:
sim_phase3, t_phase3 = sim_until(IC, params2,compression)
sim_phase3 = sim_phase3[1:,:]
t_phase3 = t_phase3[1:] + t_phase2[-1]
except SimFailError:
print 'simulation aborted (phase 3)\n'
sim_fail = True
# Phase 4:
if not sim_fail:
# time to apex
# TODO: make sampling frequency regular
vy_liftoff = sim_phase3[-1,3]
t_flight2 = -1.*vy_liftoff/g
#t_flight = sqrt()
tvec_flight2 = arange(t_flight2,0,-.001)[::-1]
vy_flight2 = tvec_flight2*g + vy_liftoff
y_flight2 = (sim_phase3[-1,1] + vy_liftoff*tvec_flight2
+ .5*g*(tvec_flight2**2) )
x_flight2 = sim_phase3[-1,0] + sim_phase3[-1,2]*tvec_flight2
vx_flight2 = sim_phase3[-1,2]*ones_like(tvec_flight2)
tvec_flight2 += t_phase3[-1]
# todo: return data until error
if sim_fail:
return { 't': None,
'x': None,
'y': None,
'vx': None,
'vy': None,
'sim_fail': sim_fail,
'dk': None,
'dL': None
}
# finally: concatenate phases
x_final = hstack([x_flight1, sim_phase2[:,0], sim_phase3[:,0], x_flight2 ])
y_final = hstack([y_flight1, sim_phase2[:,1], sim_phase3[:,1], y_flight2 ])
vx_final= hstack([vx_flight1, sim_phase2[:,2], sim_phase3[:,2], vx_flight2])
vy_final= hstack([vy_flight1, sim_phase2[:,3], sim_phase3[:,3], vy_flight2])
tvec_final = hstack([tvec_flight1, t_phase2, t_phase3, tvec_flight2 ])
return {'t': tvec_final,
'x': x_final,
'y': y_final,
'vx': vx_final,
'vy': vy_final,
'sim_fail': sim_fail,
'dk': dk,
'dL': dL,
#'sim_res':sim_res,
#'sim_phase2': sim_phase2_cut,
#'t_phase2': t_phase2_cut
}
def SLIP_step3D(IC,SLIP_params,sim_params = []):
"""
simulates the SLIP in 3D
IC: initial state vector, containing y0, vx0, vz0
(x0 is assumed to be 0;
z0 is assumed to be 0;
vy0 = 0 (apex);
also ground level = 0)
SLIP_params:
k
L0
m
alpha : "original" angle of attack
beta : lateral leg turn
foot position relative to CoM in flight:
xF = vx0*t_flight + L0*cos(alpha)*cos(beta)
yF = -L0*sin(alpha)
zF = vz0*t_flight - L0*cos(alpha)*sin(beta)
dE: energy change in "midstance" by changing k and L0
g: gravity (negative! should be ~ -9.81 for SI)
"""
alpha = SLIP_params['alpha']
beta = SLIP_params['beta']
k = SLIP_params['k']
L0 = SLIP_params['L0']
dE = SLIP_params['dE']
g = SLIP_params['g']
m = SLIP_params['m']
y0 = IC[0]
vx0 = IC[1]
vz0 = IC[2]
if g >= 0:
raise ValueError, "gravity points into wrong direction!"
# concatenate state vector of four elements:
# (1) time to touchdown
# (2) time to vy = 0
# (3) time to takeoff
# (4) time to apex
# (1) and (4) are analytically described
y_land = L0*sin(alpha)
if y0 < y_land:
raise ValueError, "invalid starting condition"
# before starting, define the model:
def SLIP_ode(y,t,params):
"""
defines the ODE of the SLIP, under stance condition
state:
[x
y
z
vx
vy
vz]
params:
{'L0' : leg rest length
'x0' : leg touchdown position
'k' : spring stiffness
'm' : mass
'xF' : anterior foot position
'zF' : lateral foot position }
"""
dy0 = y[3]
dy1 = y[4]
dy2 = y[5]
L = sqrt((y[0]-params['xF'])**2 + y[1]**2 + (y[2]-params['zF'])**2)
F = params['k']*(params['L0']-L)
Fx = F*(y[0]-params['xF'])/L
Fy = F*y[1]/L
Fz = F*(y[2]-params['zF'])/L
dy3 = Fx/m
dy4 = Fy/m + params['g']
dy5 = Fz/m
return hstack([dy0,dy1,dy2,dy3,dy4,dy5])
def sim_until(IC, params, stop_fcn, tmax = 2.):
"""
simulated the SLIP_ode until stop_fcn has a zero-crossing
includes a refinement of the time at this instant
stop_fcn must be a function of the system state, e.g.
stop_fcn(IC) must exist
this function is especially adapted to the SLIP state,
so it uses dot(x1) = x3, dot(x2) = x4
tmax: maximal simulation time [s]
"""
init_sign = sign(stop_fcn(IC))
#1st: evaluate a certain fraction
tvec_0 = .001*arange(50)
sim_results = []
sim_tvecs = []
newIC = IC
sim_results.append (odeint(SLIP_ode,newIC,tvec_0,
args=(params,),rtol=1e-9))
sim_tvecs.append(tvec_0)
check_vec = [init_sign*stop_fcn(x) for x in sim_results[-1]]
t_tot = 0.
while min(check_vec) > 0:
newIC = sim_results[-1][-1,:]
sim_results.append ( odeint(SLIP_ode, newIC, tvec_0,
args=(params,),rtol=1e-9))
sim_tvecs.append(tvec_0)
check_vec = [init_sign*stop_fcn(x) for x in sim_results[-1]]
t_tot += tvec_0[-1]
# time exceeded or ground hit
if t_tot > tmax or min(sim_results[-1][:,1] < 0):
raise SimFailError, "simulation failed"
# now: zero-crossing detected
# -> refine!
minidx = find(array(check_vec) < 0)[0]
if minidx == 0:
# this should not happen because the first value in
# check_vec should be BEFORE the zero_crossing by
# construction
raise ValueError, "ERROR: this should not happen!"
# refine simulation by factor 50, but only for two
# adjacent original time frames
newIC = sim_results[-1][minidx-1,:]
sim_results[-1] = sim_results[-1][:minidx,:]
sim_tvecs[-1] = sim_tvecs[-1][:minidx]
# avoid that last position can be the zero-crossing
n_refine = 100
tvec_0 = linspace(tvec_0[0], tvec_0[1] + 2./n_refine, n_refine+2)
sim_results.append ( odeint(SLIP_ode, newIC, tvec_0,
args=(params,),rtol=1e-9))
sim_tvecs.append(tvec_0)
# linearly interpolate to zero
check_vec = [init_sign*stop_fcn(x) for x in sim_results[-1]]
minidx = find(array(check_vec) < 0)[0]
if minidx == 0:
# this should not happen because the first value in
# check_vec should be BEFORE the zero_crossing by
# construction
raise ValueError, "ERROR: this should not happen! (2)"
# compute location of zero-crossing
y0 = sim_results[-1][minidx-1,:]
y1 = sim_results[-1][minidx,:]
fcn0 = stop_fcn(y0)
fcn1 = stop_fcn(y1)
t0 = tvec_0[minidx-1]
t1 = tvec_0[minidx]
t_zero = t0 - (t1-t0)*fcn0/(fcn1 - fcn0)
# cut last simulation result and replace last values
# by interpolated values
sim_results[-1] = sim_results[-1][:minidx+1,:]
sim_tvecs[-1] = sim_tvecs[-1][:minidx+1]
for coord in arange(sim_results[-1].shape[1]):
sim_results[-1][-1,coord] = interp(
t_zero, [t0,t1],
[sim_results[-1][-2,coord], sim_results[-1][-1,coord]] )
sim_tvecs[-1][-1] = t_zero
#newIC = sim_results[-1][minidx-1,:]
#sim_results[-1] = sim_results[-1][:minidx,:]
#sim_tvecs[-1] = sim_tvecs[-1][:minidx]
#tvec_0 = linspace(tvec_0[0],tvec_0[1],100)
#sim_results.append ( odeint(SLIP_ode, newIC, tvec_0,
# args=(params,),rtol=1e-9))
#sim_tvecs.append(tvec_0)
# concatenate lists
sim_data = vstack( [x[:-1,:] for x in sim_results[:-1] if x.shape[0] > 1]
+ [sim_results[-1],])
sim_time = [sim_tvecs[0],]
for idx in arange(1,len(sim_tvecs)):
sim_time.append(sim_tvecs[idx] + sim_time[-1][-1])
sim_time = hstack([x[:-1] for x in sim_time[:-1]] + [sim_time[-1],])
return sim_data, sim_time
# Section 1: time to touchdown
# TODO: make sampling frequency regular
t_flight1 = sqrt(-2.*(y0 - y_land)/g)
#t_flight = sqrt()
tvec_flight1 = .01*arange(t_flight1*100.)
vy_flight1 = tvec_flight1*g
y_flight1 = y0 + .5*g*(tvec_flight1**2)
x_flight1 = vx0*tvec_flight1
vx_flight1 = vx0*ones_like(tvec_flight1)
z_flight1 = vz0*tvec_flight1
vz_flight1 = vz0*ones_like(tvec_flight1)
x_TD = vx0*t_flight1
z_TD = vz0*t_flight1
# Section 2: time to vy = 0
# approach: calculate forward -> estimate interval of
# zero position of vy -> refine simulation in that interval
# until a point with vy sufficiently close to zero is in the
# resulting vector
params = {'L0' : L0,
'xF' : t_flight1*vx0 + L0*cos(alpha)*cos(beta),
'zF' : t_flight1*vz0 - L0*cos(alpha)*sin(beta),
'k' : k,
'm' : m,
'g' : g}
IC = array([x_TD, y_land, z_TD, vx0, t_flight1*g, vz0])
# initial guess: L0*cos(alpha)/vx0
#t_sim1 = L0*cos(alpha)/vx0
# TODO: implement sim_fail check!
sim_fail = False
try:
sim_phase2, t_phase2 = sim_until(IC,params,lambda x: x[4])
t_phase2 += t_flight1
except SimFailError:
print 'simulation aborted (phase 2)\n'
sim_fail = True
# Phase 3:
if not sim_fail:
L = sqrt(sim_phase2[-1,1]**2
+ (sim_phase2[-1,0]-params['xF'])**2
+ (sim_phase2[-1,2]-params['zF'])**2 )
dk, dL = dk_dL(L0,k,L,dE)
params2 = deepcopy(params)
params2['k'] += dk
params2['L0'] += dL
IC = sim_phase2[-1,:]
compression = (lambda x: sqrt(
(x[0]-params2['xF'])**2 + x[1]**2
+(x[2]-params['zF'])**2)
- params2['L0'] )
#print ('L:', L, 'dk', dk, 'dL', dL, 'dE', dE, '\ncompression:', compression(IC),
# 'IC', IC)
try:
sim_phase3, t_phase3 = sim_until(IC, params2,compression)
sim_phase3 = sim_phase3[1:,:]
t_phase3 = t_phase3[1:] + t_phase2[-1]
except SimFailError:
print 'simulation aborted (phase 3)\n'
sim_fail = True
# Phase 4:
if not sim_fail:
# time to apex
# TODO: make sampling frequency regular
vy_liftoff = sim_phase3[-1,4]
#vz_liftoff = sim_phase3[-1,5]
t_flight2 = -1.*vy_liftoff/g
#t_flight = sqrt()
tvec_flight2 = arange(t_flight2,0,-.001)[::-1]
vy_flight2 = tvec_flight2*g + vy_liftoff
y_flight2 = (sim_phase3[-1,1] + vy_liftoff*tvec_flight2
+ .5*g*(tvec_flight2**2) )
x_flight2 = sim_phase3[-1,0] + sim_phase3[-1,3]*tvec_flight2
vx_flight2 = sim_phase3[-1,3]*ones_like(tvec_flight2)
z_flight2 = sim_phase3[-1,2] + sim_phase3[-1,5]*tvec_flight2
vz_flight2 = sim_phase3[-1,5]*ones_like(tvec_flight2)
#print tvec_flight2
tvec_flight2 += t_phase3[-1]
# todo: return data until error
if sim_fail:
return { 't': None,
'x': None,
'y': None,
'z': None,
'vx': None,
'vy': None,
'vz': None,
'sim_fail': sim_fail,
'dk': None,
'dL': None
}
# finally: concatenate phases
x_final = hstack([x_flight1, sim_phase2[:,0], sim_phase3[:,0], x_flight2 ])
y_final = hstack([y_flight1, sim_phase2[:,1], sim_phase3[:,1], y_flight2 ])
z_final = hstack([z_flight1, sim_phase2[:,2], sim_phase3[:,2], z_flight2 ])
vx_final= hstack([vx_flight1, sim_phase2[:,3], sim_phase3[:,3], vx_flight2])
vy_final= hstack([vy_flight1, sim_phase2[:,4], sim_phase3[:,4], vy_flight2])
vz_final= hstack([vz_flight1, sim_phase2[:,5], sim_phase3[:,5], vz_flight2])
tvec_final = hstack([tvec_flight1, t_phase2, t_phase3, tvec_flight2 ])
return {'t': tvec_final,
'x': x_final,
'y': y_final,
'z': z_final,
'vx': vx_final,
'vy': vy_final,
'vz': vz_final,
'sim_fail': sim_fail,
'dk': dk,
'dL': dL,
#'sim_res':sim_res,
#'sim_phase2': sim_phase2_cut,
#'t_phase2': t_phase2_cut
}
| gpl-2.0 | -5,720,251,461,055,179,000 | 33.332298 | 89 | 0.501131 | false |
rasbt/protein-science | tutorials/substructure_alignment/Scripts/multimol2_rmsd_align.py | 1 | 1043 | # Sebastian Raschka 2014
#
# Aligns multiple mol2 files to a reference mol2 files and
# writes the aligned targets to the hard drive.
#
# USAGE from command shell command line:
# %> python3 multimol2_rmsd_align.py input_dir/ output_dir/ ref.mol2 smiles_string
import subprocess
import os
import sys
RMSD_TOOL = "/soft/linux64/.../oechem-utilities/rmsd" # put the correct path to the RMSD bin here
try:
assert len(sys.argv) == 5
INPUT_DIR = sys.argv[1]
TARGET_DIR = sys.argv[2]
REFERENCE_MOL = sys.argv[3]
SMILES = sys.argv[4]
if not os.path.exists(TARGET_DIR):
os.mkdir(TARGET_DIR)
for i in [m for m in os.listdir(INPUT_DIR) if m.endswith('.mol2')]:
in_mol = INPUT_DIR + '/' + i
out_mol = TARGET_DIR + '/' + i
subprocess.call("{} -in {} -ref {} -overlay -out {} -smarts '{}'".format(
RMSD_TOOL, in_mol, REFERENCE_MOL, out_mol, SMILES), shell=True)
except:
print("ERROR\nUSAGE: python3 multimol2_rmsd_align.py input_dir/ output_dir/ ref.mol2 smiles_string")
| gpl-3.0 | 2,310,414,354,662,612,000 | 30.606061 | 104 | 0.651007 | false |
CymricNPG/include_cleaner | dependency_graph.py | 1 | 2889 | #!/usr/bin/python
import os
from common import prepare_include_data, log_block, build_lib_dependencies, find_package_for_file
import config
__author__ = '[email protected]'
myConfig = config.get_config()
def write_lib_graph(data):
log_block("writing library dependency graph")
dependencies, ignore = build_lib_dependencies(data)
with open('libraries.gv', 'w') as output:
output.write("digraph G {\n")
for src in dependencies.keys():
for dst in dependencies[src]:
line = ""
if src in myConfig.dependencies and dst in myConfig.dependencies[src]:
line = src + " -> " + dst + ";\n"
else:
line = src + " -> " + dst + " [color = red];\n"
output.write(line)
for src in myConfig.dependencies:
for dst in myConfig.dependencies[src]:
if src in dependencies and dst in dependencies[src]:
continue
else:
line = src + " -> " + dst + " [color = blue];\n"
output.write(line)
output.write("}\n")
def build_package_cluster_name(container):
"""
hack to create a cluster of packages
:param container: the name of the container (normally the library name)
:return: add "cluster_" to the container name
"""
if myConfig.ignore_package_cluster:
return container
else:
return "cluster_" + container
def write_package_graph(data):
log_block("writing package dependency graph")
already_written = {}
with open('packages.gv', 'w') as output:
output.write("digraph G {\n")
for container in myConfig.libraries:
output.write(" subgraph \"" + build_package_cluster_name(container) + "\" {\n")
for package in myConfig.libraries[container]:
output.write(" " + package + ";\n")
output.write(" }\n")
for cfile in data.files:
src = find_package_for_file(cfile, data)
if src is None:
continue
for include in data.all_includes[cfile]:
dst = None
if include in data.include_path_mapping:
dst = find_package_for_file(data.include_path_mapping[include], data)
else:
dst = find_package_for_file(include, data)
if dst is None or src is dst:
continue
line = src + " -> " + dst + ";\n"
if line not in already_written:
output.write(line)
already_written[line] = True
output.write("}\n")
data = prepare_include_data()
write_lib_graph(data)
write_package_graph(data)
os.system("dot -Tjpg packages.gv -opackages.jpg")
os.system("dot -Tjpg libraries.gv -olibraries.jpg")
| gpl-3.0 | -1,794,301,910,090,229,000 | 32.593023 | 97 | 0.553479 | false |
mensi/cydra | cydra/permission/htpasswd.py | 1 | 3013 | # -*- coding: utf-8 -*-
import os
import hashlib
from passlib.apache import HtpasswdFile
from cydra.component import Component, implements
from cydra.permission import User
from cydra.permission.interfaces import IUserTranslator, IUserAuthenticator, IUserStore
from cydra.error import InsufficientConfiguration
import logging
logger = logging.getLogger(__name__)
import warnings
class HtpasswdUser(User):
supports_check_password = True
supports_set_password = True
valid_for_authentication = True
def __init__(self, htpasswdusers, userid, **kwargs):
super(HtpasswdUser, self).__init__(htpasswdusers.compmgr, userid, **kwargs)
self.htpasswd = htpasswdusers.htpasswd
def check_password(self, password):
self.htpasswd.load_if_changed()
return self.htpasswd.check_password(self.userid, password)
def set_password(self, password):
self.htpasswd.load_if_changed()
self.htpasswd.set_password(self.id, password)
self.htpasswd.save()
class HtpasswdUsers(Component):
implements(IUserAuthenticator)
implements(IUserTranslator)
implements(IUserStore)
def __init__(self):
config = self.get_component_config()
if 'file' not in config:
raise InsufficientConfiguration(missing='file', component=self.get_component_name())
self.htpasswd = HtpasswdFile(config['file'])
def username_to_user(self, username):
self.htpasswd.load_if_changed()
if username in self.htpasswd.users():
return HtpasswdUser(self, username, username=username, full_name=username)
def userid_to_user(self, userid):
if userid is None or userid == '*':
warnings.warn("You should not call this directly. Use cydra.get_user()", DeprecationWarning, stacklevel=2)
return self.compmgr.get_user(userid='*')
self.htpasswd.load_if_changed()
if userid in self.htpasswd.users():
return HtpasswdUser(self, userid, username=userid, full_name=userid)
else:
# since the client was looking for a specific ID,
# we return a dummy user object with empty data
return User(self, userid, full_name='N/A')
def groupid_to_group(self, groupid):
pass
def user_password(self, user, password):
self.htpasswd.load_if_changed()
return self.htpasswd.check_password(user.userid, password)
def create_user(self, **kwargs):
self.htpasswd.load_if_changed()
userid = None
if 'id' in kwargs:
userid = kwargs['id']
elif 'username' in kwargs:
userid = kwargs['username']
else:
raise ValueError("No username/id specified")
if userid in self.htpasswd.users():
raise ValueError("User with this id already exists")
else:
self.htpasswd.set_password(userid, hashlib.sha1(os.urandom(8)).hexdigest())
self.htpasswd.save()
return userid
| gpl-3.0 | 4,904,282,968,786,408,000 | 31.75 | 118 | 0.658812 | false |
burnpanck/traits | traits/etsconfig/tests/test_etsconfig.py | 1 | 6766 | """ Tests the 'ETSConfig' configuration object. """
# Standard library imports.
import contextlib
import os
import shutil
import sys
import tempfile
import time
import unittest
# Enthought library imports.
from traits.etsconfig.api import ETSConfig
@contextlib.contextmanager
def temporary_directory():
"""
Context manager to create and clean up a temporary directory.
"""
temp_dir = tempfile.mkdtemp()
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir)
@contextlib.contextmanager
def restore_mapping_entry(mapping, key):
"""
Context manager that restores a mapping entry to its previous
state on exit.
"""
missing = object()
old_value = mapping.get(key, missing)
try:
yield
finally:
if old_value is missing:
mapping.pop(key, None)
else:
mapping[key] = old_value
@contextlib.contextmanager
def temporary_home_directory():
"""
Context manager that temporarily remaps HOME / APPDATA
to a temporary directory.
"""
# Use the same recipe as in ETSConfig._initialize_application_data
# to determine the home directory.
home_var = 'APPDATA' if sys.platform == 'win32' else 'HOME'
with temporary_directory() as temp_home:
with restore_mapping_entry(os.environ, home_var):
os.environ[home_var] = temp_home
yield
class ETSConfigTestCase(unittest.TestCase):
""" Tests the 'ETSConfig' configuration object. """
###########################################################################
# 'TestCase' interface.
###########################################################################
#### public methods #######################################################
def setUp(self):
"""
Prepares the test fixture before each test method is called.
"""
# Make a fresh instance each time.
self.ETSConfig = type(ETSConfig)()
def run(self, result=None):
# Extend TestCase.run to use a temporary home directory.
with temporary_home_directory():
super(ETSConfigTestCase, self).run(result)
###########################################################################
# 'ETSConfigTestCase' interface.
###########################################################################
#### public methods #######################################################
def test_application_data(self):
"""
application data
"""
dirname = self.ETSConfig.application_data
self.assertEqual(os.path.exists(dirname), True)
self.assertEqual(os.path.isdir(dirname), True)
return
def test_set_application_data(self):
"""
set application data
"""
old = self.ETSConfig.application_data
self.ETSConfig.application_data = 'foo'
self.assertEqual('foo', self.ETSConfig.application_data)
self.ETSConfig.application_data = old
self.assertEqual(old, self.ETSConfig.application_data)
return
def test_application_data_is_idempotent(self):
"""
application data is idempotent
"""
# Just do the previous test again!
self.test_application_data()
self.test_application_data()
return
def test_write_to_application_data_directory(self):
"""
write to application data directory
"""
self.ETSConfig.company = 'Blah'
dirname = self.ETSConfig.application_data
path = os.path.join(dirname, 'dummy.txt')
data = str(time.time())
f = open(path, 'w')
f.write(data)
f.close()
self.assertEqual(os.path.exists(path), True)
f = open(path)
result = f.read()
f.close()
os.remove(path)
self.assertEqual(data, result)
return
def test_default_company(self):
"""
default company
"""
self.assertEqual(self.ETSConfig.company, 'Enthought')
return
def test_set_company(self):
"""
set company
"""
old = self.ETSConfig.company
self.ETSConfig.company = 'foo'
self.assertEqual('foo', self.ETSConfig.company)
self.ETSConfig.company = old
self.assertEqual(old, self.ETSConfig.company)
return
def _test_default_application_home(self):
"""
application home
"""
# This test is only valid when run with the 'main' at the end of this
# file: "python app_dat_locator_test_case.py", in which case the
# app_name will be the directory this file is in ('tests').
app_home = self.ETSConfig.application_home
(dirname, app_name) = os.path.split(app_home)
self.assertEqual(dirname, self.ETSConfig.application_data)
self.assertEqual(app_name, 'tests')
def test_user_data(self):
"""
user data
"""
dirname = self.ETSConfig.user_data
self.assertEqual(os.path.exists(dirname), True)
self.assertEqual(os.path.isdir(dirname), True)
return
def test_set_user_data(self):
"""
set user data
"""
old = self.ETSConfig.user_data
self.ETSConfig.user_data = 'foo'
self.assertEqual('foo', self.ETSConfig.user_data)
self.ETSConfig.user_data = old
self.assertEqual(old, self.ETSConfig.user_data)
return
def test_user_data_is_idempotent(self):
"""
user data is idempotent
"""
# Just do the previous test again!
self.test_user_data()
return
def test_write_to_user_data_directory(self):
"""
write to user data directory
"""
self.ETSConfig.company = 'Blah'
dirname = self.ETSConfig.user_data
path = os.path.join(dirname, 'dummy.txt')
data = str(time.time())
f = open(path, 'w')
f.write(data)
f.close()
self.assertEqual(os.path.exists(path), True)
f = open(path)
result = f.read()
f.close()
os.remove(path)
self.assertEqual(data, result)
return
# For running as an individual set of tests.
if __name__ == '__main__':
# Add the non-default test of application_home...non-default because it
# must be run using this module as a script to be valid.
suite = unittest.TestLoader().loadTestsFromTestCase(ETSConfigTestCase)
suite.addTest(ETSConfigTestCase('_test_default_application_home'))
unittest.TextTestRunner(verbosity=2).run(suite)
#### EOF ######################################################################
| bsd-3-clause | -2,372,237,688,121,575,400 | 22.09215 | 79 | 0.557198 | false |
qxsch/QXSConsolas | examples/CopyThat/copyThat/CTSplunk/Test.py | 1 | 2084 | #!/usr/bin/python
import logging, os
from QXSConsolas.Cli import CliApp
from QXSConsolas.Command import SSH, call
@CliApp(
Name = "Tests something",
Description = "A very nice description cannot live without the text",
Opts = [
{ "argument": "--name:", "default": None, "multiple": True, "description": "den namen eingeben", "valuename": "NAME" },
{ "argument": "--verbose::", "default": 0, "description": "schwatzen?", "valuename": "VERBOSITY" },
{ "argument": "-v::", "default": 0, "references": "--verbose::", "valuename": "VERBOSITY" },
{ "argument": "--name=", "default": None, "description": "", "valuename": "NAME"}
]
)
def Test(app):
print("Hello " + os.getlogin() + " - (Real user even after sudo / su)")
print("Options:")
print(app.options)
print("Arguments:")
print(app.arguments)
print("System Configuration:")
print(app.configuration)
if not app.data is None:
print("Data:")
print(app.data.dump())
# iterate the configuration keys
s = ""
for key in app.data:
s = s + " " + str(app.data[key])
print(s.strip())
print("")
# injected logger
app.logger.warning("hello from the injected loggger")
# Using explicitely the root logger always logs to the console
logging.debug("This is an info of the root logger")
# Logging from myapp.lib
myapp_cli_logger = logging.getLogger('myapp.cli')
myapp_cli_logger.info("This is an info from myapp.cli") # Not recorded
myapp_cli_logger.warning("This is a warning from myapp.cli") # -> sample.log
myapp_cli_logger.error("This is an error from myapp.cli") # -> sample.log
myapp_cli_logger.critical("This is a critical from myapp.cli") # -> sample.log
print(call(["echo", ["hi", "$x", "a"]], shell = True))
print(call(["./test.sh", "QXS"], shell = True))
print(call(["./test.sh", "QXS"], shell = False))
print(1/0)
| gpl-3.0 | -3,096,518,374,377,826,000 | 41.530612 | 136 | 0.573417 | false |
mozilla/ichnaea | ichnaea/data/tests/test_datamap.py | 1 | 5174 | from collections import defaultdict
from datetime import timedelta
from ichnaea.data.tasks import cleanup_datamap, update_datamap
from ichnaea.models.content import DataMap, encode_datamap_grid
from ichnaea import util
class TestDataMapCleaner(object):
@property
def today(self):
return util.utcnow().date()
def _one(self, lat, lon, time):
lat, lon = DataMap.scale(lat, lon)
return DataMap.shard_model(lat, lon)(
grid=(lat, lon), created=time, modified=time
)
def test_empty(self, celery, session):
for shard_id, shard in DataMap.shards().items():
cleanup_datamap.delay(shard_id=shard_id).get()
assert session.query(shard).count() == 0
def test_cleanup(self, celery, session):
session.add_all(
[
self._one(37.0, 6.0, self.today),
self._one(37.0, 6.1, self.today - timedelta(days=366)),
self._one(37.0, 4.0, self.today),
self._one(37.0, 4.1, self.today - timedelta(days=366)),
self._one(10.0, 6.0, self.today),
self._one(10.0, 6.1, self.today - timedelta(days=366)),
self._one(10.0, 4.0, self.today),
self._one(10.0, 4.1, self.today - timedelta(days=366)),
]
)
session.flush()
for shard_id, shard in DataMap.shards().items():
cleanup_datamap.delay(shard_id=shard_id).get()
assert session.query(shard).count() == 1
class TestDataMapUpdater(object):
@property
def today(self):
return util.utcnow().date()
@property
def yesterday(self):
return self.today - timedelta(days=1)
def _add(self, session, entries):
for lat, lon, time in entries:
lat, lon = DataMap.scale(lat, lon)
session.add(
DataMap.shard_model(lat, lon)(
grid=(lat, lon), created=time, modified=time
)
)
session.flush()
def _check_position(self, stat, lat, lon):
assert stat.grid == DataMap.scale(lat, lon)
def _queue(self, celery, pairs):
grids = defaultdict(list)
for lat, lon in pairs:
lat, lon = DataMap.scale(lat, lon)
shard_id = DataMap.shard_id(lat, lon)
grids[shard_id].append(encode_datamap_grid(lat, lon))
for shard_id, values in grids.items():
queue = celery.data_queues["update_datamap_" + shard_id]
queue.enqueue(list(values))
def test_empty(self, celery, session):
for shard_id, shard in DataMap.shards().items():
update_datamap.delay(shard_id=shard_id).get()
assert session.query(shard).count() == 0
def test_one(self, celery, session):
lat = 1.234567
lon = 2.345678
shard_id = DataMap.shard_id(*DataMap.scale(lat, lon))
self._queue(celery, [(lat, lon)])
update_datamap.delay(shard_id=shard_id).get()
grids = session.query(DataMap.shards()[shard_id]).all()
assert len(grids) == 1
self._check_position(grids[0], 1.235, 2.346)
assert grids[0].created == self.today
assert grids[0].modified == self.today
def test_update(self, celery, session):
lat = 1.0
lon = 2.0
shard_id = DataMap.shard_id(*DataMap.scale(lat, lon))
self._add(session, [(lat, lon, self.yesterday)])
self._queue(celery, [(lat, lon)])
update_datamap.delay(shard_id=shard_id).get()
grids = session.query(DataMap.shards()[shard_id]).all()
assert len(grids) == 1
self._check_position(grids[0], 1.0, 2.0)
assert grids[0].created == self.yesterday
assert grids[0].modified == self.today
def test_multiple(self, celery, session):
self._add(
session,
[
(0.0, 1.0, self.today),
(1.0, 2.0, self.yesterday),
(-10.0, 40.0, self.yesterday),
],
)
self._queue(
celery,
[
(0.0, 1.0),
(1.0, 2.0),
(1.0, 2.0),
(40.0011, 3.0011),
(40.0012, 3.0012),
(40.0013, 3.0013),
(0.0, 0.0),
(1.0, 2.0),
(1.00001, 2.00001),
],
)
for shard_id in DataMap.shards():
update_datamap.delay(shard_id=shard_id).get()
rows = []
for shard in DataMap.shards().values():
rows.extend(session.query(shard).all())
assert len(rows) == 5
created = set()
modified = set()
positions = set()
for row in rows:
lat, lon = row.grid
created.add(row.created)
modified.add(row.modified)
positions.add((lat / 1000.0, lon / 1000.0))
assert created == set([self.today, self.yesterday])
assert modified == set([self.today, self.yesterday])
assert positions == set(
[(0.0, 0.0), (0.0, 1.0), (1.0, 2.0), (-10.0, 40.0), (40.001, 3.001)]
)
| apache-2.0 | -5,722,421,735,843,132,000 | 32.597403 | 80 | 0.529378 | false |
bmazin/ARCONS-pipeline | flatcal/illuminationCal.py | 1 | 16204 | #!/bin/python
"""
Author: Matt Strader        Date: August 19, 2012
Opens twilight flat h5 files and makes an image for each wavelength bin.
Each wavelength slice is sigma-clipped and smoothed, and an illumination
weight is calculated for each pixel as the mean of the smoothed image
divided by the smoothed value at that pixel.
The weights are written out in an h5 file
"""
import sys,os
import tables
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from functools import partial
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_pdf import PdfPages
from util.popup import PopUp,plotArray,pop
from util.ObsFile import ObsFile
from util.readDict import readDict
from util.FileName import FileName
from util.utils import nearestNRobustMeanFilter
import hotpix.hotPixels as hp
from astropy.stats.funcs import sigma_clip
def onscroll_cbar(fig, event):
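    """
    Scroll-wheel callback for an interactive figure: when the cursor is over the
    figure's colorbar axes, scrolling up (down) stretches (shrinks) the upper
    color limit by 5% so the image stretch can be adjusted on the fly.
    """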
if event.inaxes is fig.cbar.ax:
increment=0.05
currentClim = fig.cbar.mappable.get_clim()
if event.button == 'up':
newClim = (currentClim[0],(1.+increment)*currentClim[1])
if event.button == 'down':
newClim = (currentClim[0],(1.-increment)*currentClim[1])
fig.cbar.mappable.set_clim(newClim)
fig.canvas.draw()
def onclick_cbar(fig,event):
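    """
    Mouse-click callback for an interactive figure's colorbar: a left click at
    height y rescales the upper color limit by a factor of y, and a right click
    rescales it by a factor of 1/y.
    """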
if event.inaxes is fig.cbar.ax:
if event.button == 1:
fig.oldClim = fig.cbar.mappable.get_clim()
fig.cbar.mappable.set_clim(fig.oldClim[0],event.ydata*fig.oldClim[1])
fig.canvas.draw()
if event.button == 3:
fig.oldClim = fig.cbar.mappable.get_clim()
fig.cbar.mappable.set_clim(fig.oldClim[0],1/event.ydata*fig.oldClim[1])
fig.canvas.draw()
class FlatCal:
def __init__(self,paramFile):
"""
        opens the flat files, loads the wavelength, time-adjustment, and hot-pixel calibrations, and sets the wavelength binning parameters
"""
self.params = readDict()
self.params.read_from_file(paramFile)
run = self.params['run']
sunsetDate = self.params['sunsetDate']
flatTstamp = self.params['flatTstamp']
wvlSunsetDate = self.params['wvlSunsetDate']
wvlTimestamp = self.params['wvlTimestamp']
obsSequence = self.params['obsSequence']
needTimeAdjust = self.params['needTimeAdjust']
self.deadtime = self.params['deadtime'] #from firmware pulse detection
self.intTime = self.params['intTime']
self.timeSpacingCut = self.params['timeSpacingCut']
self.nSigmaClip = self.params['nSigmaClip']
self.nNearest = self.params['nNearest']
obsFNs = [FileName(run=run,date=sunsetDate,tstamp=obsTstamp) for obsTstamp in obsSequence]
self.obsFileNames = [fn.obs() for fn in obsFNs]
self.obsList = [ObsFile(obsFileName) for obsFileName in self.obsFileNames]
timeMaskFileNames = [fn.timeMask() for fn in obsFNs]
timeAdjustFileName = FileName(run=run).timeAdjustments()
print len(self.obsFileNames), 'flat files to co-add'
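        # output file name for the illumination calibration solution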
self.flatCalFileName = FileName(run=run,date=sunsetDate,tstamp=flatTstamp).illumSoln()
if wvlSunsetDate != '':
wvlCalFileName = FileName(run=run,date=wvlSunsetDate,tstamp=wvlTimestamp).calSoln()
for iObs,obs in enumerate(self.obsList):
if wvlSunsetDate != '':
obs.loadWvlCalFile(wvlCalFileName)
else:
obs.loadBestWvlCalFile()
if needTimeAdjust:
obs.loadTimeAdjustmentFile(timeAdjustFileName)
timeMaskFileName = timeMaskFileNames[iObs]
print timeMaskFileName
#Temporary step, remove old hotpix file
#if os.path.exists(timeMaskFileName):
# os.remove(timeMaskFileName)
if not os.path.exists(timeMaskFileName):
print 'Running hotpix for ',obs
hp.findHotPixels(self.obsFileNames[iObs],timeMaskFileName,fwhm=np.inf,useLocalStdDev=True)
print "Flux file pixel mask saved to %s"%(timeMaskFileName)
obs.loadHotPixCalFile(timeMaskFileName)
self.wvlFlags = self.obsList[0].wvlFlagTable
self.nRow = self.obsList[0].nRow
self.nCol = self.obsList[0].nCol
print 'files opened'
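        # set up wavelength bins of constant energy width between wvlStart and wvlStop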
#self.wvlBinWidth = params['wvlBinWidth'] #angstroms
self.energyBinWidth = self.params['energyBinWidth'] #eV
self.wvlStart = self.params['wvlStart'] #angstroms
self.wvlStop = self.params['wvlStop'] #angstroms
self.wvlBinEdges = ObsFile.makeWvlBins(self.energyBinWidth,self.wvlStart,self.wvlStop)
self.intTime = self.params['intTime']
self.countRateCutoff = self.params['countRateCutoff']
self.fractionOfChunksToTrim = self.params['fractionOfChunksToTrim']
#wvlBinEdges includes both lower and upper limits, so number of bins is 1 less than number of edges
self.nWvlBins = len(self.wvlBinEdges)-1
#print 'wrote to',self.flatCalFileName
def __del__(self):
pass
def loadFlatSpectra(self):
self.spectralCubes = []#each element will be the spectral cube for a time chunk
self.cubeEffIntTimes = []
self.frames = []
for iObs,obs in enumerate(self.obsList):
print 'obs',iObs
for firstSec in range(0,obs.getFromHeader('exptime'),self.intTime):
print 'sec',firstSec
cubeDict = obs.getSpectralCube(firstSec=firstSec,integrationTime=self.intTime,weighted=False,wvlBinEdges = self.wvlBinEdges,timeSpacingCut = self.timeSpacingCut)
cube = np.array(cubeDict['cube'],dtype=np.double)
effIntTime = cubeDict['effIntTime']
#add third dimension for broadcasting
effIntTime3d = np.reshape(effIntTime,np.shape(effIntTime)+(1,))
cube /= effIntTime3d
cube[np.isnan(cube)]=0
frame = np.sum(cube,axis=2) #in counts per sec
#correct nonlinearity due to deadtime in firmware
nonlinearFactors = 1. / (1. - frame*self.deadtime)
nonlinearFactors[np.isnan(nonlinearFactors)]=0.
frame = frame * nonlinearFactors
nonlinearFactors = np.reshape(nonlinearFactors,np.shape(nonlinearFactors)+(1,))
cube = cube * nonlinearFactors
self.frames.append(frame)
self.spectralCubes.append(cube)
self.cubeEffIntTimes.append(effIntTime3d)
obs.file.close()
self.spectralCubes = np.array(self.spectralCubes)
self.cubeEffIntTimes = np.array(self.cubeEffIntTimes)
self.countCubes = self.cubeEffIntTimes * self.spectralCubes
self.spectralCubes = self.intTime * self.spectralCubes # in counts
def checkCountRates(self):
medianCountRates = np.array([np.median(frame[frame!=0]) for frame in self.frames])
boolIncludeFrames = medianCountRates <= self.countRateCutoff
#boolIncludeFrames = np.logical_and(boolIncludeFrames,medianCountRates >= 200)
#mask out frames, or cubes from integration time chunks with count rates too high
self.spectralCubes = np.array([cube for cube,boolIncludeFrame in zip(self.spectralCubes,boolIncludeFrames) if boolIncludeFrame==True])
self.frames = [frame for frame,boolIncludeFrame in zip(self.frames,boolIncludeFrames) if boolIncludeFrame==True]
print 'few enough counts in the chunk',zip(medianCountRates,boolIncludeFrames)
def calculateWeights(self):
"""
finds illum cal factors by making an image per wavelength bin, then smoothing it and dividing the mean of the smoothedImage over pixels by the smoothedImage
"""
cubeWeightsList = []
self.averageSpectra = []
deltaWeightsList = []
self.totalCube = np.sum(self.spectralCubes,axis=0) #sum all cubes
self.totalFrame = np.sum(self.totalCube,axis=-1)#sum over wvl
weights = []
for iWvl in xrange(self.nWvlBins):
wvlSlice = self.totalCube[:,:,iWvl]
wvlSlice[wvlSlice == 0] = np.nan
nanMask = np.isnan(wvlSlice)
#do a sigma-clipping on the wvlSlice and insert it back in the cube
maskedWvlSlice = np.ma.array(wvlSlice,mask=nanMask)
clippedWvlSlice = sigma_clip(wvlSlice,sig=self.nSigmaClip,iters=None,cenfunc=np.ma.median)
wvlSlice[clippedWvlSlice.mask] = np.nan
self.totalCube[:,:,iWvl] = wvlSlice
#do a smoothing over the slice
smoothedWvlSlice = nearestNRobustMeanFilter(wvlSlice,n=self.nNearest,nSigmaClip=self.nSigmaClip)
wvlIllumWeights = np.mean(smoothedWvlSlice)/smoothedWvlSlice
weights.append(wvlIllumWeights)
self.weights = np.array(weights)
#move the wvl dimension to the end
self.weights = np.swapaxes(self.weights,0,1)
self.weights = np.swapaxes(self.weights,1,2)
self.deltaWeights = np.zeros_like(self.weights)
self.flags = np.zeros_like(self.weights)
def plotWeightsWvlSlices(self,verbose=True):
flatCalPath,flatCalBasename = os.path.split(self.flatCalFileName)
pdfBasename = os.path.splitext(flatCalBasename)[0]+'_wvlSlices.pdf'
pdfFullPath = os.path.join(flatCalPath,pdfBasename)
pp = PdfPages(pdfFullPath)
nPlotsPerRow = 2
nPlotsPerCol = 4
nPlotsPerPage = nPlotsPerRow*nPlotsPerCol
iPlot = 0
if verbose:
print 'plotting weights in wavelength sliced images'
matplotlib.rcParams['font.size'] = 4
wvls = self.wvlBinEdges[0:-1]
cmap = matplotlib.cm.hot
cmap.set_bad('0.15')
for iWvl,wvl in enumerate(wvls):
if verbose:
print 'wvl ',iWvl
if iPlot % nPlotsPerPage == 0:
fig = plt.figure(figsize=(10,10),dpi=100)
ax = fig.add_subplot(nPlotsPerCol,nPlotsPerRow,iPlot%nPlotsPerPage+1)
ax.set_title(r'Weights %.0f $\AA$'%wvl)
image = self.weights[:,:,iWvl]
handleMatshow = ax.matshow(image,cmap=cmap,origin='lower',vmax=1.5,vmin=.5)
cbar = fig.colorbar(handleMatshow)
if iPlot%nPlotsPerPage == nPlotsPerPage-1:
pp.savefig(fig)
iPlot += 1
if iPlot % nPlotsPerPage == 0:
fig = plt.figure(figsize=(10,10),dpi=100)
ax = fig.add_subplot(nPlotsPerCol,nPlotsPerRow,iPlot%nPlotsPerPage+1)
ax.set_title(r'Twilight Image %.0f $\AA$'%wvl)
image = self.totalCube[:,:,iWvl]
nSdev = 3.
goodImage = image[np.isfinite(image)]
vmax = np.mean(goodImage)+nSdev*np.std(goodImage)
handleMatshow = ax.matshow(image,cmap=cmap,origin='lower',vmax=vmax)
cbar = fig.colorbar(handleMatshow)
if iPlot%nPlotsPerPage == nPlotsPerPage-1:
pp.savefig(fig)
iPlot += 1
pp.savefig(fig)
pp.close()
def plotWeightsByPixel(self,verbose=True):
flatCalPath,flatCalBasename = os.path.split(self.flatCalFileName)
pdfBasename = os.path.splitext(flatCalBasename)[0]+'.pdf'
pdfFullPath = os.path.join(flatCalPath,pdfBasename)
pp = PdfPages(pdfFullPath)
nPlotsPerRow = 2
nPlotsPerCol = 4
nPlotsPerPage = nPlotsPerRow*nPlotsPerCol
iPlot = 0
if verbose:
print 'plotting weights by pixel at ',pdfFullPath
matplotlib.rcParams['font.size'] = 4
wvls = self.wvlBinEdges[0:-1]
nCubes = len(self.spectralCubes)
for iRow in xrange(self.nRow):
if verbose:
print 'row',iRow
for iCol in xrange(self.nCol):
weights = self.weights[iRow,iCol,:]
deltaWeights = self.deltaWeights[iRow,iCol,:]
if iPlot % nPlotsPerPage == 0:
fig = plt.figure(figsize=(10,10),dpi=100)
ax = fig.add_subplot(nPlotsPerCol,nPlotsPerRow,iPlot%nPlotsPerPage+1)
ax.set_ylim(.5,2.)
weights = self.weights[iRow,iCol]
ax.errorbar(wvls,weights,yerr=deltaWeights,label='weights',color='k')
ax.set_title('p %d,%d'%(iRow,iCol))
ax.set_ylabel('weight')
ax.set_xlabel(r'$\lambda$ ($\AA$)')
#ax.plot(wvls,flatSpectrum,label='pixel',alpha=.5)
#ax.legend(loc='lower left')
#ax2.legend(loc='lower right')
if iPlot%nPlotsPerPage == nPlotsPerPage-1 or (iRow == self.nRow-1 and iCol == self.nCol-1):
pp.savefig(fig)
iPlot += 1
#Put a plot of twilight spectrums for this pixel
if iPlot % nPlotsPerPage == 0:
fig = plt.figure(figsize=(10,10),dpi=100)
ax = fig.add_subplot(nPlotsPerCol,nPlotsPerRow,iPlot%nPlotsPerPage+1)
ax.plot(wvls,self.totalCube[iRow,iCol,:],label='spectrum')
ax.set_title('p %d,%d'%(iRow,iCol))
ax.set_xlabel(r'$\lambda$ ($\AA$)')
ax.set_ylabel('twilight cps')
#ax.plot(wvls,flatSpectrum,label='pixel',alpha=.5)
#ax.legend(loc='lower left')
#ax2.legend(loc='lower right')
if iPlot%nPlotsPerPage == nPlotsPerPage-1 or (iRow == self.nRow-1 and iCol == self.nCol-1):
pp.savefig(fig)
#plt.show()
iPlot += 1
pp.close()
def writeWeights(self):
"""
Writes an h5 file to put calculated flat cal factors in
"""
if os.path.isabs(self.flatCalFileName) == True:
fullFlatCalFileName = self.flatCalFileName
else:
scratchDir = os.getenv('MKID_PROC_PATH')
flatDir = os.path.join(scratchDir,'flatCalSolnFiles')
fullFlatCalFileName = os.path.join(flatDir,self.flatCalFileName)
try:
flatCalFile = tables.openFile(fullFlatCalFileName,mode='w')
except:
print 'Error: Couldn\'t create flat cal file, ',fullFlatCalFileName
return
print 'wrote to',self.flatCalFileName
calgroup = flatCalFile.createGroup(flatCalFile.root,'flatcal','Table of flat calibration weights by pixel and wavelength')
caltable = tables.Array(calgroup,'weights',object=self.weights,title='Illumination calibration Weights indexed by pixelRow,pixelCol,wavelengthBin')
errtable = tables.Array(calgroup,'errors',object=self.deltaWeights,title='Errors in Weights indexed by pixelRow,pixelCol,wavelengthBin')
flagtable = tables.Array(calgroup,'flags',object=self.flags,title='Illumination cal flags indexed by pixelRow,pixelCol,wavelengthBin. 0 is Good. By default, all are good.')
bintable = tables.Array(calgroup,'wavelengthBins',object=self.wvlBinEdges,title='Wavelength bin edges corresponding to third dimension of weights array')
flatCalFile.flush()
flatCalFile.close()
npzFileName = os.path.splitext(fullFlatCalFileName)[0]+'.npz'
#calculate total spectra and medians for programs that expect old format flat cal
spectra = np.array(np.sum(self.spectralCubes,axis=0))
wvlAverages = np.zeros(self.nWvlBins)
spectra2d = np.reshape(spectra,[self.nRow*self.nCol,self.nWvlBins ])
np.savez(npzFileName,binEdges=self.wvlBinEdges,spectra=spectra,weights=self.weights,deltaWeights=self.deltaWeights,totalFrame=self.totalFrame,totalCube=self.totalCube,spectralCubes=self.spectralCubes,countCubes=self.countCubes,cubeEffIntTimes=self.cubeEffIntTimes )
if __name__ == '__main__':
paramFile = sys.argv[1]
flatcal = FlatCal(paramFile)
flatcal.loadFlatSpectra()
flatcal.checkCountRates()
flatcal.calculateWeights()
flatcal.writeWeights()
flatcal.plotWeightsWvlSlices()
flatcal.plotWeightsByPixel()
| gpl-2.0 | 2,251,571,697,630,836,500 | 43.152589 | 273 | 0.630462 | false |
MateuszG/django-user-example | app/settings.py | 1 | 2129 | """
Django settings for app project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j7%se66*%_c%^+$q27nukm93$yo@-_km4tt3&61u52b9hbbp9!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'user'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'app.urls'
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'user.User'
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
| mit | 5,518,857,587,328,920,000 | 23.755814 | 71 | 0.720996 | false |
MockyJoke/numbers | ex1/code/monthly_totals.py | 1 | 3064 |
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
def get_precip_data():
return pd.read_csv('precipitation.csv', parse_dates=[2])
def date_to_month(d):
return '%04i-%02i' % (d.year, d.month)
def pivot_months_pandas(data):
"""
Create monthly precipitation totals for each station in the data set.
This should use Pandas methods to manipulate the data.
"""
# ...
#d = d.set_index('date').groupby('name').resample('M').sum()
month_col = data['date'].apply(date_to_month)
data = data.assign(month = month_col)
monthly = data.drop(['station', 'latitude','longitude','elevation','date'], axis=1)
monthly = monthly.groupby(['name','month']).sum().reset_index()
monthly = monthly.pivot(index='name',columns='month',values= "precipitation")
counts = data.drop(['station', 'latitude','longitude','elevation',"precipitation"], axis=1)
counts = counts.groupby(['name','month']).count().reset_index()
counts = counts.pivot(index='name',columns='month',values= "date")
return monthly, counts
def pivot_months_loops(data):
"""
Create monthly precipitation totals for each station in the data set.
This does it the hard way: using Pandas as a dumb data store, and iterating in Python.
"""
# Find all stations and months in the data set.
stations = set()
months = set()
for i,r in data.iterrows():
stations.add(r['name'])
m = date_to_month(r['date'])
months.add(m)
# Aggregate into dictionaries so we can look up later.
stations = sorted(list(stations))
row_to_station = dict(enumerate(stations))
station_to_row = {s: i for i,s in row_to_station.items()}
months = sorted(list(months))
col_to_month = dict(enumerate(months))
month_to_col = {m: i for i,m in col_to_month.items()}
# Create arrays for the data, and fill them.
precip_total = np.zeros((len(row_to_station), 12), dtype=np.uint)
obs_count = np.zeros((len(row_to_station), 12), dtype=np.uint)
for _, row in data.iterrows():
m = date_to_month(row['date'])
r = station_to_row[row['name']]
c = month_to_col[m]
precip_total[r, c] += row['precipitation']
obs_count[r, c] += 1
# Build the DataFrames we needed all along (tidying up the index names while we're at it).
totals = pd.DataFrame(
data=precip_total,
index=stations,
columns=months,
)
totals.index.name = 'name'
totals.columns.name = 'month'
counts = pd.DataFrame(
data=obs_count,
index=stations,
columns=months,
)
counts.index.name = 'name'
counts.columns.name = 'month'
return totals, counts
def main():
data = get_precip_data()
#totals, counts = pivot_months_loops(data)
totals, counts = pivot_months_pandas(data)
totals.to_csv('totals.csv')
counts.to_csv('counts.csv')
np.savez('monthdata.npz', totals=totals.values, counts=counts.values)
if __name__ == '__main__':
main()
| mit | 3,098,951,167,550,225,000 | 27.635514 | 95 | 0.620431 | false |
AsymmetricVentures/asym-logging | asymmetricbase/logging/audit.py | 1 | 4522 | # -*- coding: utf-8 -*-
# Asymmetric Base Framework - A collection of utilities for django frameworks
# Copyright (C) 2013 Asymmetric Ventures Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
class AuditLoggingHandler(logging.Handler):
"""
Performs our Audit logging. If there is a model included in the record,
we will also include Object Retention info
"""
def __init__(self, *args, **kwargs):
super(AuditLoggingHandler, self).__init__(*args, **kwargs)
self.django_request = None
def _get_current_user_info(self):
pass
def emit(self, record):
log_generator = AuditLogGenerator(self.django_request, record)
log_generator.generate()
class AuditLogGenerator(object):
def __init__(self, request, record):
self.request = request
self.record = record
def generate(self):
from django.conf import settings
if getattr(settings, 'IS_IN_TEST', False):
return
if not hasattr(self, 'django_request') or self.django_request is None:
return
self._get_access_type()
self._get_log_type()
self._get_success()
if self._do_ignore_log():
return
self._get_current_user_info()
self._get_ip()
self._get_model()
self._get_view_name()
self._save_object_contnt()
self._save_log_entry()
def _save_log_entry(self):
from django.db import transaction
from .models import AuditEntry
with transaction.commit_on_success():
l = AuditEntry(
log_type = self.log_type,
access_type = self.access_type,
user_id = self.user.id if self.user is not None else None,
ip = self.ip,
message = self.record.msg,
model_name = self.model_str,
view_name = self.view_name,
success = self.success,
object_content = self.object_content,
)
l.save()
def _save_object_contnt(self):
from .models import ObjectContent
from django.core import serializers
if not self._is_save_object_content_required():
self.object_content = None
return
# serializer only accepts iterables!
content_in_json = serializers.serialize('json', [self.model], ensure_ascii = False)
oc = ObjectContent(content_in_json = content_in_json)
oc.save()
self.object_content = oc
def _is_save_object_content_required(self):
from .models import LogEntryType, AccessType
if self.log_type != LogEntryType.MODEL:
return False
if self.access_type not in (AccessType.ADD, AccessType.WRITE):
return False
if not self.success:
return False
return True
def _get_current_user_info(self):
try:
self.user = self.request.user
except AttributeError:
self.user = None
pass
def _get_ip(self):
self.ip = self.request.META['REMOTE_ADDR']
def _get_access_type(self):
try:
self.access_type = self.record.access_type
except AttributeError:
from .models import AccessType
self.access_type = AccessType.OTHER
def _get_log_type(self):
try:
self.log_type = self.record.log_type
except AttributeError:
from .models import LogEntryType
self.log_type = LogEntryType.OTHER
def _get_model(self):
try:
self.model = self.record.model
self.model_str = u"{model.__class__.__name__}.{model.id}".format(model = self.model)
except AttributeError:
self.model = None
self.model_str = None
def _get_view_name(self):
try:
self.view_name = self.record.view_name
except AttributeError:
self.view_name = None
def _get_success(self):
try:
self.success = self.record.success
except AttributeError:
self.success = None
def _do_ignore_log(self):
from django.conf import settings
from .models import LogEntryType, AccessType
if (not settings.LOG_MODEL_ACCESS_READ) and \
self.log_type == LogEntryType.MODEL and \
self.access_type == AccessType.READ and \
self.success == True:
return True
return False
| gpl-2.0 | 5,634,017,856,155,444,000 | 27.086957 | 87 | 0.700796 | false |
nathanielvarona/airflow | airflow/sensors/bash.py | 1 | 3395 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from subprocess import PIPE, STDOUT, Popen
from tempfile import NamedTemporaryFile, TemporaryDirectory, gettempdir
from airflow.sensors.base import BaseSensorOperator
class BashSensor(BaseSensorOperator):
"""
Executes a bash command/script and returns True if and only if the
return code is 0.
:param bash_command: The command, set of commands or reference to a
bash script (must be '.sh') to be executed.
:type bash_command: str
:param env: If env is not None, it must be a mapping that defines the
environment variables for the new process; these are used instead
of inheriting the current process environment, which is the default
behavior. (templated)
:type env: dict
:param output_encoding: output encoding of bash command.
:type output_encoding: str
"""
template_fields = ('bash_command', 'env')
def __init__(self, *, bash_command, env=None, output_encoding='utf-8', **kwargs):
super().__init__(**kwargs)
self.bash_command = bash_command
self.env = env
self.output_encoding = output_encoding
def poke(self, context):
"""
Execute the bash command in a temporary directory
which will be cleaned afterwards
"""
bash_command = self.bash_command
self.log.info("Tmp dir root location: \n %s", gettempdir())
with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as f:
f.write(bytes(bash_command, 'utf_8'))
f.flush()
fname = f.name
script_location = tmp_dir + "/" + fname
self.log.info("Temporary script location: %s", script_location)
self.log.info("Running command: %s", bash_command)
# pylint: disable=subprocess-popen-preexec-fn
with Popen(
['bash', fname],
stdout=PIPE,
stderr=STDOUT,
close_fds=True,
cwd=tmp_dir,
env=self.env,
preexec_fn=os.setsid,
) as resp:
self.log.info("Output:")
for line in iter(resp.stdout.readline, b''):
line = line.decode(self.output_encoding).strip()
self.log.info(line)
resp.wait()
self.log.info("Command exited with return code %s", resp.returncode)
return not resp.returncode
| apache-2.0 | -6,741,692,652,340,570,000 | 39.416667 | 88 | 0.619735 | false |
MichaelAquilina/Simple-Calculator | tests/calculate_test.py | 1 | 1141 | from calculate import calculate
def test_basic_operators():
assert calculate('5+5') == 10
assert calculate('5*5') == 25
assert calculate('10/2') == 5
assert calculate('10-4') == 6
assert calculate('10%4') == 2
assert calculate('6/3') == 2
assert calculate('5-10') == -5
assert calculate('55+20') == 75
def test_operator_precedence():
# Test Precedence of operators
assert calculate('2+7*2') == 16
assert calculate('4-6/3') == 2
assert calculate('4+5%3') == 6
def test_parentheses():
assert calculate('7+(5*2)') == 17
assert calculate('7*(5+2)') == 49
assert calculate('(7*3)+(9/3)') == 24
assert calculate('7+(7*(6+10/(1*2)))') == 84
def test_whitespaces():
assert calculate('5 + 5') == 10
assert calculate('5+5 -3') == 7
assert calculate('5 + 9') == 14
assert calculate('5\t+6') == 11
assert calculate('8\n+5+ 7') == 20
def test_floating_point():
assert calculate('10/4') == 2.5
assert calculate('5.5*2') == 11
assert calculate('100/3') - 33.33 < 0.1
assert calculate('5.5+3.5') == 9
assert calculate('3.4-1.4') == 2
| mit | 555,783,336,309,220,540 | 24.355556 | 48 | 0.573181 | false |
eustislab/horton | horton/meanfield/test/test_scf_ediis.py | 1 | 4344 | # -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
#pylint: skip-file
import numpy as np
from horton import *
from horton.meanfield.test.common import check_hf_cs_hf, check_lih_os_hf, \
check_water_cs_hfs, check_n2_cs_hfs, check_h3_os_hfs, check_h3_os_pbe, \
check_co_cs_pbe
def test_hf_cs_hf():
check_hf_cs_hf(EDIISSCFSolver(threshold=1e-7))
def test_lih_os_hf():
check_lih_os_hf(EDIISSCFSolver(threshold=1e-7))
def test_water_cs_hfs():
check_water_cs_hfs(EDIISSCFSolver(threshold=1e-6))
def test_n2_cs_hfs():
check_n2_cs_hfs(EDIISSCFSolver(threshold=1e-6))
def test_h3_os_hfs():
check_h3_os_hfs(EDIISSCFSolver(threshold=1e-6))
def test_co_cs_pbe():
check_co_cs_pbe(EDIISSCFSolver(threshold=1e-5))
def test_h3_os_pbe():
check_h3_os_pbe(EDIISSCFSolver(threshold=1e-6))
def test_interpol_hf_cs_hf():
fn_fchk = context.get_fn('test/hf_sto3g.fchk')
mol = IOData.from_file(fn_fchk)
olp = mol.obasis.compute_overlap(mol.lf)
kin = mol.obasis.compute_kinetic(mol.lf)
na = mol.obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, mol.lf)
er = mol.obasis.compute_electron_repulsion(mol.lf)
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
RTwoIndexTerm(kin, 'kin'),
RDirectTerm(er, 'hartree'),
RExchangeTerm(er, 'x_hf'),
RTwoIndexTerm(na, 'ne'),
]
ham = REffHam(terms, external)
occ_model = AufbauOccModel(5)
exps = [mol.exp_alpha]
check_interpol_hf(ham, mol.lf, exps, olp, kin, na, occ_model)
def test_interpol_lih_os_hf():
fn_fchk = context.get_fn('test/li_h_3-21G_hf_g09.fchk')
mol = IOData.from_file(fn_fchk)
olp = mol.obasis.compute_overlap(mol.lf)
kin = mol.obasis.compute_kinetic(mol.lf)
na = mol.obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, mol.lf)
er = mol.obasis.compute_electron_repulsion(mol.lf)
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
UTwoIndexTerm(kin, 'kin'),
UDirectTerm(er, 'hartree'),
UExchangeTerm(er, 'x_hf'),
UTwoIndexTerm(na, 'ne'),
]
ham = UEffHam(terms, external)
occ_model = AufbauOccModel(2, 1)
exps = [mol.exp_alpha, mol.exp_beta]
check_interpol_hf(ham, mol.lf, exps, olp, kin, na, occ_model)
def check_interpol_hf(ham, lf, exps, olp, kin, na, occ_model):
guess_core_hamiltonian(olp, kin, na, *exps)
dms = [exp.to_dm() for exp in exps]
scf_solver = EDIISSCFSolver(maxiter=4)
try:
scf_solver(ham, lf, olp, occ_model, *dms)
except NoSCFConvergence:
pass
# test harmonic approximation of the energy. This requires access to the
# internals of the ediis solver.
b, e = scf_solver._history._setup_equations()
x = np.zeros(len(e))
alphas = np.arange(0.0, 1.00001, 0.01)
npt = len(alphas)
energies_approx = np.zeros(npt)
energies_hf = np.zeros(npt)
for ipt in xrange(npt):
x[0] = 1-alphas[ipt]
x[1] = alphas[ipt]
energies_approx[ipt] = np.dot(x, 0.5*np.dot(b, x)-e)
# compute the hf energy
scf_solver._history._build_combinations(x, dms, None)
ham.reset(*dms)
energies_hf[ipt] = ham.compute_energy()
if False:
import matplotlib.pyplot as pt
pt.clf()
pt.plot(alphas, energies_approx, 'k-', label='approx')
pt.plot(alphas, energies_hf, 'r-', label='hf')
pt.legend(loc=0)
pt.savefig('foo.png')
assert abs(energies_approx - energies_hf).max() < 1e-6
| gpl-3.0 | 470,660,891,456,897,600 | 31.177778 | 91 | 0.65884 | false |
Leopardob/dice-dev | core_apps/Home/core_app.py | 1 | 1888 | import os
from PyQt5.QtCore import pyqtSignal, pyqtProperty, qDebug, pyqtSlot
from dice.dice_extras.core_app import CoreApp
from dice.dice_extras.tools.json_sync import JsonList
class Home(CoreApp):
def __init__(self, parent=None):
super(Home, self).__init__(parent)
settings_folder = os.path.join(os.path.expanduser("~"), ".config", "DICE")
if not os.path.exists(settings_folder):
os.makedirs(settings_folder)
self.__recent_projects = JsonList(os.path.join(settings_folder, "recent_projects.json"))
self.__max_recent_projects = 10 # TODO: get this value from settings
recent_projects_changed = pyqtSignal(name="recentProjectsChanged")
@property
def recent_projects(self):
return self.__recent_projects.to_simple_list()
recentProjects = pyqtProperty("QVariantList", fget=recent_projects.fget, notify=recent_projects_changed)
def add_recent_project(self, project_name, location):
recent_locations = [recent_project['location'] for recent_project in self.__recent_projects]
recent_project = {'projectName': project_name, 'location': location}
if location not in recent_locations:
self.__recent_projects.insert(0, recent_project)
while len(self.__recent_projects) > self.__max_recent_projects:
self.__recent_projects.pop()
self.recent_projects_changed.emit()
else:
# add the project on top of the list
index = self.__recent_projects.index(recent_project)
if index != 0:
self.__recent_projects.pop(index)
self.__recent_projects.insert(0, recent_project)
self.recent_projects_changed.emit()
@pyqtSlot(name="closeProject")
def close_project(self):
self.dice.project.close()
self.dice.desk.clear_workspace() | gpl-3.0 | -5,843,003,365,226,793,000 | 39.191489 | 108 | 0.652542 | false |
PressLabs/lithium | lithium/views/base.py | 1 | 1507 | from functools import wraps
import json
from flask import request, Response
from flask.ext.classy import FlaskView
def get_request_type():
types = {
'application/json': 'json',
'application/xml': 'xml'
}
if 'Content-Type' in request.headers:
if request.headers['Content-Type'] in types:
return types[request.headers['Content-Type']]
return 'html'
def serialize_response(request_type, response):
serializers = {
'json': lambda response: json.dumps(response),
'xml': lambda response: json.dumps(response),
}
if isinstance(response, basestring) or isinstance(response, Response):
return response
if request_type in serializers:
return serializers[request_type](response)
return json.dumps(response)
def serialize(f):
@wraps(f)
def decorator(*args, **kwargs):
response = f(*args, **kwargs)
request_type = get_request_type()
return serialize_response(request_type, response)
return decorator
class class_property(property):
def __get__(self, instance, type):
if instance is None:
return super(class_property, self).__get__(type, type)
return super(class_property, self).__get__(instance, type)
class BaseView(FlaskView):
__decorators = [serialize]
def __init__(self, *args, **kwargs):
super(BaseView, self).__init__(*args, **kwargs)
@class_property
def decorators(cls):
return cls.__decorators
@decorators.setter
def decorators(cls, decorator):
cls.__decorators.insert(0, decorator)
| apache-2.0 | 7,260,151,536,983,540,000 | 23.306452 | 72 | 0.689449 | false |
bloem-project/bloem-server | files/tasks.py | 1 | 1132 | import hashlib
import os
import logging
from celery import shared_task
from .models import File, Directory
logger = logging.getLogger()
@shared_task
def scan_directory(root, type):
"""Walk through a directory and add files matching a certain pattern to the database."""
for path, dirs, files in os.walk(root):
for file in files:
logger.debug("Found a file with filename {0}.".format(file))
try:
if File.objects.get(file_name=file).objects.filter(file_name=file).exists():
logger.debug("File is already in the database. Skipping.")
continue
except File.DoesNotExist:
hasher = hashlib.sha256()
with open(os.path.join(path, file), 'rb') as _file:
for chunk in iter(lambda: _file.read(65536), b""):
hasher.update(chunk)
_hash = hasher.hexdigest()
_directory = Directory.objects.get(path=root)
entry = File(hash=_hash, file_name=file, directory=_directory, path=path)
entry.save()
| gpl-3.0 | -8,059,939,730,043,644,000 | 38.034483 | 92 | 0.582155 | false |
cgwire/zou | zou/app/api.py | 1 | 4415 | import os
import sys
from zou.app.utils import events, api as api_utils
from flask import Blueprint
from .blueprints.assets import blueprint as assets_blueprint
from .blueprints.auth import blueprint as auth_blueprint
from .blueprints.breakdown import blueprint as breakdown_blueprint
from .blueprints.comments import blueprint as comments_blueprint
from .blueprints.crud import blueprint as crud_blueprint
from .blueprints.events import blueprint as events_blueprint
from .blueprints.export import blueprint as export_blueprint
from .blueprints.files import blueprint as files_blueprint
from .blueprints.index import blueprint as index_blueprint
from .blueprints.news import blueprint as news_blueprint
from .blueprints.persons import blueprint as persons_blueprint
from .blueprints.playlists import blueprint as playlists_blueprint
from .blueprints.projects import blueprint as projects_blueprint
from .blueprints.previews import blueprint as previews_blueprint
from .blueprints.source import blueprint as import_blueprint
from .blueprints.shots import blueprint as shots_blueprint
from .blueprints.tasks import blueprint as tasks_blueprint
from .blueprints.user import blueprint as user_blueprint
def configure(app):
"""
    Turn the Flask app into a REST API: configure the routes, the auth system
    and the events system.
"""
app.url_map.strict_slashes = False
configure_api_routes(app)
register_event_handlers(app)
load_plugins(app)
return app
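# Typical wiring of configure() (a sketch for illustration; the Flask app
# factory shown here is an assumption, not part of this module):
#
#     from flask import Flask
#     from zou.app import api
#
#     app = Flask(__name__)
#     api.configure(app)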
def configure_api_routes(app):
"""
    Register blueprints (modules). Each blueprint describes its routes and the
    associated resources (controllers).
"""
app.register_blueprint(auth_blueprint)
app.register_blueprint(assets_blueprint)
app.register_blueprint(breakdown_blueprint)
app.register_blueprint(comments_blueprint)
app.register_blueprint(crud_blueprint)
app.register_blueprint(export_blueprint)
app.register_blueprint(events_blueprint)
app.register_blueprint(files_blueprint)
app.register_blueprint(import_blueprint)
app.register_blueprint(index_blueprint)
app.register_blueprint(news_blueprint)
app.register_blueprint(persons_blueprint)
app.register_blueprint(playlists_blueprint)
app.register_blueprint(projects_blueprint)
app.register_blueprint(shots_blueprint)
app.register_blueprint(tasks_blueprint)
app.register_blueprint(previews_blueprint)
app.register_blueprint(user_blueprint)
return app
def register_event_handlers(app):
"""
    Load code from the event handlers folder, then register in the event
    manager each event handler listed in its __init__.py.
"""
sys.path.insert(0, app.config["EVENT_HANDLERS_FOLDER"])
try:
import event_handlers
events.register_all(event_handlers.event_map, app)
except ImportError:
# Event handlers folder is not properly configured.
# Handlers are optional, that's why this error is ignored.
app.logger.info("No event handlers folder is configured.")
return app
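# A minimal sketch of the event handlers package expected above (an assumption
# for illustration, not shipped with this module). The only contract relied on
# here is that `event_handlers.event_map` maps event names to handlers:
#
#     # event_handlers/__init__.py
#     from . import notify_on_task_update
#
#     event_map = {
#         "task:update": notify_on_task_update,
#     }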
def load_plugins(app):
"""
    Load plugins (bunches of resources dedicated to a specific usage).
"""
if os.path.exists(app.config["PLUGIN_FOLDER"]):
plugins = load_plugin_modules(app.config["PLUGIN_FOLDER"])
for plugin in plugins:
load_plugin(app, plugin)
def load_plugin_modules(plugin_folder):
"""
    Run a Python import on every plugin listed in the plugin folder and return
    the imported modules.
"""
sys.path.insert(0, plugin_folder)
return [
__import__(file_name)
for file_name in os.listdir(plugin_folder)
if os.path.isdir(os.path.join(plugin_folder, file_name))
and file_name != "__pycache__"
]
def load_plugin(app, plugin):
"""
    Load a given plugin as an API plugin: add its configured routes to the
    API. It assumes that the plugin is already loaded in memory and has a
    blueprint structure.
"""
routes = [
("/plugins%s" % route_path, resource)
for (route_path, resource) in plugin.routes
if len(route_path) > 0 and route_path[0] == "/"
]
plugin.routes = routes
plugin.blueprint = Blueprint(plugin.name, plugin.name)
plugin.api = api_utils.configure_api_from_blueprint(
plugin.blueprint, plugin.routes
)
app.register_blueprint(plugin.blueprint)
app.logger.info("Plugin %s loaded." % plugin.name)
return plugin
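# A minimal plugin module accepted by load_plugin() could look like the sketch
# below (the names and the flask_restful resource are illustrative assumptions):
#
#     from flask_restful import Resource
#
#     class PingResource(Resource):
#         def get(self):
#             return {"ping": "pong"}
#
#     name = "ping_plugin"
#     routes = [("/ping", PingResource)]
#
# Since load_plugin() prefixes every route with /plugins, the resource above
# would be served at /plugins/ping.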
| agpl-3.0 | -2,686,815,057,494,757,400 | 34.32 | 80 | 0.726614 | false |
velenux/photofix | photofix.py | 1 | 7000 | # encoding: utf-8
import os
import sys
# for file hash calculation
import hashlib
# datetime manipulation
from datetime import datetime
# exif tags
from gi.repository import GObject, GExiv2
# for moving files and dirs
import shutil
import errno
# configuration
VALID_IMAGES = set(['.cr2', '.cr3', '.crw', '.dng', '.png', '.jpg', '.jpeg', '.tif', '.tiff', '.gpr'])
VALID_VIDEO = set(['.mp4', '.mkv'])
PATH = {
'image': 'storage/images',
'video': 'storage/video',
'non-image': 'storage/non-images',
'duplicate': 'storage/duplicates',
'failed': 'storage/failed'
}
DUP_COUNTER = 0
TS = datetime.strftime(datetime.now(), "%Y-%m-%d")
EXISTING_FILES = set([])
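# Example of the layout produced under PATH['image'] (illustrative values:
# TS is the date the script runs, the suffix is the file's sha256 digest):
#   storage/images/2024-05-01/2023/07/20230704-153012_<sha256>.jpg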
#
# useful function from
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
#
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
#
# get_file_datetime(filename)
# retrieves the EXIF date, falls back to filesystem date
#
def get_file_datetime(filename):
fs_date = datetime.fromtimestamp(os.path.getmtime(filename))
#print "%s fs_date: %s" % (filename, fs_date.strftime("%s")) # debug
try:
exif_date = GExiv2.Metadata(filename).get_date_time()
#print "%s exif_date: %s" % (filename, exif_date.strftime("%s")) # debug
# avoid using the epoch if possible
if (int(fs_date.strftime("%s")) == 0 or fs_date > exif_date):
return exif_date
else:
return fs_date
except:
return fs_date
#
# get_file_hash(filename)
# returns the sha256 sum for the file as a string
#
def get_file_hash(filename):
sha = hashlib.sha256()
with open(filename, 'rb') as fp:
buf = fp.read(262144)
while len(buf) > 0:
sha.update(buf)
buf = fp.read(262144)
return sha.hexdigest()
#
# move_file(filename, destination)
# moves the file and outputs the source and destination for logging
#
def move_file(filename, destination):
global PATH
global DUP_COUNTER
(original_directory, original_filename) = os.path.split(filename)
(destination_directory, destination_filename) = os.path.split(destination)
(original_base_filename, original_extension) = os.path.splitext(original_filename)
destination_hash = destination_filename[16:]
# if the destination is a directory, rebuild the destination with
# directory and original filename so it becomes a full path
if os.path.isdir(destination):
destination = os.path.join(destination, original_filename)
# handle destination links
if os.path.islink(destination):
print('WARNING: destination', destination, 'is a link, redirecting', filename, 'to failed')
newdest = os.path.join(PATH['failed'], original_filename)
return move_file(filename, newdest)
# handle duplicates
if os.path.isfile(destination) or destination_hash in EXISTING_FILES:
print('WARNING:', filename, 'seems like a duplicate, redirecting...')
DUP_COUNTER += 1
if (original_filename != destination_filename):
# if the filenames are different, save the original one for reference
newdest = os.path.join(PATH['duplicate'], original_base_filename + '_' + str(DUP_COUNTER) + '-' + destination_filename)
else:
            # original_extension from os.path.splitext already includes the leading dot
            newdest = os.path.join(PATH['duplicate'], original_base_filename + '_' + str(DUP_COUNTER) + original_extension)
return move_file(filename, newdest)
mkdir_p(destination_directory)
print('mv to', destination)
try:
shutil.move(filename, destination)
if destination_directory.startswith(PATH['image']):
EXISTING_FILES.add(destination_hash)
except:
print('WARNING: failed to move', filename, 'to', destination, 'redirecting to failed...')
#
# explore_path(path)
# recursively iterates on path, moving images around
#
def explore_path(path):
for root, dirs, files in os.walk(path):
for f in files:
fullfn = os.path.join(root, f)
# skip symlinks and files that have already been moved (eg. xmp files)
if not os.path.isfile(fullfn): continue
# save the file name and extension
            # in the case of sidecar files, bn will be the original image
# /path/to/image.ext.xmp -> /path/to/image.ext + .xmp
bn, ext = os.path.splitext(fullfn)
ext = ext.lower()
# print the file we're working on
print(fullfn)
# handle different types of files
if ext in VALID_IMAGES:
handle_image(fullfn)
continue
elif ext in VALID_VIDEO:
handle_video(fullfn)
continue
elif ext == '.xmp' and os.path.isfile(bn):
# skip sidecar files with matching images: they will be handled
# during the original image handling pass
continue
else:
move_file(fullfn, PATH['non-image'])
continue
for d in dirs:
fulldn = os.path.join(root, d)
# skip symlinks
if os.path.islink(fulldn): continue
# recursively calls itself to check the other directories
explore_path(fulldn)
#
# handle_image(filename)
# renames and moves the single image
#
def handle_image(fullfn):
# get filename and extension
dir, fn = os.path.split(fullfn) # dir and filename
bn, ext = os.path.splitext(fn) # basename and extension
ext = ext.lower() # lowercase extension
# recover metadata from the image
file_date = get_file_datetime(fullfn)
file_hash = get_file_hash(fullfn)
# destination is: PATH['image']/TS/YYYY/mm/YYYYmmdd-HHMMSS_HASH.EXTENSION
destfn = os.path.join(PATH['image'], TS, file_date.strftime("%Y"), file_date.strftime("%m"), file_date.strftime("%Y%m%d-%H%M%S") + '_' + file_hash + ext)
# move the file
move_file(fullfn, destfn)
# if there is an XMP sidecar file, move that as well
for f in os.listdir(dir):
f_low = f.lower()
if f.startswith(fn) and f_low.endswith('.xmp'):
move_file(os.path.join(dir, f), destfn + '.xmp')
#
# handle_video(filename)
# recursively iterates on path, moving videos around
#
def handle_video(fullfn):
# get filename and extension
fn = os.path.split(fullfn)[1] # filename
bn, ext = os.path.splitext(fn) # basename and extension
ext = ext.lower() # lowercase extension
# recover metadata from the video
file_date = get_file_datetime(fullfn)
# destination is: PATH['video']/TS/YYYY/mm/YYYYmmdd-HHMMSS_HASH.EXTENSION
destfn = os.path.join(PATH['video'], TS, file_date.strftime("%Y"), file_date.strftime("%m"), file_date.strftime("%Y%m%d-%H%M%S") + '_' + bn + ext)
move_file(fullfn, destfn)
# run on the path given as the first command-line argument
explore_path(sys.argv[1])
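# Typical invocation (illustrative path):
#   python photofix.py /path/to/incoming/photos
# Images, videos and everything else end up under the storage/ paths
# configured at the top of this file.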
| gpl-2.0 | 2,211,774,835,877,225,000 | 31.55814 | 157 | 0.632857 | false |
kyokley/MediaConverter | tests/unit/test_tvrunner.py | 1 | 12299 | import unittest
import mock
from mock import call
from tv_runner import TvRunner
class TestTvRunner(unittest.TestCase):
def setUp(self):
self._sort_unsorted_files_patcher = mock.patch('tv_runner.TvRunner._sort_unsorted_files')
self.mock_sort_unsorted_files = self._sort_unsorted_files_patcher.start()
self.tvRunner = TvRunner()
def tearDown(self):
self._sort_unsorted_files_patcher.stop()
@mock.patch('tv_runner.Path')
def test_loadPaths(self, mock_path):
fake_paths = [object() for i in xrange(3)]
mock_path.getAllTVPaths.return_value = fake_paths
self.tvRunner.loadPaths()
self.assertEquals(fake_paths, self.tvRunner.paths)
@mock.patch('tv_runner.Path')
def test_getOrCreateRemotePath(self, mock_path):
expectedPathID = 123
testData = {'results': [{'pk': expectedPathID}]}
testPath = 'test path'
mock_path.getTVPathByLocalPathAndRemotePath.return_value = testData
actualPathID = self.tvRunner.getOrCreateRemotePath(testPath)
self.assertEquals(expectedPathID, actualPathID)
@mock.patch('tv_runner.File')
def test_buildRemoteFileSetForPathIDs(self, mock_file):
testData = {-1: ['invalid'],
1: ['test1'],
12: ['test12'],
123: ['test123'],
}
expectedSet = set(['test1',
'test12',
'test123',
])
mock_file.getTVFileSet = lambda x: testData.get(x)
actualSet = self.tvRunner.buildRemoteFileSetForPathIDs([-1,
1,
12,
123])
self.assertEquals(expectedSet, actualSet)
# TODO: Switch to using setUp/tearDown patching
@mock.patch('tv_runner.os.path.basename')
@mock.patch('tv_runner.os.path.getsize')
@mock.patch('tv_runner.os.path.exists')
@mock.patch('tv_runner.TvRunner.getOrCreateRemotePath')
@mock.patch('tv_runner.makeFileStreamable')
@mock.patch('tv_runner.File')
def test_updateFileRecords(self,
mock_file,
mock_makeFileStreamable,
mock_getOrCreateRemotePath,
mock_os_path_exists,
mock_os_path_getsize,
mock_os_path_basename):
mock_getOrCreateRemotePath.return_value = 1
mock_os_path_exists.return_value = True
mock_os_path_getsize.return_value = 1
mock_os_path_basename.return_value = 'basename'
test_path = '/a/local/path'
test_localFileSet = set(['file1',
'file2',
'file3',
'newfile',
])
test_remoteFileSet = set(['file1',
'file2',
'file3',
])
self.tvRunner.updateFileRecords(test_path, test_localFileSet, test_remoteFileSet)
mock_makeFileStreamable.assert_called_with('/a/local/path/newfile',
appendSuffix=True,
removeOriginal=True,
dryRun=False)
mock_file.assert_called_with('basename',
1,
1,
True)
def test_run(self):
test_data = {'asdf': [1],
'sdfg': [12, 23],
}
self.tvRunner.paths = test_data
self.tvRunner.loadPaths = mock.MagicMock()
self.tvRunner.buildLocalFileSet = mock.MagicMock()
self.tvRunner.buildLocalFileSet.return_value = set(['some', 'paths'])
self.tvRunner.buildRemoteFileSetForPathIDs = mock.MagicMock()
self.tvRunner.buildRemoteFileSetForPathIDs.return_value = set(['some', 'remote', 'paths'])
self.tvRunner.updateFileRecords = mock.MagicMock()
self.tvRunner.handleDirs = mock.MagicMock()
self.tvRunner.run()
self.mock_sort_unsorted_files.assert_called_once_with()
self.tvRunner.buildLocalFileSet.assert_has_calls([call('sdfg'),
call('asdf')],
any_order=True)
self.assertEqual(2, self.tvRunner.buildLocalFileSet.call_count)
self.tvRunner.buildRemoteFileSetForPathIDs.assert_has_calls([call([1]),
call([12, 23])],
any_order=True)
self.assertEqual(2, self.tvRunner.buildRemoteFileSetForPathIDs.call_count)
self.tvRunner.updateFileRecords.assert_has_calls(
[call('sdfg',
set(['paths', 'some']),
set(['remote', 'some', 'paths'])),
call('asdf',
set(['paths', 'some']),
set(['remote', 'some', 'paths']))],
any_order=True)
self.assertEqual(2, self.tvRunner.updateFileRecords.call_count)
self.tvRunner.handleDirs.assert_has_calls([call('sdfg'),
call('asdf')])
class TestHandleDirs(unittest.TestCase):
def setUp(self):
self.SMALL_FILE_SIZE_patcher = mock.patch('tv_runner.SMALL_FILE_SIZE', 100)
self.SMALL_FILE_SIZE_patcher.start()
self.test_walk = [('/path/to/tv_path/Test.Dir.Path',
['Test.Dir.Path.S04E03.WEBRip.x264-MV'],
['Test.Dir.Path.S04E02.WEBRip.x264-MV.mp4']),
('/path/to/tv_path/Test.Dir.Path/Test.Dir.Path.S04E03.WEBRip.x264-MV',
['Subs'],
['Test.Dir.Path.S04E03.WEBRip.x264-MV.mp4', 'info.txt', 'Small.mp4']),
('/path/to/tv_path/Test.Dir.Path/Test.Dir.Path.S04E03.WEBRip.x264-MV/Subs', [], ['2_Eng.srt'])]
self.walk_patcher = mock.patch('tv_runner.os.walk')
self.mock_walk = self.walk_patcher.start()
self.mock_walk.return_value = self.test_walk
self.exists_patcher = mock.patch('tv_runner.os.path.exists')
self.mock_exists = self.exists_patcher.start()
self.mock_exists.return_value = True
self.isdir_patcher = mock.patch('tv_runner.os.path.isdir')
self.mock_isdir = self.isdir_patcher.start()
self.mock_isdir.side_effect = [False, True, True, True, True]
self.getsize_patcher = mock.patch('tv_runner.os.path.getsize')
self.mock_getsize = self.getsize_patcher.start()
self.mock_getsize.side_effect = [1000, 10]
self.rename_patcher = mock.patch('tv_runner.os.rename')
self.mock_rename = self.rename_patcher.start()
self.rmtree_patcher = mock.patch('tv_runner.shutil.rmtree')
self.mock_rmtree = self.rmtree_patcher.start()
self.tv_runner = TvRunner()
def tearDown(self):
self.SMALL_FILE_SIZE_patcher.stop()
self.walk_patcher.stop()
self.exists_patcher.stop()
self.isdir_patcher.stop()
self.getsize_patcher.stop()
self.rename_patcher.stop()
self.rmtree_patcher.stop()
def test_path_does_not_exist(self):
self.mock_exists.return_value = False
self.tv_runner.handleDirs('/path/to/tv_path/Test.Dir.Path')
self.assertFalse(self.mock_rename.called)
self.assertFalse(self.mock_rmtree.called)
def test_handleDirs(self):
self.tv_runner.handleDirs('/path/to/tv_path/Test.Dir.Path')
self.mock_rename.assert_has_calls([mock.call('/path/to/tv_path/Test.Dir.Path/Test.Dir.Path.S04E03.WEBRip.x264-MV/Test.Dir.Path.S04E03.WEBRip.x264-MV.mp4',
'/path/to/tv_path/Test.Dir.Path/Test.Dir.Path.S04E03.WEBRip.x264-MV.mp4'),
mock.call('/path/to/tv_path/Test.Dir.Path/Test.Dir.Path.S04E03.WEBRip.x264-MV/Subs/2_Eng.srt',
'/path/to/tv_path/Test.Dir.Path/Test.Dir.Path.S04E03.WEBRip.x264-MV.srt'),
])
self.mock_rmtree.assert_called_once_with('/path/to/tv_path/Test.Dir.Path/Test.Dir.Path.S04E03.WEBRip.x264-MV')
class TestSortUnsortedFiles(unittest.TestCase):
def setUp(self):
self.UNSORTED_PATHS_patcher = mock.patch('tv_runner.UNSORTED_PATHS', ['/path/to/unsorted'])
self.UNSORTED_PATHS_patcher.start()
self.exists_patcher = mock.patch('tv_runner.os.path.exists')
self.mock_exists = self.exists_patcher.start()
self.listdir_patcher = mock.patch('tv_runner.os.listdir')
self.mock_listdir = self.listdir_patcher.start()
self.get_localpath_by_filename_patcher = mock.patch('tv_runner.get_localpath_by_filename')
self.mock_get_localpath_by_filename = self.get_localpath_by_filename_patcher.start()
self.move_patcher = mock.patch('tv_runner.shutil.move')
self.mock_move = self.move_patcher.start()
self.tv_runner = TvRunner()
def tearDown(self):
self.UNSORTED_PATHS_patcher.stop()
self.exists_patcher.stop()
self.listdir_patcher.stop()
self.get_localpath_by_filename_patcher.stop()
self.move_patcher.stop()
def test_unsorted_path_does_not_exist(self):
self.mock_exists.return_value = False
expected = None
actual = self.tv_runner._sort_unsorted_files()
self.assertEqual(expected, actual)
self.mock_exists.assert_called_once_with('/path/to/unsorted')
self.assertFalse(self.mock_listdir.called)
self.assertFalse(self.mock_get_localpath_by_filename.called)
self.assertFalse(self.mock_move.called)
def test_no_localpath_for_filename(self):
self.mock_exists.return_value = True
self.mock_listdir.return_value = ['new.show.s02e10']
self.mock_get_localpath_by_filename.return_value = None
expected = None
actual = self.tv_runner._sort_unsorted_files()
self.assertEqual(expected, actual)
self.mock_exists.assert_called_once_with('/path/to/unsorted')
self.mock_get_localpath_by_filename.assert_called_once_with('new.show.s02e10')
self.assertFalse(self.mock_move.called)
def test_localpath_does_not_exist(self):
self.mock_exists.side_effect = [True, False]
self.mock_listdir.return_value = ['new.show.s02e10']
self.mock_get_localpath_by_filename.return_value = '/path/to/local/new.show'
expected = None
actual = self.tv_runner._sort_unsorted_files()
self.assertEqual(expected, actual)
self.mock_exists.assert_has_calls([mock.call('/path/to/unsorted'),
mock.call('/path/to/local/new.show'),
])
self.mock_get_localpath_by_filename.assert_called_once_with('new.show.s02e10')
self.assertFalse(self.mock_move.called)
def test_localpath_for_filename(self):
self.mock_exists.return_value = True
self.mock_listdir.return_value = ['new.show.s02e10']
self.mock_get_localpath_by_filename.return_value = '/path/to/local/new.show'
expected = None
actual = self.tv_runner._sort_unsorted_files()
self.assertEqual(expected, actual)
self.mock_exists.assert_has_calls([mock.call('/path/to/unsorted'),
mock.call('/path/to/local/new.show'),
])
self.mock_get_localpath_by_filename.assert_called_once_with('new.show.s02e10')
self.mock_move.assert_called_once_with('/path/to/unsorted/new.show.s02e10',
'/path/to/local/new.show/new.show.s02e10')
| mit | 1,511,983,220,262,552,000 | 43.241007 | 162 | 0.557362 | false |
majetideepak/arrow | python/pyarrow/tests/test_serialization.py | 1 | 30729 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import division
import pytest
import collections
import datetime
import os
import pickle
import subprocess
import string
import sys
import pyarrow as pa
import numpy as np
import pyarrow.tests.util as test_util
try:
import torch
except ImportError:
torch = None
# Blacklist the module in case `import torch` is costly before
# failing (ARROW-2071)
sys.modules['torch'] = None
def assert_equal(obj1, obj2):
if torch is not None and torch.is_tensor(obj1) and torch.is_tensor(obj2):
if obj1.is_sparse:
obj1 = obj1.to_dense()
if obj2.is_sparse:
obj2 = obj2.to_dense()
assert torch.equal(obj1, obj2)
return
module_numpy = (type(obj1).__module__ == np.__name__ or
type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ()) or
(hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently np.testing.assert_equal
# fails because we do not properly handle different numerical
# types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) ==
set(list(obj2.__dict__.keys()) + special_keys)), ("Objects {} "
"and {} are "
"different."
.format(
obj1,
obj2))
try:
# Workaround to make comparison of OrderedDicts work on Python 2.7
if obj1 == obj2:
return
except Exception:
pass
if obj1.__dict__ == {}:
print("WARNING: Empty dict in ", obj1)
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths."
.format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples with "
"different lengths."
.format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (pa.lib.is_named_tuple(type(obj1)) or
pa.lib.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), ("Objects {} and {} are named tuples "
"with different lengths."
.format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif isinstance(obj1, pa.Array) and isinstance(obj2, pa.Array):
assert obj1.equals(obj2)
elif isinstance(obj1, pa.Tensor) and isinstance(obj2, pa.Tensor):
assert obj1.equals(obj2)
elif isinstance(obj1, pa.RecordBatch) and isinstance(obj2, pa.RecordBatch):
assert obj1.equals(obj2)
elif isinstance(obj1, pa.Table) and isinstance(obj2, pa.Table):
assert obj1.equals(obj2)
else:
assert type(obj1) == type(obj2) and obj1 == obj2, \
"Objects {} and {} are different.".format(obj1, obj2)
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 999,
[1 << 100, [1 << 100]], "a", string.printable, "\u262F",
"hello world", u"hello world", u"\xff\xfe\x9c\x001\x000\x00",
None, True, False, [], (), {}, {(1, 2): 1}, {(): 2},
[1, "hello", 3.0], u"\u262F", 42.0, (1.0, "hi"),
[1, 2, 3, None], [(None,), 3, 1.0], ["h", "e", "l", "l", "o", None],
(None, None), ("hello", None), (True, False),
{True: "hello", False: "world"}, {"hello": "world", 1: 42, 2.5: 45},
{"hello": set([2, 3]), "world": set([42.0]), "this": None},
np.int8(3), np.int32(4), np.int64(5),
np.uint8(3), np.uint32(4), np.uint64(5),
np.float16(1.9), np.float32(1.9),
np.float64(1.9), np.zeros([8, 20]),
np.random.normal(size=[17, 10]), np.array(["hi", 3]),
np.array(["hi", 3], dtype=object),
np.random.normal(size=[15, 13]).T
]
if sys.version_info >= (3, 0):
PRIMITIVE_OBJECTS += [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
PRIMITIVE_OBJECTS += [long(42), long(1 << 62), long(0), # noqa
np.array([["hi", u"hi"],
[1.3, long(1)]])] # noqa
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{"obj{}".format(i): np.random.normal(size=[4, 4]) for i in range(5)},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
((((((((((),),),),),),),),),),
{"a": {"b": {"c": {"d": {}}}}},
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(1), Foo(42)]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class SubQuxPickle(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = collections.namedtuple("Point", ["x", "y"])
NamedTupleExample = collections.namedtuple(
"Example", "field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [Exception("Test object."), CustomError(), Point(11, y=22),
Foo(), Bar(), Baz(), Qux(), SubQux(), SubQuxPickle(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
collections.OrderedDict([("hello", 1), ("world", 2)]),
collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
collections.Counter([1, 1, 1, 2, 2, 3, "a", "b"])]
def make_serialization_context():
context = pa.default_serialization_context()
context.register_type(Foo, "Foo")
context.register_type(Bar, "Bar")
context.register_type(Baz, "Baz")
context.register_type(Qux, "Quz")
context.register_type(SubQux, "SubQux")
context.register_type(SubQuxPickle, "SubQuxPickle", pickle=True)
context.register_type(Exception, "Exception")
context.register_type(CustomError, "CustomError")
context.register_type(Point, "Point")
context.register_type(NamedTupleExample, "NamedTupleExample")
return context
global_serialization_context = make_serialization_context()
def serialization_roundtrip(value, scratch_buffer,
context=global_serialization_context):
writer = pa.FixedSizeBufferWriter(scratch_buffer)
pa.serialize_to(value, writer, context=context)
reader = pa.BufferReader(scratch_buffer)
result = pa.deserialize_from(reader, None, context=context)
assert_equal(value, result)
_check_component_roundtrip(value, context=context)
def _check_component_roundtrip(value, context=global_serialization_context):
# Test to/from components
serialized = pa.serialize(value, context=context)
components = serialized.to_components()
from_comp = pa.SerializedPyObject.from_components(components)
recons = from_comp.deserialize(context=context)
assert_equal(value, recons)
@pytest.yield_fixture(scope='session')
def large_buffer(size=32*1024*1024):
return pa.allocate_buffer(size)
def large_memory_map(tmpdir_factory, size=100*1024*1024):
path = (tmpdir_factory.mktemp('data')
.join('pyarrow-serialization-tmp-file').strpath)
# Create a large memory mapped file
with open(path, 'wb') as f:
f.write(np.random.randint(0, 256, size=size)
.astype('u1')
.tobytes()
[:size])
return path
def test_clone():
context = pa.SerializationContext()
class Foo(object):
pass
def custom_serializer(obj):
return 0
def custom_deserializer(serialized_obj):
return (serialized_obj, 'a')
context.register_type(Foo, 'Foo', custom_serializer=custom_serializer,
custom_deserializer=custom_deserializer)
new_context = context.clone()
f = Foo()
serialized = pa.serialize(f, context=context)
deserialized = serialized.deserialize(context=context)
assert deserialized == (0, 'a')
serialized = pa.serialize(f, context=new_context)
deserialized = serialized.deserialize(context=new_context)
assert deserialized == (0, 'a')
def test_primitive_serialization_notbroken(large_buffer):
serialization_roundtrip({(1, 2): 2}, large_buffer)
def test_primitive_serialization_broken(large_buffer):
serialization_roundtrip({(): 2}, large_buffer)
def test_primitive_serialization(large_buffer):
for obj in PRIMITIVE_OBJECTS:
serialization_roundtrip(obj, large_buffer)
def test_integer_limits(large_buffer):
# Check that Numpy scalars can be represented up to their limit values
# (except np.uint64 which is limited to 2**63 - 1)
    for dt in [np.int8, np.int16, np.int32, np.int64,
               np.uint8, np.uint16, np.uint32, np.uint64]:
scal = dt(np.iinfo(dt).min)
serialization_roundtrip(scal, large_buffer)
if dt is not np.uint64:
scal = dt(np.iinfo(dt).max)
serialization_roundtrip(scal, large_buffer)
else:
scal = dt(2**63 - 1)
serialization_roundtrip(scal, large_buffer)
for v in (2**63, 2**64 - 1):
scal = dt(v)
with pytest.raises(pa.ArrowInvalid):
pa.serialize(scal)
def test_serialize_to_buffer():
for nthreads in [1, 4]:
for value in COMPLEX_OBJECTS:
buf = pa.serialize(value).to_buffer(nthreads=nthreads)
result = pa.deserialize(buf)
assert_equal(value, result)
def test_complex_serialization(large_buffer):
for obj in COMPLEX_OBJECTS:
serialization_roundtrip(obj, large_buffer)
def test_custom_serialization(large_buffer):
for obj in CUSTOM_OBJECTS:
serialization_roundtrip(obj, large_buffer)
def test_default_dict_serialization(large_buffer):
pytest.importorskip("cloudpickle")
obj = collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)])
serialization_roundtrip(obj, large_buffer)
def test_numpy_serialization(large_buffer):
for t in ["bool", "int8", "uint8", "int16", "uint16", "int32",
"uint32", "float16", "float32", "float64", "<U1", "<U2", "<U3",
"<U4", "|S1", "|S2", "|S3", "|S4", "|O",
np.dtype([('a', 'int64'), ('b', 'float')]),
np.dtype([('x', 'uint32'), ('y', '<U8')])]:
obj = np.random.randint(0, 10, size=(100, 100)).astype(t)
serialization_roundtrip(obj, large_buffer)
obj = obj[1:99, 10:90]
serialization_roundtrip(obj, large_buffer)
def test_datetime_serialization(large_buffer):
data = [
# Principia Mathematica published
datetime.datetime(year=1687, month=7, day=5),
# Some random date
datetime.datetime(year=1911, month=6, day=3, hour=4,
minute=55, second=44),
# End of WWI
datetime.datetime(year=1918, month=11, day=11),
# Beginning of UNIX time
datetime.datetime(year=1970, month=1, day=1),
# The Berlin wall falls
datetime.datetime(year=1989, month=11, day=9),
# Another random date
datetime.datetime(year=2011, month=6, day=3, hour=4,
minute=0, second=3),
# Another random date
datetime.datetime(year=1970, month=1, day=3, hour=4,
minute=0, second=0)
]
for d in data:
serialization_roundtrip(d, large_buffer)
def test_torch_serialization(large_buffer):
pytest.importorskip("torch")
serialization_context = pa.default_serialization_context()
pa.register_torch_serialization_handlers(serialization_context)
# Dense tensors:
# These are the only types that are supported for the
# PyTorch to NumPy conversion
for t in ["float32", "float64",
"uint8", "int16", "int32", "int64"]:
obj = torch.from_numpy(np.random.randn(1000).astype(t))
serialization_roundtrip(obj, large_buffer,
context=serialization_context)
tensor_requiring_grad = torch.randn(10, 10, requires_grad=True)
serialization_roundtrip(tensor_requiring_grad, large_buffer,
context=serialization_context)
# Sparse tensors:
# These are the only types that are supported for the
# PyTorch to NumPy conversion
for t in ["float32", "float64",
"uint8", "int16", "int32", "int64"]:
i = torch.LongTensor([[0, 2], [1, 0], [1, 2]])
v = torch.from_numpy(np.array([3, 4, 5]).astype(t))
obj = torch.sparse_coo_tensor(i.t(), v, torch.Size([2, 3]))
serialization_roundtrip(obj, large_buffer,
context=serialization_context)
@pytest.mark.skipif(not torch or not torch.cuda.is_available(),
reason="requires pytorch with CUDA")
def test_torch_cuda():
# ARROW-2920: This used to segfault if torch is not imported
# before pyarrow
# Note that this test will only catch the issue if it is run
# with a pyarrow that has been built in the manylinux1 environment
torch.nn.Conv2d(64, 2, kernel_size=3, stride=1,
padding=1, bias=False).cuda()
def test_numpy_immutable(large_buffer):
obj = np.zeros([10])
writer = pa.FixedSizeBufferWriter(large_buffer)
pa.serialize_to(obj, writer, global_serialization_context)
reader = pa.BufferReader(large_buffer)
result = pa.deserialize_from(reader, None, global_serialization_context)
with pytest.raises(ValueError):
result[0] = 1.0
def test_numpy_base_object(tmpdir):
# ARROW-2040: deserialized Numpy array should keep a reference to the
# owner of its memory
path = os.path.join(str(tmpdir), 'zzz.bin')
data = np.arange(12, dtype=np.int32)
with open(path, 'wb') as f:
f.write(pa.serialize(data).to_buffer())
serialized = pa.read_serialized(pa.OSFile(path))
result = serialized.deserialize()
assert_equal(result, data)
serialized = None
assert_equal(result, data)
assert result.base is not None
# see https://issues.apache.org/jira/browse/ARROW-1695
def test_serialization_callback_numpy():
class DummyClass(object):
pass
def serialize_dummy_class(obj):
x = np.zeros(4)
return x
def deserialize_dummy_class(serialized_obj):
return serialized_obj
context = pa.default_serialization_context()
context.register_type(DummyClass, "DummyClass",
custom_serializer=serialize_dummy_class,
custom_deserializer=deserialize_dummy_class)
pa.serialize(DummyClass(), context=context)
def test_numpy_subclass_serialization():
# Check that we can properly serialize subclasses of np.ndarray.
class CustomNDArray(np.ndarray):
def __new__(cls, input_array):
array = np.asarray(input_array).view(cls)
return array
def serializer(obj):
return {'numpy': obj.view(np.ndarray)}
def deserializer(data):
array = data['numpy'].view(CustomNDArray)
return array
context = pa.default_serialization_context()
context.register_type(CustomNDArray, 'CustomNDArray',
custom_serializer=serializer,
custom_deserializer=deserializer)
x = CustomNDArray(np.zeros(3))
serialized = pa.serialize(x, context=context).to_buffer()
new_x = pa.deserialize(serialized, context=context)
assert type(new_x) == CustomNDArray
assert np.alltrue(new_x.view(np.ndarray) == np.zeros(3))
def test_numpy_matrix_serialization(tmpdir):
class CustomType(object):
def __init__(self, val):
self.val = val
rec_type = np.dtype([('x', 'int64'), ('y', 'double'), ('z', '<U4')])
path = os.path.join(str(tmpdir), 'pyarrow_npmatrix_serialization_test.bin')
array = np.random.randint(low=-1, high=1, size=(2, 2))
for data_type in [str, int, float, rec_type, CustomType]:
matrix = np.matrix(array.astype(data_type))
with open(path, 'wb') as f:
f.write(pa.serialize(matrix).to_buffer())
serialized = pa.read_serialized(pa.OSFile(path))
result = serialized.deserialize()
assert_equal(result, matrix)
assert_equal(result.dtype, matrix.dtype)
serialized = None
assert_equal(result, matrix)
assert result.base is not None
def test_pyarrow_objects_serialization(large_buffer):
    # NOTE: We have to construct these objects inside the test (not at module
    # level), or they will affect 'test_total_bytes_allocated'.
pyarrow_objects = [
pa.array([1, 2, 3, 4]), pa.array(['1', u'never U+1F631', '',
u"233 * U+1F600"]),
pa.array([1, None, 2, 3]),
pa.Tensor.from_numpy(np.random.rand(2, 3, 4)),
pa.RecordBatch.from_arrays(
[pa.array([1, None, 2, 3]),
pa.array(['1', u'never U+1F631', '', u"233 * u1F600"])],
['a', 'b']),
pa.Table.from_arrays([pa.array([1, None, 2, 3]),
pa.array(['1', u'never U+1F631', '',
u"233 * u1F600"])],
['a', 'b'])
]
for obj in pyarrow_objects:
serialization_roundtrip(obj, large_buffer)
def test_buffer_serialization():
class BufferClass(object):
pass
def serialize_buffer_class(obj):
return pa.py_buffer(b"hello")
def deserialize_buffer_class(serialized_obj):
return serialized_obj
context = pa.default_serialization_context()
context.register_type(
BufferClass, "BufferClass",
custom_serializer=serialize_buffer_class,
custom_deserializer=deserialize_buffer_class)
b = pa.serialize(BufferClass(), context=context).to_buffer()
assert pa.deserialize(b, context=context).to_pybytes() == b"hello"
@pytest.mark.skip(reason="extensive memory requirements")
def test_arrow_limits(self):
def huge_memory_map(temp_dir):
return large_memory_map(temp_dir, 100 * 1024 * 1024 * 1024)
with pa.memory_map(huge_memory_map, mode="r+") as mmap:
# Test that objects that are too large for Arrow throw a Python
# exception. These tests give out of memory errors on Travis and need
# to be run on a machine with lots of RAM.
x = 2 ** 29 * [1.0]
serialization_roundtrip(x, mmap)
del x
x = 2 ** 29 * ["s"]
serialization_roundtrip(x, mmap)
del x
x = 2 ** 29 * [["1"], 2, 3, [{"s": 4}]]
serialization_roundtrip(x, mmap)
del x
x = 2 ** 29 * [{"s": 1}] + 2 ** 29 * [1.0]
serialization_roundtrip(x, mmap)
del x
x = np.zeros(2 ** 25)
serialization_roundtrip(x, mmap)
del x
x = [np.zeros(2 ** 18) for _ in range(2 ** 7)]
serialization_roundtrip(x, mmap)
del x
def test_serialization_callback_error():
class TempClass(object):
pass
# Pass a SerializationContext into serialize, but TempClass
# is not registered
serialization_context = pa.SerializationContext()
val = TempClass()
with pytest.raises(pa.SerializationCallbackError) as err:
serialized_object = pa.serialize(val, serialization_context)
assert err.value.example_object == val
serialization_context.register_type(TempClass, "TempClass")
serialized_object = pa.serialize(TempClass(), serialization_context)
deserialization_context = pa.SerializationContext()
# Pass a Serialization Context into deserialize, but TempClass
# is not registered
with pytest.raises(pa.DeserializationCallbackError) as err:
serialized_object.deserialize(deserialization_context)
assert err.value.type_id == "TempClass"
class TempClass2(object):
pass
# Make sure that we receive an error when we use an inappropriate value for
# the type_id argument.
with pytest.raises(TypeError):
serialization_context.register_type(TempClass2, 1)
def test_fallback_to_subclasses():
class SubFoo(Foo):
def __init__(self):
Foo.__init__(self)
# should be able to serialize/deserialize an instance
# if a base class has been registered
serialization_context = pa.SerializationContext()
serialization_context.register_type(Foo, "Foo")
subfoo = SubFoo()
    # should fall back to the Foo serializer
serialized_object = pa.serialize(subfoo, serialization_context)
reconstructed_object = serialized_object.deserialize(
serialization_context
)
assert type(reconstructed_object) == Foo
class Serializable(object):
pass
def serialize_serializable(obj):
return {"type": type(obj), "data": obj.__dict__}
def deserialize_serializable(obj):
val = obj["type"].__new__(obj["type"])
val.__dict__.update(obj["data"])
return val
class SerializableClass(Serializable):
def __init__(self):
self.value = 3
def test_serialize_subclasses():
# This test shows how subclasses can be handled in an idiomatic way
# by having only a serializer for the base class
# This technique should however be used with care, since pickling
    # type(obj) with cloudpickle will include the full class definition
# in the serialized representation.
# This means the class definition is part of every instance of the
# object, which in general is not desirable; registering all subclasses
# with register_type will result in faster and more memory
# efficient serialization.
context = pa.default_serialization_context()
context.register_type(
Serializable, "Serializable",
custom_serializer=serialize_serializable,
custom_deserializer=deserialize_serializable)
a = SerializableClass()
serialized = pa.serialize(a, context=context)
deserialized = serialized.deserialize(context=context)
assert type(deserialized).__name__ == SerializableClass.__name__
assert deserialized.value == 3
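# A minimal sketch (not part of the original tests) of the alternative noted
# above: registering the concrete subclass directly, so the payload carries
# only its __dict__ rather than a pickled class definition. This assumes
# SerializableClass is importable on the deserializing side.
def _register_subclass_directly_example():
    context = pa.default_serialization_context()
    def _serialize(obj):
        return obj.__dict__
    def _deserialize(data):
        obj = SerializableClass.__new__(SerializableClass)
        obj.__dict__.update(data)
        return obj
    context.register_type(SerializableClass, "SerializableClass",
                          custom_serializer=_serialize,
                          custom_deserializer=_deserialize)
    return context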
def test_serialize_to_components_invalid_cases():
buf = pa.py_buffer(b'hello')
components = {
'num_tensors': 0,
'num_ndarrays': 0,
'num_buffers': 1,
'data': [buf]
}
with pytest.raises(pa.ArrowInvalid):
pa.deserialize_components(components)
components = {
'num_tensors': 0,
'num_ndarrays': 1,
'num_buffers': 0,
'data': [buf, buf]
}
with pytest.raises(pa.ArrowInvalid):
pa.deserialize_components(components)
def test_deserialize_components_in_different_process():
arr = pa.array([1, 2, 5, 6], type=pa.int8())
ser = pa.serialize(arr)
data = pickle.dumps(ser.to_components(), protocol=-1)
code = """if 1:
import pickle
import pyarrow as pa
data = {0!r}
components = pickle.loads(data)
arr = pa.deserialize_components(components)
assert arr.to_pylist() == [1, 2, 5, 6], arr
""".format(data)
subprocess_env = test_util.get_modified_env_with_pythonpath()
print("** sys.path =", sys.path)
print("** setting PYTHONPATH to:", subprocess_env['PYTHONPATH'])
subprocess.check_call(["python", "-c", code], env=subprocess_env)
def test_serialize_read_concatenated_records():
# ARROW-1996 -- see stream alignment work in ARROW-2840, ARROW-3212
f = pa.BufferOutputStream()
pa.serialize_to(12, f)
pa.serialize_to(23, f)
buf = f.getvalue()
f = pa.BufferReader(buf)
pa.read_serialized(f).deserialize()
pa.read_serialized(f).deserialize()
@pytest.mark.skipif(os.name == 'nt', reason="deserialize_regex not pickleable")
def test_deserialize_in_different_process():
from multiprocessing import Process, Queue
import re
regex = re.compile(r"\d+\.\d*")
serialization_context = pa.SerializationContext()
serialization_context.register_type(type(regex), "Regex", pickle=True)
serialized = pa.serialize(regex, serialization_context)
serialized_bytes = serialized.to_buffer().to_pybytes()
def deserialize_regex(serialized, q):
import pyarrow as pa
q.put(pa.deserialize(serialized))
q = Queue()
p = Process(target=deserialize_regex, args=(serialized_bytes, q))
p.start()
assert q.get().pattern == regex.pattern
p.join()
def test_deserialize_buffer_in_different_process():
import tempfile
f = tempfile.NamedTemporaryFile(delete=False)
b = pa.serialize(pa.py_buffer(b'hello')).to_buffer()
f.write(b.to_pybytes())
f.close()
subprocess_env = test_util.get_modified_env_with_pythonpath()
dir_path = os.path.dirname(os.path.realpath(__file__))
python_file = os.path.join(dir_path, 'deserialize_buffer.py')
subprocess.check_call([sys.executable, python_file, f.name],
env=subprocess_env)
def test_set_pickle():
# Use a custom type to trigger pickling.
class Foo(object):
pass
context = pa.SerializationContext()
context.register_type(Foo, 'Foo', pickle=True)
test_object = Foo()
# Define a custom serializer and deserializer to use in place of pickle.
def dumps1(obj):
return b'custom'
def loads1(serialized_obj):
return serialized_obj + b' serialization 1'
# Test that setting a custom pickler changes the behavior.
context.set_pickle(dumps1, loads1)
serialized = pa.serialize(test_object, context=context).to_buffer()
deserialized = pa.deserialize(serialized.to_pybytes(), context=context)
assert deserialized == b'custom serialization 1'
# Define another custom serializer and deserializer.
def dumps2(obj):
return b'custom'
def loads2(serialized_obj):
return serialized_obj + b' serialization 2'
# Test that setting another custom pickler changes the behavior again.
context.set_pickle(dumps2, loads2)
serialized = pa.serialize(test_object, context=context).to_buffer()
deserialized = pa.deserialize(serialized.to_pybytes(), context=context)
assert deserialized == b'custom serialization 2'
@pytest.mark.skipif(sys.version_info < (3, 6), reason="need Python 3.6")
def test_path_objects(tmpdir):
# Test compatibility with PEP 519 path-like objects
import pathlib
p = pathlib.Path(tmpdir) / 'zzz.bin'
obj = 1234
pa.serialize_to(obj, p)
res = pa.deserialize_from(p, None)
assert res == obj
def test_tensor_alignment():
# Deserialized numpy arrays should be 64-byte aligned.
x = np.random.normal(size=(10, 20, 30))
y = pa.deserialize(pa.serialize(x).to_buffer())
assert y.ctypes.data % 64 == 0
xs = [np.random.normal(size=i) for i in range(100)]
ys = pa.deserialize(pa.serialize(xs).to_buffer())
for y in ys:
assert y.ctypes.data % 64 == 0
xs = [np.random.normal(size=i * (1,)) for i in range(20)]
ys = pa.deserialize(pa.serialize(xs).to_buffer())
for y in ys:
assert y.ctypes.data % 64 == 0
xs = [np.random.normal(size=i * (5,)) for i in range(1, 8)]
xs = [xs[i][(i + 1) * (slice(1, 3),)] for i in range(len(xs))]
ys = pa.deserialize(pa.serialize(xs).to_buffer())
for y in ys:
assert y.ctypes.data % 64 == 0
def test_serialization_determinism():
for obj in COMPLEX_OBJECTS:
buf1 = pa.serialize(obj).to_buffer()
buf2 = pa.serialize(obj).to_buffer()
assert buf1.to_pybytes() == buf2.to_pybytes()
def test_serialize_recursive_objects():
class ClassA(object):
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Make a numpy array that contains itself.
arr = np.array([None], dtype=object)
arr[0] = arr
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1, arr]
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
with pytest.raises(Exception):
pa.serialize(obj).deserialize()
| apache-2.0 | 6,047,868,374,056,788,000 | 32.041935 | 79 | 0.607179 | false |
ANGELHACK-JARVIS/safe-locality | app.py | 1 | 9649 |
from flask import Flask, render_template, json, request, redirect, session
from flask.ext.mysql import MySQL
from werkzeug import generate_password_hash, check_password_hash
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map
import pygal
from pygal.style import BlueStyle, NeonStyle,DarkSolarizedStyle, LightSolarizedStyle, LightColorizedStyle, DarkColorizedStyle, TurquoiseStyle
app = Flask(__name__)
GoogleMaps(app)
app.secret_key = 'ssh...Big secret!'
#MySQL configurations
mysql = MySQL()
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = 'tekken5'
app.config['MYSQL_DATABASE_DB'] = 'safelocality'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
# route to index.html
@app.route("/")
def main():
if(session.get('user')):
return render_template('home.html',session = session)
else:
return render_template('home.html')
# route to signup.html
@app.route('/showSignUp')
def showSignUp():
return render_template('signup.html')
# interact with MySQL for sign up
@app.route('/signUp',methods=['POST'])
def signUp():
try:
_name = request.form['inputName']
_email = request.form['inputEmail']
_password = request.form['inputPassword']
_firstname = request.form['inputFirstName']
_lastname = request.form['inputLastName']
# validate the received values
if _name and _email and _password:
# All Good, let's call MySQL
conn = mysql.connect()
cursor = conn.cursor()
_hashed_password = generate_password_hash(_password)
cursor.callproc('sp_createUser',(_name,_firstname,_lastname,_email,_hashed_password))
data = cursor.fetchall()
            if len(data) == 0:
conn.commit()
return redirect('/showSignin')
else:
return json.dumps({'error':str(data[0])})
else:
return json.dumps({'html':'<span>Enter the required fields</span>'})
except Exception as e:
return json.dumps({'error':str(e)})
finally:
cursor.close()
conn.close()
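# For reference, a rough sketch of what the sp_createUser stored procedure is
# assumed to do (hypothetical -- the real procedure lives in the MySQL schema,
# not in this repo; column names here are illustrative only):
#
#   CREATE PROCEDURE sp_createUser(IN p_name VARCHAR(45), IN p_fname VARCHAR(45),
#                                  IN p_lname VARCHAR(45), IN p_email VARCHAR(45),
#                                  IN p_password VARCHAR(255))
#   BEGIN
#       IF EXISTS (SELECT 1 FROM User WHERE UserName = p_name) THEN
#           SELECT 'Username already exists' AS error;
#       ELSE
#           INSERT INTO User (UserName, FirstName, LastName, UserEmail, Password)
#           VALUES (p_name, p_fname, p_lname, p_email, p_password);
#       END IF;
#   END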
@app.route('/showSignin')
def showSignin():
return render_template('signin.html')
@app.route('/validateLogin',methods=['POST'])
def validateLogin():
try:
_username = request.form['inputEmail']
_password = request.form['inputPassword']
# connect to mysql
con = mysql.connect()
cursor = con.cursor()
cursor.callproc('sp_validateLogin',(_username,))
data = cursor.fetchall()
if len(data) > 0:
if check_password_hash(str(data[0][5]),_password):
session['user'] = data[0][0]
print "here"
return render_template('home.html')
else:
return render_template('error.html',error = 'Wrong Email address or Password.')
else:
return render_template('error.html',error = 'Wrong Email address or Password.')
except Exception as e:
return render_template('error.html',error = str(e))
finally:
cursor.close()
con.close()
@app.route('/dashboard')
def demo():
newDict = {}
with open('Places.txt','r') as lines:
for i in lines:
k=i.split(',')
v=k[2].strip("\n").strip("\r")
cord=[k[1],v]
newDict[k[0]] = cord
conn = mysql.connect()
cursor = conn.cursor()
#No need to repeatedly create and delete tuples from the Coordinates table
#place=[]
#lat=[]
#lon=[]
#k=0
#print newDict
#for i in newDict:
# place.append(i)
# lat.append(float(newDict[i][0]))
# lon.append(float(newDict[i][1]))
#cursor.callproc('sp_addLoc',('dfsd',12.12,12.1234,))
#for i in range(0,len(place)):
# cursor.callproc('sp_addLoc',(place[i],lat[i],lon[i]))
#cursor.execute("DELETE FROM Coordinates WHERE Loc_id<6 and Loc_id>8")
cursor.execute("SELECT Loc_name FROM Coordinates ORDER BY Loc_name DESC")
data = cursor.fetchall()
print data
conn.commit()
cursor.close()
conn.close()
if(session.get('user')):
return render_template('dashboard.html', data = data,session=session)
else:
return render_template('dashboard.html',data = data)
######################################################################################33
#This is the review form implementation
@app.route('/addStats')
def displayForm():
return render_template('addStats.html')
@app.route('/addStats', methods=['POST'])
def takeData():
locale=str(request.form['inputLocale'])
water=int(request.form['inputWater'])
electricity=int(request.form['inputElectricity'])
network=int(request.form['inputNetworkAvailability'])
cleanliness=int(request.form['inputCleanliness'])
green=int(request.form['inputGreenSpace'])
life=int(request.form['inputNightlife'])
rmen=int(request.form['inputRepairmenAvailability'])
edu=int(request.form['inputeducation'])
nhood=int(request.form['inputNeighbourhood'])
lent=int(request.form['inputLocalEntertainment'])
rev=str(request.form['inputReview'])
uid=int(session.get('user'))
conn=mysql.connect()
cur=conn.cursor()
cur.execute("Select Loc_id from Coordinates where Loc_name=%s",(locale))
lid=int(cur.fetchone()[0])
cur.execute("Insert into Review (UserId,Loc_id,review_text) values(%s,%s,%s)",(uid,lid,rev))
conn.commit()
cur.callproc('sp_addStats',(uid,lid,water,electricity,network,cleanliness, green, lent, life, rmen, edu, nhood))
conn.commit()
cur.close()
conn.close()
return render_template('home.html')
######################################################################################
@app.route('/places/<place_name>/')
def places(place_name):
if session.get('user'):
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT * FROM Coordinates WHERE Loc_name = %s", (place_name))
data = cursor.fetchall()[0]
name=data[1]
conn.commit()
cursor.close()
conn.close()
mymap = Map(
identifier="view-side",
lat=data[2],
lng=data[3],
markers=[(37.4419, -122.1419)]
)
lat = data[2]
lon = data[3]
#The graph is made and passed on from here onwards
###################################################
title="Crime Rates"
crime_graph=pygal.Bar(width=600, height=600, explicit_size=True, title=title, style=BlueStyle, disable_xml_declaration=True, range=(0,10))
crime_labels=['Theft','Violence', 'Harassment']
conn = mysql.connect()
cursor = conn.cursor()
        # a stored procedure is not used here, to keep the query simple
cursor.execute("select avg(Theft), avg(Violence), avg(Harassment) from Security, Coordinates where Coordinates.Loc_id=Security.Loc_id and Coordinates.Loc_name=%s",(place_name))
data1=cursor.fetchone()
crime_values=[data1[0],data1[1],data1[2]]
crime_graph.x_labels=crime_labels
crime_graph.add('Rating', crime_values)
lifestyle_graph=pygal.Bar(width=1200, height=600, explicit_size=True, title="Living Standards", style=BlueStyle, disable_xml_declaration=True, range=(0,10))
cursor.close()
conn.close()
conn = mysql.connect()
cursor = conn.cursor()
        # a stored procedure is not used here, to keep the query simple
cursor.execute("SELECT avg(Water), avg(Electricity), avg(Network_Availability), avg(Cleanliness), avg(Green_space), avg(Local_Entertainment), avg(NightLife), avg(Repairmen_avail), avg(Education), avg(Neighbourhood) from LifeStyle, Coordinates where Coordinates.Loc_id=LifeStyle.Loc_id and Coordinates.Loc_name=%s",(place_name))
data1=cursor.fetchone()
lifestyle_values=[data1[0], data1[1], data1[2], data1[3], data1[4], data1[5], data1[6], data1[7], data1[8], data1[9]]
lifestyle_labels=["Water", "Electricity", "Network Availability", "Cleanliness", "Green Space", "Local Entertainment", "Night Life", "Services", "Education", "Neighbourhood"]
lifestyle_graph.x_labels=lifestyle_labels
lifestyle_graph.add('Rating', lifestyle_values)
graphs=[crime_graph, lifestyle_graph]
cursor.close()
conn.close()
########################################################
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT * FROM Review where Loc_id = (SELECT Loc_id from Coordinates where Loc_name=%s) ",(place_name))
dat = cursor.fetchall()
use_fec=[]
for review in dat:
cursor.execute("SELECT UserName from User where UserId = %s", review[0])
use_fec.append([cursor.fetchone()[0],review[2]])
print use_fec
return render_template('demo.html', use_fec=use_fec, rev_data=dat,name=name, mymap=mymap, data=data,lat = data[2], lon=data[3], graphs=graphs,dat=dat)
else:
return render_template('error.html',error = 'Unauthorized Access')
@app.route('/demo')
def userHome():
if session.get('user'):
mymap = Map(
identifier="view-side",
lat=37.4419,
lng=-122.1419,
markers=[(37.4419, -122.1419)]
)
return render_template('demo.html', mymap=mymap)
else:
return render_template('error.html',error = 'Unauthorized Access')
@app.route('/logout')
def logout():
session.pop('user',None)
return render_template('home.html')
if __name__ == "__main__":
app.debug = True
app.run()
| mit | -2,559,263,458,218,067,500 | 36.691406 | 335 | 0.611462 | false |
darren-wang/gl | glance/api/v1/upload_utils.py | 1 | 12541 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance_store as store_api
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import webob.exc
from glance.common import exception
from glance.common import store_utils
from glance.common import utils
import glance.db
from glance import i18n
import glance.registry.client.v1.api as registry
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
def initiate_deletion(req, location_data, id):
"""
Deletes image data from the location of backend store.
:param req: The WSGI/Webob Request object
:param location_data: Location to the image data in a data store
:param id: Opaque image identifier
"""
store_utils.delete_image_location_from_backend(req.context,
id, location_data)
def _kill(req, image_id, from_state):
"""
Marks the image status to `killed`.
:param req: The WSGI/Webob Request object
:param image_id: Opaque image identifier
:param from_state: Permitted current status for transition to 'killed'
"""
# TODO(dosaboy): http://docs.openstack.org/developer/glance/statuses.html
# needs updating to reflect the fact that queued->killed and saving->killed
# are both allowed.
registry.update_image_metadata(req.context, image_id,
{'status': 'killed'},
from_state=from_state)
def safe_kill(req, image_id, from_state):
"""
Mark image killed without raising exceptions if it fails.
Since _kill is meant to be called from exceptions handlers, it should
not raise itself, rather it should just log its error.
:param req: The WSGI/Webob Request object
:param image_id: Opaque image identifier
:param from_state: Permitted current status for transition to 'killed'
"""
try:
_kill(req, image_id, from_state)
except Exception:
LOG.exception(_LE("Unable to kill image %(id)s: ") % {'id': image_id})
def upload_data_to_store(req, image_meta, image_data, store, notifier):
"""
    Upload image data to the specified store.
    Uploads the image data to the store and cleans up on error.
"""
image_id = image_meta['id']
db_api = glance.db.get_api()
image_size = image_meta.get('size')
try:
# By default image_data will be passed as CooperativeReader object.
# But if 'user_storage_quota' is enabled and 'remaining' is not None
# then it will be passed as object of LimitingReader to
# 'store_add_to_backend' method.
image_data = utils.CooperativeReader(image_data)
remaining = glance.api.common.check_quota(
req.context, image_size, db_api, image_id=image_id)
if remaining is not None:
image_data = utils.LimitingReader(image_data, remaining)
(uri,
size,
checksum,
location_metadata) = store_api.store_add_to_backend(
image_meta['id'],
image_data,
image_meta['size'],
store,
context=req.context)
location_data = {'url': uri,
'metadata': location_metadata,
'status': 'active'}
try:
# recheck the quota in case there were simultaneous uploads that
# did not provide the size
glance.api.common.check_quota(
req.context, size, db_api, image_id=image_id)
except exception.StorageQuotaFull:
with excutils.save_and_reraise_exception():
LOG.info(_LI('Cleaning up %s after exceeding '
'the quota') % image_id)
store_utils.safe_delete_from_backend(
req.context, image_meta['id'], location_data)
def _kill_mismatched(image_meta, attr, actual):
supplied = image_meta.get(attr)
if supplied and supplied != actual:
msg = (_("Supplied %(attr)s (%(supplied)s) and "
"%(attr)s generated from uploaded image "
"(%(actual)s) did not match. Setting image "
"status to 'killed'.") % {'attr': attr,
'supplied': supplied,
'actual': actual})
LOG.error(msg)
safe_kill(req, image_id, 'saving')
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPBadRequest(explanation=msg,
content_type="text/plain",
request=req)
# Verify any supplied size/checksum value matches size/checksum
# returned from store when adding image
_kill_mismatched(image_meta, 'size', size)
_kill_mismatched(image_meta, 'checksum', checksum)
# Update the database with the checksum returned
# from the backend store
LOG.debug("Updating image %(image_id)s data. "
"Checksum set to %(checksum)s, size set "
"to %(size)d", {'image_id': image_id,
'checksum': checksum,
'size': size})
update_data = {'checksum': checksum,
'size': size}
try:
try:
state = 'saving'
image_meta = registry.update_image_metadata(req.context,
image_id,
update_data,
from_state=state)
except exception.Duplicate:
image = registry.get_image_metadata(req.context, image_id)
if image['status'] == 'deleted':
raise exception.NotFound()
else:
raise
except exception.NotAuthenticated as e:
# Delete image data due to possible token expiration.
LOG.debug("Authentication error - the token may have "
"expired during file upload. Deleting image data for "
" %s " % image_id)
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPUnauthorized(explanation=e.msg, request=req)
except exception.NotFound:
msg = _LI("Image %s could not be found after upload. The image may"
" have been deleted during the upload.") % image_id
LOG.info(msg)
# NOTE(jculp): we need to clean up the datastore if an image
# resource is deleted while the image data is being uploaded
#
# We get "location_data" from above call to store.add(), any
# exceptions that occur there handle this same issue internally,
# Since this is store-agnostic, should apply to all stores.
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPPreconditionFailed(explanation=msg,
request=req,
content_type='text/plain')
except store_api.StoreAddDisabled:
msg = _("Error in store configuration. Adding images to store "
"is disabled.")
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPGone(explanation=msg, request=req,
content_type='text/plain')
except exception.Duplicate as e:
msg = (_("Attempt to upload duplicate image: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
# NOTE(dosaboy): do not delete the image since it is likely that this
# conflict is a result of another concurrent upload that will be
# successful.
notifier.error('image.upload', msg)
raise webob.exc.HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden upload attempt: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except store_api.StorageFull as e:
msg = (_("Image storage media is full: %s") %
utils.exception_to_str(e))
LOG.error(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except store_api.StorageWriteDenied as e:
msg = (_("Insufficient permissions on image storage media: %s") %
utils.exception_to_str(e))
LOG.error(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPServiceUnavailable(explanation=msg,
request=req,
content_type='text/plain')
except exception.ImageSizeLimitExceeded as e:
msg = (_("Denying attempt to upload image larger than %d bytes.")
% CONF.image_size_cap)
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except exception.StorageQuotaFull as e:
msg = (_("Denying attempt to upload image because it exceeds the "
"quota: %s") % utils.exception_to_str(e))
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except webob.exc.HTTPError:
# NOTE(bcwaldon): Ideally, we would just call 'raise' here,
# but something in the above function calls is affecting the
# exception context and we must explicitly re-raise the
# caught exception.
msg = _LE("Received HTTP error while uploading image %s") % image_id
notifier.error('image.upload', msg)
with excutils.save_and_reraise_exception():
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
except (ValueError, IOError) as e:
msg = _("Client disconnected before sending all data to backend")
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
raise webob.exc.HTTPBadRequest(explanation=msg,
content_type="text/plain",
request=req)
except Exception as e:
msg = _("Failed to upload image %s") % image_id
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPInternalServerError(explanation=msg,
request=req,
content_type='text/plain')
return image_meta, location_data
| apache-2.0 | -4,218,632,327,757,525,500 | 41.225589 | 79 | 0.553225 | false |
Hellowlol/plexpy | plexpy/activity_handler.py | 1 | 11652 | # This file is part of PlexPy.
#
# PlexPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PlexPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PlexPy. If not, see <http://www.gnu.org/licenses/>.
import time
import plexpy
from plexpy import logger, pmsconnect, activity_processor, threading, notification_handler, helpers
class ActivityHandler(object):
def __init__(self, timeline):
self.timeline = timeline
# print timeline
def is_valid_session(self):
if 'sessionKey' in self.timeline:
if str(self.timeline['sessionKey']).isdigit():
return True
return False
def get_session_key(self):
if self.is_valid_session():
return int(self.timeline['sessionKey'])
return None
def get_live_session(self):
pms_connect = pmsconnect.PmsConnect()
session_list = pms_connect.get_current_activity()
for session in session_list['sessions']:
if int(session['session_key']) == self.get_session_key():
return session
return None
def update_db_session(self):
# Update our session temp table values
monitor_proc = activity_processor.ActivityProcessor()
monitor_proc.write_session(session=self.get_live_session(), notify=False)
def on_start(self):
if self.is_valid_session() and self.get_live_session():
logger.debug(u"PlexPy ActivityHandler :: Session %s has started." % str(self.get_session_key()))
# Fire off notifications
threading.Thread(target=notification_handler.notify,
kwargs=dict(stream_data=self.get_live_session(), notify_action='play')).start()
# Write the new session to our temp session table
self.update_db_session()
def on_stop(self, force_stop=False):
if self.is_valid_session():
logger.debug(u"PlexPy ActivityHandler :: Session %s has stopped." % str(self.get_session_key()))
# Set the session last_paused timestamp
ap = activity_processor.ActivityProcessor()
ap.set_session_last_paused(session_key=self.get_session_key(), timestamp=None)
# Update the session state and viewOffset
# Set force_stop to true to disable the state set
if not force_stop:
ap.set_session_state(session_key=self.get_session_key(),
state=self.timeline['state'],
view_offset=self.timeline['viewOffset'])
# Retrieve the session data from our temp table
db_session = ap.get_session_by_key(session_key=self.get_session_key())
# Fire off notifications
threading.Thread(target=notification_handler.notify,
kwargs=dict(stream_data=db_session, notify_action='stop')).start()
# Write it to the history table
monitor_proc = activity_processor.ActivityProcessor()
monitor_proc.write_session_history(session=db_session)
# Remove the session from our temp session table
ap.delete_session(session_key=self.get_session_key())
def on_pause(self):
if self.is_valid_session():
logger.debug(u"PlexPy ActivityHandler :: Session %s has been paused." % str(self.get_session_key()))
# Set the session last_paused timestamp
ap = activity_processor.ActivityProcessor()
ap.set_session_last_paused(session_key=self.get_session_key(), timestamp=int(time.time()))
# Update the session state and viewOffset
ap.set_session_state(session_key=self.get_session_key(),
state=self.timeline['state'],
view_offset=self.timeline['viewOffset'])
# Retrieve the session data from our temp table
db_session = ap.get_session_by_key(session_key=self.get_session_key())
# Fire off notifications
threading.Thread(target=notification_handler.notify,
kwargs=dict(stream_data=db_session, notify_action='pause')).start()
def on_resume(self):
if self.is_valid_session():
logger.debug(u"PlexPy ActivityHandler :: Session %s has been resumed." % str(self.get_session_key()))
# Set the session last_paused timestamp
ap = activity_processor.ActivityProcessor()
ap.set_session_last_paused(session_key=self.get_session_key(), timestamp=None)
# Update the session state and viewOffset
ap.set_session_state(session_key=self.get_session_key(),
state=self.timeline['state'],
view_offset=self.timeline['viewOffset'])
# Retrieve the session data from our temp table
db_session = ap.get_session_by_key(session_key=self.get_session_key())
# Fire off notifications
threading.Thread(target=notification_handler.notify,
kwargs=dict(stream_data=db_session, notify_action='resume')).start()
def on_buffer(self):
if self.is_valid_session():
logger.debug(u"PlexPy ActivityHandler :: Session %s is buffering." % self.get_session_key())
ap = activity_processor.ActivityProcessor()
db_stream = ap.get_session_by_key(session_key=self.get_session_key())
# Increment our buffer count
ap.increment_session_buffer_count(session_key=self.get_session_key())
# Get our current buffer count
current_buffer_count = ap.get_session_buffer_count(self.get_session_key())
logger.debug(u"PlexPy ActivityHandler :: Session %s buffer count is %s." %
(self.get_session_key(), current_buffer_count))
# Get our last triggered time
buffer_last_triggered = ap.get_session_buffer_trigger_time(self.get_session_key())
time_since_last_trigger = 0
if buffer_last_triggered:
logger.debug(u"PlexPy ActivityHandler :: Session %s buffer last triggered at %s." %
(self.get_session_key(), buffer_last_triggered))
time_since_last_trigger = int(time.time()) - int(buffer_last_triggered)
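            # Note on operator precedence below: 'and' binds tighter than 'or',
            # so the notification fires when the threshold is enabled and either
            # the buffer count has reached the threshold with no previous
            # trigger recorded, or BUFFER_WAIT seconds have passed since the
            # last trigger.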
if plexpy.CONFIG.BUFFER_THRESHOLD > 0 and (current_buffer_count >= plexpy.CONFIG.BUFFER_THRESHOLD and \
time_since_last_trigger == 0 or time_since_last_trigger >= plexpy.CONFIG.BUFFER_WAIT):
ap.set_session_buffer_trigger_time(session_key=self.get_session_key())
threading.Thread(target=notification_handler.notify,
kwargs=dict(stream_data=db_stream, notify_action='buffer')).start()
# This function receives events from our websocket connection
def process(self):
if self.is_valid_session():
ap = activity_processor.ActivityProcessor()
db_session = ap.get_session_by_key(session_key=self.get_session_key())
this_state = self.timeline['state']
this_key = str(self.timeline['ratingKey'])
# If we already have this session in the temp table, check for state changes
if db_session:
last_state = db_session['state']
last_key = str(db_session['rating_key'])
# Make sure the same item is being played
if this_key == last_key:
# Update the session state and viewOffset
if this_state == 'playing':
ap.set_session_state(session_key=self.get_session_key(),
state=this_state,
view_offset=self.timeline['viewOffset'])
# Start our state checks
if this_state != last_state:
if this_state == 'paused':
self.on_pause()
elif last_state == 'paused' and this_state == 'playing':
self.on_resume()
elif this_state == 'stopped':
self.on_stop()
elif this_state == 'buffering':
self.on_buffer()
# If a client doesn't register stop events (I'm looking at you PHT!) check if the ratingKey has changed
else:
# Manually stop and start
# Set force_stop so that we don't overwrite our last viewOffset
self.on_stop(force_stop=True)
self.on_start()
# Monitor if the stream has reached the watch percentage for notifications
# The only purpose of this is for notifications
progress_percent = helpers.get_percent(self.timeline['viewOffset'], db_session['duration'])
if progress_percent >= plexpy.CONFIG.NOTIFY_WATCHED_PERCENT and this_state != 'buffering':
threading.Thread(target=notification_handler.notify,
kwargs=dict(stream_data=db_session, notify_action='watched')).start()
else:
# We don't have this session in our table yet, start a new one.
if this_state != 'buffering':
self.on_start()
class TimelineHandler(object):
def __init__(self, timeline):
self.timeline = timeline
#logger.debug(timeline)
def is_item(self):
if 'itemID' in self.timeline:
return True
return False
def get_rating_key(self):
if self.is_item():
return int(self.timeline['itemID'])
return None
def get_metadata(self):
pms_connect = pmsconnect.PmsConnect()
metadata_list = pms_connect.get_metadata_details(self.get_rating_key())
if metadata_list:
return metadata_list['metadata']
return None
def on_created(self):
if self.is_item():
logger.debug(u"PlexPy TimelineHandler :: Library item %s has been added to Plex." % str(self.get_rating_key()))
# Fire off notifications
threading.Thread(target=notification_handler.notify_timeline,
kwargs=dict(timeline_data=self.get_metadata(), notify_action='created')).start()
# This function receives events from our websocket connection
def process(self):
if self.is_item():
this_state = self.timeline['state']
this_type = self.timeline['type']
this_metadataState = self.timeline.get('metadataState', None)
this_mediaState = self.timeline.get('mediaState', None)
# state: 5: done processing metadata
# type: 1: movie, 2: tv show, 4: episode, 8: artist, 10: track
types = [1, 2, 4, 8, 10]
if this_state == 5 and this_type in types and this_metadataState == None and this_mediaState == None:
self.on_created() | gpl-3.0 | 3,072,818,931,053,455,000 | 43.30038 | 123 | 0.588841 | false |
olliemath/Python-TinyEvolver | examples/Example3.py | 1 | 1280 | from tinyevolver import Population
import random
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
random.seed(1234)
"""
For this example we're going to try fiting a power of x to some data:
so e.g. d(t) = ax^n + b should resonably approximate some data
depending on t.
"""
# We want n to be integer, and a,b to be floats, so
prototype = [1.0, 1, 1.0]
# And we restrict the possible genes to these intervals:
bounds = [(0.0, 1.0), (0, 3), (0, 5.0)]
# How fit an individual is will depend on how well it approximates the
# data. So let's cook up some data:
times = range(20)
data = [0.5 * time ** 2 + 1.0 + random.uniform(0, 10) for time in times]
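# Fitness below is the inverse of the summed squared error between a candidate
# curve and the data, so closer fits score higher; 20.0 is just a scale factor.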
def fitness(ind):
curve = [ind[0] * time ** ind[1] + ind[2] for time in times]
square_error = [(f - d) ** 2 for f, d in zip(curve, data)]
# More error = less fit
try:
return 20.0 / sum(square_error)
except ZeroDivisionError:
return float('inf')
# Now to populate and evolve:
p = Population(prototype, bounds, fitness)
p.populate()
p.evolve()
# Let's see how we did:
if plt:
best_ind = p.best
best_fit = [best_ind[0] * time ** best_ind[1] + best_ind[2] for time in times]
plt.plot(times, data)
plt.plot(times, best_fit)
plt.show()
| gpl-2.0 | -5,832,435,131,763,731,000 | 25.666667 | 82 | 0.639063 | false |
scylladb/scylla-cluster-tests | sdcm/results_analyze/test.py | 1 | 18542 | import re
import typing
from datetime import datetime
import logging
from sdcm.es import ES
from test_lib.utils import get_class_by_path
from .base import ClassBase, __DEFAULT__
from .metrics import ScyllaTestMetrics
LOGGER = logging.getLogger(__name__)
ES_LUCENE_ESCAPE_REGEXP = re.compile(r'([^0-9a-zA-Z_.])')
class DateClassBase(ClassBase):
value: datetime.date = None
def load_from_es_data(self, es_data):
raise NotImplementedError()
def save_to_es_data(self):
raise NotImplementedError()
def save_to_report_data(self):
if self.value is None:
return None
return self.value.strftime('%Y-%m-%d')
def as_string(self, datetime_format='%Y-%m-%d'):
if self.value is None:
return None
return self.value.strftime(datetime_format)
def is_valid(self):
return self.value is not None
def _check_if_comparable(self, other):
if type(self) is not type(other):
raise ValueError(f"{self.__class__.__name__} can be compared only to same class")
if not self.is_valid():
raise ValueError(f"Can't compare {self.__class__.__name__} if it is not valid")
if not other.is_valid():
raise ValueError(f"Can't compare {self.__class__.__name__} to not valid instance")
def __le__(self, other):
self._check_if_comparable(other)
        return self.value <= other.value
def __lt__(self, other):
self._check_if_comparable(other)
return self.value < other.value
def __gt__(self, other):
self._check_if_comparable(other)
return self.value > other.value
def __ge__(self, other):
self._check_if_comparable(other)
return self.value >= other.value
def __eq__(self, other):
self._check_if_comparable(other)
return self.value == other.value
class FloatDateClassBase(DateClassBase):
def load_from_es_data(self, es_data):
try:
self.value = datetime.utcfromtimestamp(es_data)
except ValueError:
pass
def save_to_es_data(self):
if self.value is None:
return None
return self.value.timestamp()
class StrDateClassBase(DateClassBase):
value: datetime.date = None
_format = None
def load_from_es_data(self, es_data):
try:
self.value = datetime.strptime(es_data, self._format)
except ValueError:
pass
def save_to_es_data(self):
if self.value is None:
return None
return self.value.strftime(self._format)
class SoftwareVersionDate(StrDateClassBase):
_format = "%Y%m%d"
class SoftwareVersion(ClassBase):
as_string = None
def load_from_es_data(self, es_data):
try:
self.as_string = es_data
except ValueError:
pass
def save_to_es_data(self):
return self.as_string
@property
def as_int(self):
if self.as_string == '666.development' or self.as_string.endswith('.dev'):
return (100 ** 5) * 10
version_parts = self.as_string.split('.')
idx = 100**5
output = 0
for version_part in version_parts:
if version_part.isdecimal():
output += idx * int(version_part)
elif 'rc' in version_part:
output += (idx//100) * int(version_part.replace('rc', ''))
else:
raise ValueError(f"Can't parse version string {self.as_string}")
idx = idx // 100
return output
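    # Worked example for as_int (sketch): "4.3.1" encodes as
    # 4 * 100**5 + 3 * 100**4 + 1 * 100**3 == 40301000000, so plain integer
    # comparison orders versions component-wise; an "rc" component is weighted
    # at 1/100 of the normal weight for its position.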
@property
def major_as_int(self):
if self.as_string == '666.development' or self.as_string.endswith('.dev'):
return (100 ** 5) * 10
version_parts = self.as_string.split('.')
idx = 100**5
output = 0
if len(version_parts) >= 2:
for version_part in version_parts[:-1]:
if version_part.isdecimal():
output += idx * int(version_part)
else:
raise ValueError(f"Can't parse version string {self.as_string}")
idx = idx // 100
return output
class SoftwareVersionInfoBase(ClassBase):
name = None
version: SoftwareVersion = None
date: SoftwareVersionDate = None
commit: str = None
_es_data_mapping = {
'commit': 'commit_id'
}
def is_valid(self):
return self.version and self.date and self.date.is_valid()
def is_same_kind(self, other):
return self.__class__ is other.__class__ and other.name == self.name
def _check_if_can_compare(self, other):
if not self.is_same_kind(other):
raise ValueError(f"Can't compare {self.__class__.__name__} with other type")
if not self.is_valid() or not other.is_valid():
raise ValueError(f"Can't compare {self.__class__.__name__}, both should be valid")
def __le__(self, other):
self._check_if_can_compare(other)
self_version = self.version.as_int
other_version = other.version.as_int
if self_version != other_version:
return self_version <= other_version
return self.date <= other.date
def __lt__(self, other):
self._check_if_can_compare(other)
self_version = self.version.as_int
other_version = other.version.as_int
if self_version != other_version:
return self_version < other_version
return self.date < other.date
def __gt__(self, other):
self._check_if_can_compare(other)
self_version = self.version.as_int
other_version = other.version.as_int
if self_version != other_version:
return self_version > other_version
return self.date > other.date
def __ge__(self, other):
self._check_if_can_compare(other)
self_version = self.version.as_int
other_version = other.version.as_int
if self_version != other_version:
return self_version >= other_version
return self.date >= other.date
def __eq__(self, other):
self._check_if_can_compare(other)
self_version = self.version.as_int
other_version = other.version.as_int
if self_version != other_version:
return False
if self.date != other.date:
return False
return True
class ScyllaVersionInfo(SoftwareVersionInfoBase):
name = 'scylla-server'
class ScyllaEnterpriseVersionInfo(SoftwareVersionInfoBase):
name = 'scylla-enterprise-server'
class SoftwareVersions(ClassBase):
_es_data_mapping = {
'scylla_server': 'scylla-server',
'scylla_enterprise_server': 'scylla-enterprise-server',
}
scylla_server: ScyllaVersionInfo = None
scylla_enterprise_server: ScyllaEnterpriseVersionInfo = None
@property
def scylla_server_any(self) -> typing.Union[ScyllaVersionInfo, ScyllaEnterpriseVersionInfo]:
return self.scylla_server if self.scylla_server else self.scylla_enterprise_server
def is_valid(self):
for data_name in self.__annotations__.keys(): # pylint: disable=no-member
default = getattr(self.__class__, data_name)
value = getattr(self, data_name, None)
if value is default:
continue
if isinstance(value, ClassBase):
if not value.is_valid():
continue
return True
return False
class SctClusterBase(ClassBase):
nodes: int = None
type: str = None
gce_type: str = None
ami_id: str = None
def __eq__(self, other):
for name in ['nodes', 'type', 'gce_type']:
if getattr(self, name, None) != getattr(other, name, None):
return False
return True
def is_valid(self):
return bool(self.nodes and (self.type or self.gce_type))
class LoaderCluster(SctClusterBase):
_es_data_mapping = {
'nodes': 'n_loaders',
'type': 'instance_type_loader',
'gce_type': 'gce_instance_type_db',
'ami_id': 'ami_id_db_scylla'
}
class DbCluster(SctClusterBase):
_es_data_mapping = {
'nodes': 'n_db_nodes',
'type': 'instance_type_db',
'gce_type': 'gce_instance_type_db',
'ami_id': 'ami_id_db_scylla'
}
class MonitorCluster(SctClusterBase):
_es_data_mapping = {
'nodes': 'n_monitor_nodes',
'type': 'instance_type_monitor',
'gce_type': 'gce_instance_type_monitor',
'ami_id': 'ami_id_db_scylla'
}
class TestCompleteTime(StrDateClassBase):
_format = "%Y-%m-%d %H:%M"
class TestStartDate(FloatDateClassBase):
pass
class CassandraStressRateInfo(ClassBase):
threads: int = None
throttle: int = None
_es_data_mapping = {
'treads': 'rate threads',
'throttle': 'throttle threads',
}
class CassandraStressCMDInfo(ClassBase):
mode: str = None
no_warmup: bool = None
ops: str = None
raw_cmd: str = None
port: str = None
profile: str = None
cl: str = None
command: str = None
n: int = None
rate: CassandraStressRateInfo = None
_es_data_mapping = {
'no_warmup': 'no-warmup',
'rate': ''
}
def is_valid(self):
return bool(self.raw_cmd) and self.command
class GeminiCMDInfo(ClassBase):
mode: str = None
no_warmup: bool = None
ops: str = None
raw_cmd: str = None
port: str = None
profile: str = None
cl: str = None
command: str = None
n: int = None
rate: CassandraStressRateInfo = None
_es_data_mapping = {
'no_warmup': 'no-warmup',
'rate': ''
}
def is_valid(self):
return bool(self.raw_cmd) and self.command
class JenkinsRunInfo(ClassBase):
job_name: str = None
job_url: str = None
class TestResultClass(ClassBase):
_es_data_mapping = {
'metrics': '_source',
'subtest_name': '_source.test_details.sub_type',
'test_id': '_id',
'software': '_source.versions',
'grafana_snapshots': '_source.test_details.grafana_snapshots',
'grafana_screenshots': '_source.test_details.grafana_screenshots',
'es_index': '_index',
'main_test_id': '_source.test_details.test_id',
'setup_details': '_source.setup_details',
'test_name': '_source.test_details.test_name',
'db_cluster': '_source.setup_details',
'loader_cluster': '_source.setup_details',
'monitor_cluster': '_source.setup_details',
'complete_time': '_source.test_details.time_completed',
'start_time': '_source.test_details.start_time',
'jenkins': '_source.test_details',
'preload_cassandra_stress': '_source.test_details.preload-cassandra-stress',
'cassandra_stress': '_source.test_details.cassandra-stress',
'started_by': '_source.test_details.started_by', # Never there
'scylla_repo': '_source.setup_details.scylla_repo_m',
'scylla_repo_uuid': '_source.setup_details.scylla_repo_uuid', # Never there
'scylla_mgmt_repo': '_source.setup_details.scylla_mgmt_repo',
'backend': '_source.setup_details.cluster_backend',
'ostype': '_source.setup_details.cluster_backend.ostype', # Never there
'gemini_version': '_source.setup_details.gemini_version',
'status': '_source.status'
}
_es_field_indexes = ['_id']
test_id: str = None
main_test_id: str = None
backend: str = None
db_cluster: DbCluster = None
loader_cluster: LoaderCluster = None
monitor_cluster: MonitorCluster = None
metrics: ScyllaTestMetrics = None
subtest_name: str = None
software: SoftwareVersions = None
grafana_snapshots: list = []
grafana_screenshots: list = []
test_name: str = None
es_index: str = None
started_by: str = None
jenkins: JenkinsRunInfo = None
setup_details: dict = None
preload_cassandra_stress: CassandraStressCMDInfo = None
cassandra_stress: CassandraStressCMDInfo = None
complete_time: TestCompleteTime = None
start_time: TestStartDate = None
scylla_repo: str = None
scylla_repo_uuid: str = None
scylla_mgmt_repo: str = None
ostype: str = None
gemini_version: str = None
status: str = None
remark = ''
def __init__(self, es_data: dict, **kwargs):
self._es_data = es_data
super().__init__(es_data, **kwargs)
def is_valid(self):
return self.software and self.software.is_valid() and \
self.setup_details and self.test_id and \
self.monitor_cluster and self.monitor_cluster.is_valid() and \
self.loader_cluster and self.loader_cluster.is_valid() and \
self.db_cluster and self.db_cluster.is_valid()
def is_gce(self):
return self.setup_details and self.setup_details.get('cluster_backend', None) == 'gce'
@classmethod
def _get_es_filters(cls, depth=2):
tmp = []
for es_filter in cls._get_all_es_data_mapping().values():
es_filter = '.'.join(es_filter.split('.')[:depth])
if es_filter not in tmp:
tmp.append(es_filter)
return ['hits.hits.' + es_filter for es_filter in tmp]
@classmethod
def _get_es_query_from_instance_data(cls, instance_data: dict):
mappings = cls._get_all_es_data_mapping()
es_data = {}
        # TBD: _get_all_es_data_mapping overwrites the es data path when a _data_mapping value is a list
for data_path, data_value in instance_data.items():
es_data_path = mappings.get(data_path, None)
if es_data_path is None:
raise ValueError(
f"Unknown data path {data_path} only following are known:\n" + '\n'.join(mappings.keys()))
es_data[es_data_path] = data_value
return cls._get_es_query_from_es_data(es_data)
@classmethod
def _get_es_query_from_es_data(cls, es_data: dict):
filters = []
for es_data_path, data_value in es_data.items():
es_data_path = es_data_path.split('.')
if es_data_path[0] == '_source':
es_data_path = es_data_path[1:]
es_data_path = '.'.join(es_data_path)
es_data_path = cls._escape_filter_key(es_data_path)
if isinstance(data_value, str) and es_data_path not in cls._es_field_indexes and data_value != '*':
filters.append(f'{es_data_path}.keyword: \"{data_value}\"')
elif isinstance(data_value, bool):
filters.append(f'{es_data_path}: {str(data_value).lower()}')
else:
filters.append(f'{es_data_path}: {data_value}')
return ' AND '.join(filters)
@staticmethod
def _escape_filter_key(filter_key):
return ES_LUCENE_ESCAPE_REGEXP.sub(r'\\\1', filter_key)
def _get_es_query_from_self(self, data_path_patterns: list):
"""
        Builds an ES query from self parameters using the list of data path patterns in data_path_patterns.
        A data path pattern can end with some_attr.*, which tells it to build the query using every data point from the attribute some_attr.
"""
output = self._get_es_data_path_and_values_from_patterns(data_path_patterns, flatten=True)
return self._get_es_query_from_es_data(output)
@classmethod
def get_by_params(cls, es_index=es_index, **params):
es_query = cls._get_es_query_from_instance_data(params)
filter_path = cls._get_es_filters()
es_data = ES().search(
index=es_index,
q=es_query,
filter_path=filter_path,
size=10000)
if not es_data:
return []
es_data = es_data.get('hits', {}).get('hits', {})
if not es_data:
return []
return [cls(es_data=es_test_data) for es_test_data in es_data]
@classmethod
def get_by_test_id(cls, test_id, es_index):
return cls.get_by_params(es_index=es_index, test_id=test_id)
@classmethod
def get_metric_class(cls, metric_path, default=__DEFAULT__):
if default is __DEFAULT__:
return get_class_by_path(cls, metric_path)
return get_class_by_path(cls, metric_path, default)
@staticmethod
def gen_kibana_dashboard_url(
dashboard_path="app/kibana#/dashboard/03414b70-0e89-11e9-a976-2fe0f5890cd0?_g=()"
):
return "%s/%s" % (ES()._conf.get('kibana_url'), dashboard_path) # pylint: disable=protected-access
def get_subtests(self):
return self.get_by_params(es_index=self.es_index, main_test_id=self.test_id, subtest_name='*')
def get_same_tests_query(self):
list_of_attributes = [
'loader_cluster.type',
'loader_cluster.gce_type',
'loader_cluster.nodes',
'db_cluster.type',
'db_cluster.gce_type',
'db_cluster.nodes'
]
if self.jenkins and self.jenkins.is_valid():
list_of_attributes.append('jenkins.job_name')
if self.test_name:
list_of_attributes.append('test_name')
if self.subtest_name:
list_of_attributes.append('subtest_name')
if self.preload_cassandra_stress and self.preload_cassandra_stress.is_valid():
list_of_attributes.append('preload_cassandra_stress.*')
if self.cassandra_stress and self.cassandra_stress.is_valid():
list_of_attributes.extend([
'cassandra_stress.mode',
'cassandra_stress.no_warmup',
'cassandra_stress.ops',
'cassandra_stress.profile',
'cassandra_stress.cl',
'cassandra_stress.command',
'cassandra_stress.n',
'cassandra_stress.rate.*',
])
return self._get_es_query_from_self(list_of_attributes)
def get_prior_tests(self, filter_path=None) -> typing.List['TestResultClass']:
output = []
es_query = self.get_same_tests_query()
es_result = ES().search(index=self._es_data['_index'], q=es_query, filter_path=filter_path,
size=10000) # pylint: disable=unexpected-keyword-arg
es_result = es_result.get('hits', {}).get('hits', None) if es_result else None
if not es_result:
return output
for es_data in es_result: # pylint: disable=not-an-iterable
test = TestResultClass(es_data)
output.append(test)
return output
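# Illustrative usage sketch, not part of the original module. It shows how a
# stored result and its comparable prior runs might be fetched; the ES index
# name below is an assumed placeholder, not taken from this file.
def _example_prior_runs(test_id):
    results = TestResultClass.get_by_test_id(
        test_id=test_id, es_index='performance-results')  # index name assumed
    return results[0].get_prior_tests() if results else []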
| agpl-3.0 | -765,569,574,074,466,000 | 32.712727 | 121 | 0.594434 | false |
rgerkin/python-neo | neo/io/asciispiketrainio.py | 1 | 3629 | # -*- coding: utf-8 -*-
"""
Class for reading/writing SpikeTrains in a text file.
It is the simple case where different spiketrains are written line by line.
Supported : Read/Write
Author: sgarcia
"""
import os
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
from neo.core import Segment, SpikeTrain
class AsciiSpikeTrainIO(BaseIO):
"""
Class for reading/writing SpikeTrains in a text file.
Each Spiketrain is a line.
Usage:
>>> from neo import io
>>> r = io.AsciiSpikeTrainIO( filename = 'File_ascii_spiketrain_1.txt')
>>> seg = r.read_segment()
>>> print seg.spiketrains # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[<SpikeTrain(array([ 3.89981604, 4.73258781, 0.608428 , 4.60246277, 1.23805797,
...
"""
is_readable = True
is_writable = True
supported_objects = [Segment, SpikeTrain]
readable_objects = [Segment]
writeable_objects = [Segment]
has_header = False
is_streameable = False
read_params = {
Segment: [
('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
('t_start', {'value': 0., }),
]
}
write_params = {
Segment: [
('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
]
}
name = None
extensions = ['txt']
mode = 'file'
def __init__(self, filename=None):
"""
        This class reads/writes SpikeTrains in a text file.
Each row is a spiketrain.
**Arguments**
filename : the filename to read/write
"""
BaseIO.__init__(self)
self.filename = filename
def read_segment(self,
lazy=False,
delimiter='\t',
t_start=0. * pq.s,
unit=pq.s,
):
"""
Arguments:
            delimiter : column delimiter in the file: '\t', one space, two spaces, ',' or ';'
            t_start : start time of all spiketrains, 0 by default
            unit : unit of the spike times; can be a str or directly a Quantity
"""
assert not lazy, 'Do not support lazy'
unit = pq.Quantity(1, unit)
seg = Segment(file_origin=os.path.basename(self.filename))
f = open(self.filename, 'Ur')
for i, line in enumerate(f):
alldata = line[:-1].split(delimiter)
if alldata[-1] == '':
alldata = alldata[:-1]
if alldata[0] == '':
alldata = alldata[1:]
spike_times = np.array(alldata).astype('f')
t_stop = spike_times.max() * unit
sptr = SpikeTrain(spike_times * unit, t_start=t_start, t_stop=t_stop)
sptr.annotate(channel_index=i)
seg.spiketrains.append(sptr)
f.close()
seg.create_many_to_one_relationship()
return seg
def write_segment(self, segment,
delimiter='\t',
):
"""
        Write the SpikeTrains of a Segment to a txt file.
        Each row is a spiketrain.
        Arguments:
            segment : the segment to write. Only spiketrains will be written.
            delimiter : column delimiter in the file: '\t', one space, two spaces, ',' or ';'
        Note: the t_start information is lost.
"""
f = open(self.filename, 'w')
for s, sptr in enumerate(segment.spiketrains):
for ts in sptr:
f.write('{:f}{}'.format(ts, delimiter))
f.write('\n')
f.close()
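# Illustrative usage sketch, not part of the original module. A minimal
# write/read round trip, assuming the working directory is writable.
def _example_write_and_read():
    sptr = SpikeTrain(np.array([0.5, 1.2, 3.3]) * pq.s,
                      t_start=0. * pq.s, t_stop=4. * pq.s)
    seg = Segment(name='example')
    seg.spiketrains.append(sptr)
    io = AsciiSpikeTrainIO(filename='example_spiketrains.txt')
    io.write_segment(seg, delimiter='\t')
    return io.read_segment(delimiter='\t')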
| bsd-3-clause | -4,014,761,197,161,626,000 | 25.683824 | 97 | 0.527142 | false |
op3/hdtv | hdtv/backgroundmodels/exponential.py | 1 | 2206 | # -*- coding: utf-8 -*-
# HDTV - A ROOT-based spectrum analysis software
# Copyright (C) 2006-2009 The HDTV development team (see file AUTHORS)
#
# This file is part of HDTV.
#
# HDTV is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# HDTV is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with HDTV; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import ROOT
from .background import BackgroundModel
class BackgroundModelExponential(BackgroundModel):
"""
Exponential background model
"""
def __init__(self):
super(BackgroundModelExponential, self).__init__()
self.fParStatus = {"nparams": 2}
self.fValidParStatus = {"nparams": [int, "free"]}
self.ResetParamStatus()
self.name = "exponential"
self.requiredBgRegions = 1
def ResetParamStatus(self):
"""
Reset parameter status to defaults
"""
self.fParStatus["nparams"] = 2
def GetFitter(self, integrate, likelihood, nparams=None, nbg=None):
"""
Creates a C++ Fitter object, which can then do the real work
"""
if nparams is not None:
self.fFitter = ROOT.HDTV.Fit.ExpBg(nparams, integrate, likelihood)
self.fParStatus["nparams"] = nparams
elif isinstance(self.fParStatus["nparams"], int):
self.fFitter = ROOT.HDTV.Fit.ExpBg(
self.fParStatus["nparams"], integrate, likelihood
)
else:
msg = (
"Status specifier %s of background fitter is invalid."
                % self.fParStatus["nparams"]
)
raise ValueError(msg)
self.ResetGlobalParams()
return self.fFitter
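# Illustrative usage sketch, not part of the original module. The values
# passed for integrate and likelihood are assumed placeholders; they are not
# prescribed by this file.
def _example_get_fitter():
    model = BackgroundModelExponential()
    return model.GetFitter(integrate=True, likelihood="poisson", nparams=2)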
| gpl-2.0 | 4,411,505,221,114,811,400 | 32.424242 | 78 | 0.647325 | false |
wagnerand/olympia | src/olympia/signing/tests/test_views.py | 1 | 37839 | # -*- coding: utf-8 -*-
import json
import os
from datetime import datetime, timedelta
from django.conf import settings
from django.forms import ValidationError
from django.test.utils import override_settings
from django.utils import translation
import mock
import responses
from rest_framework.response import Response
from waffle.testutils import override_switch
from olympia import amo
from olympia.access.models import Group, GroupUser
from olympia.addons.models import Addon, AddonUser
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import (
addon_factory, reverse_ns, TestCase, developer_factory)
from olympia.api.tests.utils import APIKeyAuthTestMixin
from olympia.applications.models import AppVersion
from olympia.files.models import File, FileUpload
from olympia.lib.akismet.models import AkismetReport
from olympia.signing.views import VersionView
from olympia.users.models import UserProfile
from olympia.versions.models import Version
class SigningAPITestMixin(APIKeyAuthTestMixin):
def setUp(self):
self.user = developer_factory(email='[email protected]')
self.api_key = self.create_api_key(self.user, str(self.user.pk) + ':f')
class BaseUploadVersionTestMixin(SigningAPITestMixin):
def setUp(self):
super(BaseUploadVersionTestMixin, self).setUp()
self.guid = '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}'
addon_factory(
guid=self.guid, file_kw={'is_webextension': True},
version_kw={'version': '2.1.072'},
users=[self.user])
self.view = VersionView.as_view()
auto_sign_version_patcher = mock.patch(
'olympia.devhub.views.auto_sign_version')
self.auto_sign_version = auto_sign_version_patcher.start()
self.addCleanup(auto_sign_version_patcher.stop)
def url(self, guid, version, pk=None):
if guid is None:
args = [version]
else:
args = [guid, version]
if pk is not None:
args.append(pk)
return reverse_ns('signing.version', args=args)
def create_version(self, version):
response = self.request('PUT', self.url(self.guid, version), version)
assert response.status_code in [201, 202]
def xpi_filepath(self, addon, version):
return os.path.join(
'src', 'olympia', 'signing', 'fixtures',
'{addon}-{version}.xpi'.format(addon=addon, version=version))
def request(self, method='PUT', url=None, version='3.0',
addon='@upload-version', filename=None, channel=None,
extra_kwargs=None):
if filename is None:
filename = self.xpi_filepath(addon, version)
if url is None:
url = self.url(addon, version)
with open(filename) as upload:
data = {'upload': upload}
if method == 'POST' and version:
data['version'] = version
if channel:
data['channel'] = channel
return getattr(self.client, method.lower())(
url, data,
HTTP_AUTHORIZATION=self.authorization(),
format='multipart', **(extra_kwargs or {}))
def make_admin(self, user):
admin_group = Group.objects.create(name='Admin', rules='*:*')
GroupUser.objects.create(group=admin_group, user=user)
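# Illustrative usage sketch, not part of the original tests. Inside a
# subclass of BaseUploadVersionTestMixin, a version upload is typically
# exercised like this (mirroring the tests below):
#
#     response = self.request('PUT', self.url(self.guid, '3.0'))
#     assert response.status_code == 202
#     assert 'processed' in response.data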
class TestUploadVersion(BaseUploadVersionTestMixin, TestCase):
def test_not_authenticated(self):
# Use self.client.put so that we don't add the authorization header.
response = self.client.put(self.url(self.guid, '12.5'))
assert response.status_code == 401
def test_addon_does_not_exist(self):
guid = '@create-version'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request('PUT', addon=guid, version='1.0')
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert addon.guid == guid
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(latest_version)
assert not addon.tags.filter(tag_text='dynamic theme').exists()
def test_new_addon_random_slug_unlisted_channel(self):
guid = '@create-webextension'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request('PUT', addon=guid, version='1.0')
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert len(addon.slug) == 20
assert 'create' not in addon.slug
def test_user_does_not_own_addon(self):
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 403
assert response.data['error'] == 'You do not own this addon.'
def test_admin_does_not_own_addon(self):
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
self.make_admin(self.user)
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 403
assert response.data['error'] == 'You do not own this addon.'
def test_version_does_not_match_manifest_file(self):
response = self.request('PUT', self.url(self.guid, '2.5'))
assert response.status_code == 400
assert response.data['error'] == (
'Version does not match the manifest file.')
def test_version_already_exists(self):
response = self.request(
'PUT', self.url(self.guid, '2.1.072'), version='2.1.072')
assert response.status_code == 409
assert response.data['error'] == ('Version already exists. '
'Latest version is: 2.1.072.')
@mock.patch('olympia.devhub.views.Version.from_upload')
def test_no_version_yet(self, from_upload):
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202
assert 'processed' in response.data
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_added(self):
assert Addon.objects.get(guid=self.guid).status == amo.STATUS_PUBLIC
qs = Version.objects.filter(addon__guid=self.guid, version='3.0')
assert not qs.exists()
existing = Version.objects.filter(addon__guid=self.guid)
assert existing.count() == 1
assert existing[0].channel == amo.RELEASE_CHANNEL_LISTED
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202
assert 'processed' in response.data
version = qs.get()
assert version.addon.guid == self.guid
assert version.version == '3.0'
assert version.statuses[0][1] == amo.STATUS_AWAITING_REVIEW
assert version.addon.status == amo.STATUS_PUBLIC
assert version.channel == amo.RELEASE_CHANNEL_LISTED
self.auto_sign_version.assert_called_with(version)
assert not version.all_files[0].is_mozilla_signed_extension
assert not version.addon.tags.filter(tag_text='dynamic theme').exists()
def test_version_already_uploaded(self):
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202
assert 'processed' in response.data
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 409
assert response.data['error'] == ('Version already exists. '
'Latest version is: 3.0.')
def test_version_failed_review(self):
self.create_version('3.0')
version = Version.objects.get(addon__guid=self.guid, version='3.0')
version.update(reviewed=datetime.today())
version.files.get().update(reviewed=datetime.today(),
status=amo.STATUS_DISABLED)
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 409
assert response.data['error'] == ('Version already exists. '
'Latest version is: 3.0.')
# Verify that you can check the status after upload (#953).
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_added_is_experiment(self):
self.grant_permission(self.user, 'Experiments:submit')
guid = '@experiment-inside-webextension-guid'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
addon=guid, version='0.0.1',
filename='src/olympia/files/fixtures/files/'
'experiment_inside_webextension.xpi')
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(latest_version)
def test_version_added_is_experiment_reject_no_perm(self):
guid = '@experiment-inside-webextension-guid'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
addon=guid, version='0.1',
filename='src/olympia/files/fixtures/files/'
'experiment_inside_webextension.xpi')
assert response.status_code == 400
assert response.data['error'] == (
'You cannot submit this type of add-on')
def test_mozilla_signed_allowed(self):
guid = '@webextension-guid'
self.user.update(email='[email protected]')
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
addon=guid, version='0.0.1',
filename='src/olympia/files/fixtures/files/'
'webextension_signed_already.xpi')
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(latest_version)
assert latest_version.all_files[0].is_mozilla_signed_extension
def test_mozilla_signed_not_allowed_not_mozilla(self):
guid = '@webextension-guid'
self.user.update(email='[email protected]')
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
addon=guid, version='0.0.1',
filename='src/olympia/files/fixtures/files/'
'webextension_signed_already.xpi')
assert response.status_code == 400
assert response.data['error'] == (
'You cannot submit a Mozilla Signed Extension')
def test_system_addon_allowed(self):
guid = '[email protected]'
self.user.update(email='[email protected]')
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
addon=guid, version='0.0.1',
filename='src/olympia/files/fixtures/files/'
'mozilla_guid.xpi')
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(latest_version)
def test_system_addon_not_allowed_not_mozilla(self):
guid = '[email protected]'
self.user.update(email='[email protected]')
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
addon=guid, version='0.1',
filename='src/olympia/files/fixtures/files/'
'mozilla_guid.xpi')
assert response.status_code == 400
assert response.data['error'] == (
u'You cannot submit an add-on with a guid ending "@mozilla.org" '
u'or "@shield.mozilla.org" or "@pioneer.mozilla.org" '
u'or "@mozilla.com"')
def test_system_addon_update_allowed(self):
"""Updates to system addons are allowed from anyone."""
guid = '[email protected]'
self.user.update(email='[email protected]')
orig_addon = addon_factory(
guid='[email protected]',
version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED})
AddonUser.objects.create(
addon=orig_addon,
user=self.user)
response = self.request(
'PUT',
addon=guid, version='0.0.1',
filename='src/olympia/files/fixtures/files/'
'mozilla_guid.xpi')
assert response.status_code == 202
addon = Addon.unfiltered.filter(guid=guid).get()
assert addon.versions.count() == 2
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
self.auto_sign_version.assert_called_with(latest_version)
def test_invalid_version_response_code(self):
# This raises an error in parse_addon which is not covered by
# an exception handler.
response = self.request(
'PUT',
self.url(self.guid, '1.0'),
addon='@create-webextension-invalid-version',
version='1.0')
assert response.status_code == 400
def test_raises_response_code(self):
# A check that any bare error in handle_upload will return a 400.
with mock.patch('olympia.signing.views.devhub_handle_upload') as patch:
patch.side_effect = ValidationError(message='some error')
response = self.request('PUT', self.url(self.guid, '1.0'))
assert response.status_code == 400
def test_no_version_upload_for_admin_disabled_addon(self):
addon = Addon.objects.get(guid=self.guid)
addon.update(status=amo.STATUS_DISABLED)
response = self.request(
'PUT', self.url(self.guid, '3.0'), version='3.0')
assert response.status_code == 400
error_msg = 'cannot add versions to an addon that has status: %s.' % (
amo.STATUS_CHOICES_ADDON[amo.STATUS_DISABLED])
assert error_msg in response.data['error']
def test_channel_ignored_for_new_addon(self):
guid = '@create-version'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request('PUT', addon=guid, version='1.0',
channel='listed')
assert response.status_code == 201
addon = qs.get()
assert addon.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
def test_no_channel_selects_last_channel(self):
addon = Addon.objects.get(guid=self.guid)
assert addon.status == amo.STATUS_PUBLIC
assert addon.versions.count() == 1
assert addon.versions.all()[0].channel == amo.RELEASE_CHANNEL_LISTED
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202, response.data['error']
assert 'processed' in response.data
new_version = addon.versions.latest()
assert new_version.channel == amo.RELEASE_CHANNEL_LISTED
new_version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.request(
'PUT', self.url(self.guid, '4.0-beta1'), version='4.0-beta1')
assert response.status_code == 202, response.data['error']
assert 'processed' in response.data
third_version = addon.versions.latest()
assert third_version.channel == amo.RELEASE_CHANNEL_UNLISTED
def test_unlisted_channel_for_listed_addon(self):
addon = Addon.objects.get(guid=self.guid)
assert addon.status == amo.STATUS_PUBLIC
assert addon.versions.count() == 1
assert addon.versions.all()[0].channel == amo.RELEASE_CHANNEL_LISTED
response = self.request('PUT', self.url(self.guid, '3.0'),
channel='unlisted')
assert response.status_code == 202, response.data['error']
assert 'processed' in response.data
assert addon.versions.latest().channel == amo.RELEASE_CHANNEL_UNLISTED
def test_listed_channel_for_complete_listed_addon(self):
addon = Addon.objects.get(guid=self.guid)
assert addon.status == amo.STATUS_PUBLIC
assert addon.versions.count() == 1
assert addon.has_complete_metadata()
response = self.request('PUT', self.url(self.guid, '3.0'),
channel='listed')
assert response.status_code == 202, response.data['error']
assert 'processed' in response.data
assert addon.versions.latest().channel == amo.RELEASE_CHANNEL_LISTED
def test_listed_channel_fails_for_incomplete_addon(self):
addon = Addon.objects.get(guid=self.guid)
assert addon.status == amo.STATUS_PUBLIC
assert addon.versions.count() == 1
addon.current_version.update(license=None) # Make addon incomplete.
addon.versions.latest().update(channel=amo.RELEASE_CHANNEL_UNLISTED)
assert not addon.has_complete_metadata(
has_listed_versions=True)
response = self.request('PUT', self.url(self.guid, '3.0'),
channel='listed')
assert response.status_code == 400
error_msg = (
'You cannot add a listed version to this addon via the API')
assert error_msg in response.data['error']
@override_switch('akismet-spam-check', active=False)
def test_akismet_waffle_off(self):
addon = Addon.objects.get(guid=self.guid)
response = self.request(
'PUT', self.url(self.guid, '3.0'), channel='listed')
assert addon.versions.latest().channel == amo.RELEASE_CHANNEL_LISTED
assert AkismetReport.objects.count() == 0
assert response.status_code == 202
@override_switch('akismet-spam-check', active=True)
@mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
def test_akismet_reports_created_ham_outcome(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.HAM
addon = Addon.objects.get(guid=self.guid)
response = self.request(
'PUT', self.url(self.guid, '3.0'), channel='listed')
assert addon.versions.latest().channel == amo.RELEASE_CHANNEL_LISTED
assert response.status_code == 202
comment_check_mock.assert_called_once()
assert AkismetReport.objects.count() == 1
report = AkismetReport.objects.get()
assert report.comment_type == 'product-name'
assert report.comment == 'Upload Version Test XPI' # the addon's name
validation_response = self.get(self.url(self.guid, '3.0'))
assert validation_response.status_code == 200
assert 'spam' not in validation_response.content
@override_switch('akismet-spam-check', active=True)
@override_switch('akismet-addon-action', active=False)
@override_settings(AKISMET_API_KEY=None)
def test_akismet_reports_created_spam_outcome_logging_only(self):
akismet_url = settings.AKISMET_API_URL.format(
api_key='none', action='comment-check')
responses.add(responses.POST, akismet_url, json=True)
addon = Addon.objects.get(guid=self.guid)
response = self.request(
'PUT', self.url(self.guid, '3.0'), channel='listed')
assert addon.versions.latest().channel == amo.RELEASE_CHANNEL_LISTED
assert response.status_code == 202
assert AkismetReport.objects.count() == 1
report = AkismetReport.objects.get()
assert report.comment_type == 'product-name'
assert report.comment == 'Upload Version Test XPI' # the addon's name
assert report.result == AkismetReport.MAYBE_SPAM
validation_response = self.get(self.url(self.guid, '3.0'))
assert validation_response.status_code == 200
assert 'spam' not in validation_response.content
@override_switch('akismet-spam-check', active=True)
@override_switch('akismet-addon-action', active=True)
@override_settings(AKISMET_API_KEY=None)
def test_akismet_reports_created_spam_outcome_action_taken(self):
akismet_url = settings.AKISMET_API_URL.format(
api_key='none', action='comment-check')
responses.add(responses.POST, akismet_url, json=True)
addon = Addon.objects.get(guid=self.guid)
response = self.request(
'PUT', self.url(self.guid, '3.0'), channel='listed')
assert addon.versions.latest().channel == amo.RELEASE_CHANNEL_LISTED
assert response.status_code == 202
assert AkismetReport.objects.count() == 1
report = AkismetReport.objects.get()
assert report.comment_type == 'product-name'
assert report.comment == 'Upload Version Test XPI' # the addon's name
assert report.result == AkismetReport.MAYBE_SPAM
validation_response = self.get(self.url(self.guid, '3.0'))
assert validation_response.status_code == 200
assert 'spam' in validation_response.content
data = json.loads(validation_response.content)
assert data['validation_results']['messages'][0]['id'] == [
u'validation', u'messages', u'akismet_is_spam_name'
]
class TestUploadVersionWebextension(BaseUploadVersionTestMixin, TestCase):
def setUp(self):
super(TestUploadVersionWebextension, self).setUp()
AppVersion.objects.create(application=amo.FIREFOX.id, version='42.0')
AppVersion.objects.create(application=amo.FIREFOX.id, version='*')
def test_addon_does_not_exist_webextension(self):
response = self.request(
'POST',
url=reverse_ns('signing.version'),
addon='@create-webextension',
version='1.0')
assert response.status_code == 201
guid = response.data['guid']
addon = Addon.unfiltered.get(guid=guid)
assert addon.guid is not None
assert addon.guid != self.guid
version = Version.objects.get(addon__guid=guid, version='1.0')
assert version.files.all()[0].is_webextension is True
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(
latest_version)
def test_addon_does_not_exist_webextension_with_guid_in_url(self):
guid = '@custom-guid-provided'
# Override the filename self.request() picks, we want that specific
# file but with a custom guid.
filename = self.xpi_filepath('@create-webextension', '1.0')
response = self.request(
'PUT', # PUT, not POST, since we're specifying a guid in the URL.
filename=filename,
addon=guid, # Will end up in the url since we're not passing one.
version='1.0')
assert response.status_code == 201
assert response.data['guid'] == '@custom-guid-provided'
addon = Addon.unfiltered.get(guid=response.data['guid'])
assert addon.guid == '@custom-guid-provided'
version = Version.objects.get(addon__guid=guid, version='1.0')
assert version.files.all()[0].is_webextension is True
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(
latest_version)
def test_addon_does_not_exist_webextension_with_invalid_guid_in_url(self):
guid = 'custom-invalid-guid-provided'
# Override the filename self.request() picks, we want that specific
# file but with a custom guid.
filename = self.xpi_filepath('@create-webextension', '1.0')
response = self.request(
'PUT', # PUT, not POST, since we're specifying a guid in the URL.
filename=filename,
addon=guid, # Will end up in the url since we're not passing one.
version='1.0')
assert response.status_code == 400
assert response.data['error'] == u'Invalid GUID in URL'
assert not Addon.unfiltered.filter(guid=guid).exists()
def test_webextension_reuse_guid(self):
response = self.request(
'POST',
url=reverse_ns('signing.version'),
addon='@create-webextension-with-guid',
version='1.0')
guid = response.data['guid']
assert guid == '@webextension-with-guid'
addon = Addon.unfiltered.get(guid=guid)
assert addon.guid == '@webextension-with-guid'
def test_webextension_reuse_guid_but_only_create(self):
# Uploading the same version with the same id fails. People
# have to use the regular `PUT` endpoint for that.
response = self.request(
'POST',
url=reverse_ns('signing.version'),
addon='@create-webextension-with-guid',
version='1.0')
assert response.status_code == 201
response = self.request(
'POST',
url=reverse_ns('signing.version'),
addon='@create-webextension-with-guid',
version='1.0')
assert response.status_code == 400
assert response.data['error'] == 'Duplicate add-on ID found.'
def test_webextension_optional_version(self):
        # The guid and version declared in the uploaded add-on are returned
        # in the response.
response = self.request(
'POST',
url=reverse_ns('signing.version'),
addon='@create-webextension-with-guid-and-version',
version='99.0')
assert response.status_code == 201
assert (
response.data['guid'] ==
'@create-webextension-with-guid-and-version')
assert response.data['version'] == '99.0'
def test_webextension_resolve_translations(self):
fname = (
'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi')
response = self.request(
'POST',
url=reverse_ns('signing.version'),
addon='@notify-link-clicks-i18n',
version='1.0',
filename=fname)
assert response.status_code == 201
addon = Addon.unfiltered.get(guid=response.data['guid'])
# Normalized from `en` to `en-US`
assert addon.default_locale == 'en-US'
assert addon.name == 'Notify link clicks i18n'
assert addon.summary == (
'Shows a notification when the user clicks on links.')
translation.activate('de')
addon.reload()
assert addon.name == 'Meine Beispielerweiterung'
assert addon.summary == u'Benachrichtigt den Benutzer über Linkklicks'
def test_too_long_guid_not_in_manifest_forbidden(self):
fname = (
'src/olympia/files/fixtures/files/webextension_no_id.xpi')
guid = (
'this_guid_is_longer_than_the_limit_of_64_chars_see_bug_1201176_'
'and_should_fail@webextension-guid')
response = self.request(
'PUT',
url=self.url(guid, '1.0'),
version='1.0',
filename=fname)
assert response.status_code == 400
assert response.data == {
'error': (
u'Please specify your Add-on GUID in the manifest if it\'s '
u'longer than 64 characters.')
}
assert not Addon.unfiltered.filter(guid=guid).exists()
def test_too_long_guid_in_manifest_allowed(self):
fname = (
'src/olympia/files/fixtures/files/webextension_too_long_guid.xpi')
guid = (
'this_guid_is_longer_than_the_limit_of_64_chars_see_bug_1201176_'
'and_should_fail@webextension-guid')
response = self.request(
'PUT',
url=self.url(guid, '1.0'),
version='1.0',
filename=fname)
assert response.status_code == 201
assert Addon.unfiltered.filter(guid=guid).exists()
def test_dynamic_theme_tag_added(self):
addon = Addon.objects.get(guid=self.guid)
addon.current_version.update(version='0.9')
def parse_addon_wrapper(*args, **kwargs):
from olympia.files.utils import parse_addon
parsed = parse_addon(*args, **kwargs)
parsed['permissions'] = parsed.get('permissions', []) + ['theme']
return parsed
with mock.patch('olympia.devhub.tasks.parse_addon',
wraps=parse_addon_wrapper):
# But unlisted should be ignored
response = self.request(
'PUT', self.url(self.guid, '1.0'), version='1.0',
addon='@create-webextension', channel='unlisted')
assert response.status_code == 202, response.data['error']
assert not addon.tags.filter(tag_text='dynamic theme').exists()
addon.versions.latest().delete(hard=True)
# Only listed version get the tag
response = self.request(
'PUT', self.url(self.guid, '1.0'), version='1.0',
addon='@create-webextension', channel='listed')
assert response.status_code == 202, response.data['error']
assert addon.tags.filter(tag_text='dynamic theme').exists()
class TestCheckVersion(BaseUploadVersionTestMixin, TestCase):
def test_not_authenticated(self):
# Use self.client.get so that we don't add the authorization header.
response = self.client.get(self.url(self.guid, '12.5'))
assert response.status_code == 401
def test_addon_does_not_exist(self):
response = self.get(self.url('foo', '12.5'))
assert response.status_code == 404
assert response.data['error'] == (
'Could not find add-on with guid "foo".')
def test_user_does_not_own_addon(self):
self.create_version('3.0')
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 403
assert response.data['error'] == 'You do not own this addon.'
def test_admin_can_view(self):
self.create_version('3.0')
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.make_admin(self.user)
self.api_key = self.create_api_key(self.user, 'bar')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_does_not_exist(self):
response = self.get(self.url(self.guid, '2.5'))
assert response.status_code == 404
assert (response.data['error'] ==
'No uploaded file for that addon and version.')
def test_version_exists(self):
self.create_version('3.0')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_exists_with_pk(self):
# Mock Version.from_upload so the Version won't be created.
with mock.patch('olympia.devhub.tasks.Version.from_upload'):
self.create_version('3.0')
upload = FileUpload.objects.latest()
upload.update(created=datetime.today() - timedelta(hours=1))
self.create_version('3.0')
newer_upload = FileUpload.objects.latest()
assert newer_upload != upload
response = self.get(self.url(self.guid, '3.0', upload.uuid.hex))
assert response.status_code == 200
# For backwards-compatibility reasons, we return the uuid as "pk".
assert response.data['pk'] == upload.uuid.hex
assert 'processed' in response.data
def test_version_exists_with_pk_not_owner(self):
orig_user, orig_api_key = self.user, self.api_key
# This will create a version for the add-on with guid @create-version
# using a new user.
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
response = self.request('PUT', addon='@create-version', version='1.0')
assert response.status_code == 201
upload = FileUpload.objects.latest()
# Check that the user that created the upload can access it properly.
response = self.get(
self.url('@create-version', '1.0', upload.uuid.hex))
assert response.status_code == 200
assert 'processed' in response.data
# This will create a version for the add-on from the fixture with the
# regular fixture user.
self.user, self.api_key = orig_user, orig_api_key
self.create_version('3.0')
# Check that we can't access the FileUpload by uuid even if we pass in
# an add-on and version that we own if we don't own the FileUpload.
response = self.get(self.url(self.guid, '3.0', upload.uuid.hex))
assert response.status_code == 404
assert 'error' in response.data
def test_version_download_url(self):
version_string = '3.0'
qs = File.objects.filter(version__addon__guid=self.guid,
version__version=version_string)
assert not qs.exists()
self.create_version(version_string)
response = self.get(self.url(self.guid, version_string))
assert response.status_code == 200
file_ = qs.get()
assert response.data['files'][0]['download_url'] == absolutify(
reverse_ns('signing.file', kwargs={'file_id': file_.id}) +
'/{fname}?src=api'.format(fname=file_.filename))
def test_file_hash(self):
version_string = '3.0'
qs = File.objects.filter(version__addon__guid=self.guid,
version__version=version_string)
assert not qs.exists()
self.create_version(version_string)
response = self.get(self.url(self.guid, version_string))
assert response.status_code == 200
file_ = qs.get()
filename = self.xpi_filepath('@upload-version', version_string)
assert response.data['files'][0]['hash'] == \
file_.generate_hash(filename=filename)
def test_has_failed_upload(self):
addon = Addon.objects.get(guid=self.guid)
FileUpload.objects.create(addon=addon, version='3.0')
self.create_version('3.0')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
class TestSignedFile(SigningAPITestMixin, TestCase):
def setUp(self):
super(TestSignedFile, self).setUp()
self.file_ = self.create_file()
def url(self):
return reverse_ns('signing.file', args=[self.file_.pk])
def create_file(self):
addon = addon_factory(
name='thing', version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED},
users=[self.user])
return addon.latest_unlisted_version.all_files[0]
def test_can_download_once_authenticated(self):
response = self.get(self.url())
assert response.status_code == 200
assert response[settings.XSENDFILE_HEADER] == (
self.file_.file_path)
def test_cannot_download_without_authentication(self):
response = self.client.get(self.url()) # no auth
assert response.status_code == 401
def test_api_relies_on_version_downloader(self):
with mock.patch('olympia.versions.views.download_file') as df:
df.return_value = Response({})
self.get(self.url())
assert df.called is True
assert df.call_args[0][0].user == self.user
assert df.call_args[0][1] == str(self.file_.pk)
| bsd-3-clause | 6,569,520,159,358,038,000 | 41.135857 | 79 | 0.61943 | false |
manfer/LFP.bundle | Contents/Code/__init__.py | 1 | 2659 | # -*- coding: utf-8 -*-
TITLE = u'LFP'
PREFIX = '/video/lfp'
LFP_BASE_URL = 'http://www.laliga.es'
LFP_MULTIMEDIA = '%s/multimedia' % LFP_BASE_URL
LFP_ICON = 'lfp.png'
ICON = 'default-icon.png'
LFP_HL_ICON = 'highlights.png'
LFP_VIDEO_ICON = 'video.png'
LFP_PHOTO_ICON = 'photo.png'
LFP_LALIGATV_ICON = 'laligatv.png'
SEARCH_ICON = 'search-icon.png'
SETTINGS_ICON = 'settings-icon.png'
ART = 'futbol.jpg'
HTTP_HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Connection': 'keep-alive',
'Origin': LFP_BASE_URL,
'Referer': LFP_MULTIMEDIA
}
from lfputil import L
from lfpvideo import *
from lfpfoto import *
from laligatv import *
from lfpsearch import *
################################################################################
def Start():
Plugin.AddViewGroup('List', viewMode='List', mediaType='items')
Plugin.AddViewGroup('InfoList', viewMode='InfoList', mediaType='items')
Plugin.AddViewGroup('PanelStream', viewMode='PanelStream', mediaType='items')
ObjectContainer.title1 = TITLE
#ObjectContainer.view_group = 'List'
ObjectContainer.art = R(ART)
DirectoryObject.thumb = R(ICON)
DirectoryObject.art = R(ART)
PhotoAlbumObject.thumb = R(ICON)
HTTP.CacheTime = CACHE_1HOUR
################################################################################
@handler(PREFIX, TITLE, art=ART, thumb=LFP_ICON)
def lfp_main_menu():
oc = ObjectContainer()
oc.add(DirectoryObject(
key = Callback(lfp_resumenes),
title = L("Highlights"),
summary = L("enjoy lfp highlight videos"),
thumb = R(LFP_HL_ICON)
))
oc.add(DirectoryObject(
key = Callback(lfp_videos),
title = L("Other Videos"),
summary = L("enjoy other videos on lfp website"),
thumb = R(LFP_VIDEO_ICON)
))
oc.add(DirectoryObject(
key = Callback(lfp_fotos),
title = L("Photos"),
summary = L("enjoy the photos on lfp website"),
thumb = R(LFP_PHOTO_ICON)
))
oc.add(DirectoryObject(
key = Callback(lfp_laligatv),
title = L("La Liga TV"),
summary = L("enjoy live Adelante League matches"),
thumb = R(LFP_LALIGATV_ICON)
))
if Client.Product != 'PlexConnect':
oc.add(InputDirectoryObject(
key = Callback(lfp_search),
title = L('Search LFP Videos'),
prompt = L('Search for LFP Videos'),
summary = L('Search for LFP Videos'),
thumb = R(SEARCH_ICON)
))
return oc
| gpl-3.0 | 4,681,088,658,285,820,000 | 27.287234 | 91 | 0.608499 | false |
gooddata/openstack-nova | nova/context.py | 1 | 22746 | # Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of nova."""
from contextlib import contextmanager
import copy
import warnings
import eventlet.queue
import eventlet.timeout
from keystoneauth1.access import service_catalog as ksa_service_catalog
from keystoneauth1 import plugin
from oslo_context import context
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from nova import exception
from nova.i18n import _
from nova import objects
from nova import policy
from nova import utils
LOG = logging.getLogger(__name__)
# TODO(melwitt): This cache should be cleared whenever WSGIService receives a
# SIGHUP and periodically based on an expiration time. Currently, none of the
# cell caches are purged, so neither is this one, for now.
CELL_CACHE = {}
# NOTE(melwitt): Used for the scatter-gather utility to indicate we timed out
# waiting for a result from a cell.
did_not_respond_sentinel = object()
# NOTE(melwitt): Used for the scatter-gather utility to indicate an exception
# was raised gathering a result from a cell.
raised_exception_sentinel = object()
# FIXME(danms): Keep a global cache of the cells we find the
# first time we look. This needs to be refreshed on a timer or
# trigger.
CELLS = []
# Timeout value for waiting for cells to respond
CELL_TIMEOUT = 60
class _ContextAuthPlugin(plugin.BaseAuthPlugin):
"""A keystoneauth auth plugin that uses the values from the Context.
Ideally we would use the plugin provided by auth_token middleware however
this plugin isn't serialized yet so we construct one from the serialized
auth data.
"""
def __init__(self, auth_token, sc):
super(_ContextAuthPlugin, self).__init__()
self.auth_token = auth_token
self.service_catalog = ksa_service_catalog.ServiceCatalogV2(sc)
def get_token(self, *args, **kwargs):
return self.auth_token
def get_endpoint(self, session, service_type=None, interface=None,
region_name=None, service_name=None, **kwargs):
return self.service_catalog.url_for(service_type=service_type,
service_name=service_name,
interface=interface,
region_name=region_name)
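# Illustrative usage sketch, not part of the original module. The constructed
# plugin can be handed to a keystoneauth1 Session; how the session is then
# used is left to the caller.
def _example_session_from_context(ctxt):
    from keystoneauth1 import session as ks_session
    return ks_session.Session(auth=ctxt.get_auth_plugin())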
@enginefacade.transaction_context_provider
class RequestContext(context.RequestContext):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
def __init__(self, user_id=None, project_id=None, is_admin=None,
read_deleted="no", remote_address=None, timestamp=None,
quota_class=None, service_catalog=None,
user_auth_plugin=None, **kwargs):
""":param read_deleted: 'no' indicates deleted records are hidden,
'yes' indicates deleted records are visible,
'only' indicates that *only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param instance_lock_checked: This is not used and will be removed
in a future release.
:param user_auth_plugin: The auth plugin for the current request's
authentication data.
"""
if user_id:
kwargs['user_id'] = user_id
if project_id:
kwargs['project_id'] = project_id
if kwargs.pop('instance_lock_checked', None) is not None:
# TODO(mriedem): Let this be a hard failure in 19.0.0 (S).
warnings.warn("The 'instance_lock_checked' kwarg to "
"nova.context.RequestContext is no longer used and "
"will be removed in a future version.")
super(RequestContext, self).__init__(is_admin=is_admin, **kwargs)
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = timeutils.utcnow()
if isinstance(timestamp, six.string_types):
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
if service_catalog:
# Only include required parts of service_catalog
self.service_catalog = [s for s in service_catalog
if s.get('type') in ('image', 'block-storage', 'volumev3',
'key-manager', 'placement', 'network')]
else:
# if list is empty or none
self.service_catalog = []
# NOTE(markmc): this attribute is currently only used by the
# rs_limits turnstile pre-processor.
# See https://lists.launchpad.net/openstack/msg12200.html
self.quota_class = quota_class
# NOTE(dheeraj): The following attributes are used by cellsv2 to store
# connection information for connecting to the target cell.
# It is only manipulated using the target_cell contextmanager
# provided by this module
self.db_connection = None
self.mq_connection = None
self.cell_uuid = None
self.user_auth_plugin = user_auth_plugin
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self)
def get_auth_plugin(self):
if self.user_auth_plugin:
return self.user_auth_plugin
else:
return _ContextAuthPlugin(self.auth_token, self.service_catalog)
def _get_read_deleted(self):
return self._read_deleted
def _set_read_deleted(self, read_deleted):
if read_deleted not in ('no', 'yes', 'only'):
raise ValueError(_("read_deleted can only be one of 'no', "
"'yes' or 'only', not %r") % read_deleted)
self._read_deleted = read_deleted
def _del_read_deleted(self):
del self._read_deleted
read_deleted = property(_get_read_deleted, _set_read_deleted,
_del_read_deleted)
def to_dict(self):
values = super(RequestContext, self).to_dict()
# FIXME(dims): defensive hasattr() checks need to be
# removed once we figure out why we are seeing stack
# traces
values.update({
'user_id': getattr(self, 'user_id', None),
'project_id': getattr(self, 'project_id', None),
'is_admin': getattr(self, 'is_admin', None),
'read_deleted': getattr(self, 'read_deleted', 'no'),
'remote_address': getattr(self, 'remote_address', None),
'timestamp': utils.strtime(self.timestamp) if hasattr(
self, 'timestamp') else None,
'request_id': getattr(self, 'request_id', None),
'quota_class': getattr(self, 'quota_class', None),
'user_name': getattr(self, 'user_name', None),
'service_catalog': getattr(self, 'service_catalog', None),
'project_name': getattr(self, 'project_name', None),
})
        # NOTE(tonyb): This can be removed once we're certain that a
        # RequestContext contains 'is_admin_project'. We can only get away with
# this because we "know" the default value of 'is_admin_project' which
# is very fragile.
values.update({
'is_admin_project': getattr(self, 'is_admin_project', True),
})
return values
@classmethod
def from_dict(cls, values):
return super(RequestContext, cls).from_dict(
values,
user_id=values.get('user_id'),
project_id=values.get('project_id'),
# TODO(sdague): oslo.context has show_deleted, if
# possible, we should migrate to that in the future so we
# don't need to be different here.
read_deleted=values.get('read_deleted', 'no'),
remote_address=values.get('remote_address'),
timestamp=values.get('timestamp'),
quota_class=values.get('quota_class'),
service_catalog=values.get('service_catalog'),
)
def elevated(self, read_deleted=None):
"""Return a version of this context with admin flag set."""
context = copy.copy(self)
# context.roles must be deepcopied to leave original roles
# without changes
context.roles = copy.deepcopy(self.roles)
context.is_admin = True
if 'admin' not in context.roles:
context.roles.append('admin')
if read_deleted is not None:
context.read_deleted = read_deleted
return context
def can(self, action, target=None, fatal=True):
"""Verifies that the given action is valid on the target in this context.
:param action: string representing the action to be checked.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``.
If None, then this default target will be considered:
{'project_id': self.project_id, 'user_id': self.user_id}
:param fatal: if False, will return False when an exception.Forbidden
occurs.
:raises nova.exception.Forbidden: if verification fails and fatal is
True.
:return: returns a non-False value (not necessarily "True") if
authorized and False if not authorized and fatal is False.
"""
if target is None:
target = {'project_id': self.project_id,
'user_id': self.user_id}
try:
return policy.authorize(self, action, target)
except exception.Forbidden:
if fatal:
raise
return False
def to_policy_values(self):
policy = super(RequestContext, self).to_policy_values()
policy['is_admin'] = self.is_admin
return policy
def __str__(self):
return "<Context %s>" % self.to_dict()
def get_context():
"""A helper method to get a blank context.
Note that overwrite is False here so this context will not update the
greenthread-local stored context that is used when logging.
"""
return RequestContext(user_id=None,
project_id=None,
is_admin=False,
overwrite=False)
def get_admin_context(read_deleted="no"):
# NOTE(alaski): This method should only be used when an admin context is
# necessary for the entirety of the context lifetime. If that's not the
# case please use get_context(), or create the RequestContext manually, and
# use context.elevated() where necessary. Some periodic tasks may use
# get_admin_context so that their database calls are not filtered on
# project_id.
return RequestContext(user_id=None,
project_id=None,
is_admin=True,
read_deleted=read_deleted,
overwrite=False)
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def require_context(ctxt):
"""Raise exception.Forbidden() if context is not a user or an
admin context.
"""
if not ctxt.is_admin and not is_user_context(ctxt):
raise exception.Forbidden()
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project_id:
raise exception.Forbidden()
elif context.project_id != project_id:
raise exception.Forbidden()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user_id:
raise exception.Forbidden()
elif context.user_id != user_id:
raise exception.Forbidden()
def authorize_quota_class_context(context, class_name):
"""Ensures a request has permission to access the given quota class."""
if is_user_context(context):
if not context.quota_class:
raise exception.Forbidden()
elif context.quota_class != class_name:
raise exception.Forbidden()
def set_target_cell(context, cell_mapping):
"""Adds database connection information to the context
for communicating with the given target_cell.
This is used for permanently targeting a cell in a context.
Use this when you want all subsequent code to target a cell.
Passing None for cell_mapping will untarget the context.
:param context: The RequestContext to add connection information
:param cell_mapping: An objects.CellMapping object or None
"""
global CELL_CACHE
if cell_mapping is not None:
# avoid circular import
from nova.db import api as db
from nova import rpc
# Synchronize access to the cache by multiple API workers.
@utils.synchronized(cell_mapping.uuid)
def get_or_set_cached_cell_and_set_connections():
try:
cell_tuple = CELL_CACHE[cell_mapping.uuid]
except KeyError:
db_connection_string = cell_mapping.database_connection
context.db_connection = db.create_context_manager(
db_connection_string)
if not cell_mapping.transport_url.startswith('none'):
context.mq_connection = rpc.create_transport(
cell_mapping.transport_url)
context.cell_uuid = cell_mapping.uuid
CELL_CACHE[cell_mapping.uuid] = (context.db_connection,
context.mq_connection)
else:
context.db_connection = cell_tuple[0]
context.mq_connection = cell_tuple[1]
context.cell_uuid = cell_mapping.uuid
get_or_set_cached_cell_and_set_connections()
else:
context.db_connection = None
context.mq_connection = None
context.cell_uuid = None
@contextmanager
def target_cell(context, cell_mapping):
"""Yields a new context with connection information for a specific cell.
This function yields a copy of the provided context, which is targeted to
the referenced cell for MQ and DB connections.
    Passing None for cell_mapping will yield an untargeted copy of the context.
:param context: The RequestContext to add connection information
:param cell_mapping: An objects.CellMapping object or None
"""
# Create a sanitized copy of context by serializing and deserializing it
    # (like we would do over RPC). This helps ensure that we have a clean
# copy of the context with all the tracked attributes, but without any
# of the hidden/private things we cache on a context. We do this to avoid
# unintentional sharing of cached thread-local data across threads.
# Specifically, this won't include any oslo_db-set transaction context, or
# any existing cell targeting.
cctxt = RequestContext.from_dict(context.to_dict())
set_target_cell(cctxt, cell_mapping)
yield cctxt
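# Illustrative usage sketch of target_cell() (names below are hypothetical;
# `ctxt` is an existing RequestContext, `mapping` a loaded objects.CellMapping):
#
#     with target_cell(ctxt, mapping) as cctxt:
#         do_cell_work(cctxt)   # DB/MQ calls made with cctxt hit that cell
#
# Passing None instead of `mapping` yields an untargeted copy of the context.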
def scatter_gather_cells(context, cell_mappings, timeout, fn, *args, **kwargs):
"""Target cells in parallel and return their results.
The first parameter in the signature of the function to call for each cell
should be of type RequestContext.
:param context: The RequestContext for querying cells
:param cell_mappings: The CellMappings to target in parallel
:param timeout: The total time in seconds to wait for all the results to be
gathered
:param fn: The function to call for each cell
:param args: The args for the function to call for each cell, not including
the RequestContext
:param kwargs: The kwargs for the function to call for each cell
:returns: A dict {cell_uuid: result} containing the joined results. The
did_not_respond_sentinel will be returned if a cell did not
respond within the timeout. The raised_exception_sentinel will
be returned if the call to a cell raised an exception. The
exception will be logged.
"""
greenthreads = []
queue = eventlet.queue.LightQueue()
results = {}
def gather_result(cell_mapping, fn, context, *args, **kwargs):
cell_uuid = cell_mapping.uuid
try:
with target_cell(context, cell_mapping) as cctxt:
result = fn(cctxt, *args, **kwargs)
except Exception:
LOG.exception('Error gathering result from cell %s', cell_uuid)
result = raised_exception_sentinel
# The queue is already synchronized.
queue.put((cell_uuid, result))
for cell_mapping in cell_mappings:
greenthreads.append((cell_mapping.uuid,
utils.spawn(gather_result, cell_mapping,
fn, context, *args, **kwargs)))
with eventlet.timeout.Timeout(timeout, exception.CellTimeout):
try:
while len(results) != len(greenthreads):
cell_uuid, result = queue.get()
results[cell_uuid] = result
except exception.CellTimeout:
# NOTE(melwitt): We'll fill in did_not_respond_sentinels at the
# same time we kill/wait for the green threads.
pass
# Kill the green threads still pending and wait on those we know are done.
for cell_uuid, greenthread in greenthreads:
if cell_uuid not in results:
greenthread.kill()
results[cell_uuid] = did_not_respond_sentinel
LOG.warning('Timed out waiting for response from cell %s',
cell_uuid)
else:
greenthread.wait()
return results
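# Illustrative sketch of consuming scatter_gather_cells() results (assumes
# `ctxt`, `cell_mappings` and a cell-aware `fn` exist; `fn` takes a
# RequestContext as its first argument):
#
#     results = scatter_gather_cells(ctxt, cell_mappings, CELL_TIMEOUT, fn)
#     for cell_uuid, result in results.items():
#         if result is did_not_respond_sentinel:
#             pass  # that cell timed out
#         elif result is raised_exception_sentinel:
#             pass  # fn raised inside that cell (already logged above)
#         else:
#             pass  # use the per-cell result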
def load_cells():
global CELLS
if not CELLS:
CELLS = objects.CellMappingList.get_all(get_admin_context())
LOG.debug('Found %(count)i cells: %(cells)s',
dict(count=len(CELLS),
cells=','.join([c.identity for c in CELLS])))
if not CELLS:
LOG.error('No cells are configured, unable to continue')
def scatter_gather_skip_cell0(context, fn, *args, **kwargs):
"""Target all cells except cell0 in parallel and return their results.
The first parameter in the signature of the function to call for
each cell should be of type RequestContext. There is a timeout for
waiting on all results to be gathered.
:param context: The RequestContext for querying cells
:param fn: The function to call for each cell
:param args: The args for the function to call for each cell, not including
the RequestContext
:param kwargs: The kwargs for the function to call for each cell
:returns: A dict {cell_uuid: result} containing the joined results. The
did_not_respond_sentinel will be returned if a cell did not
respond within the timeout. The raised_exception_sentinel will
be returned if the call to a cell raised an exception. The
exception will be logged.
"""
load_cells()
cell_mappings = [cell for cell in CELLS if not cell.is_cell0()]
return scatter_gather_cells(context, cell_mappings, CELL_TIMEOUT,
fn, *args, **kwargs)
def scatter_gather_single_cell(context, cell_mapping, fn, *args, **kwargs):
"""Target the provided cell and return its results or sentinels in case of
failure.
The first parameter in the signature of the function to call for each cell
should be of type RequestContext.
:param context: The RequestContext for querying cells
:param cell_mapping: The CellMapping to target
:param fn: The function to call for each cell
:param args: The args for the function to call for each cell, not including
the RequestContext
:param kwargs: The kwargs for the function to call for this cell
:returns: A dict {cell_uuid: result} containing the joined results. The
did_not_respond_sentinel will be returned if the cell did not
respond within the timeout. The raised_exception_sentinel will
be returned if the call to the cell raised an exception. The
exception will be logged.
"""
return scatter_gather_cells(context, [cell_mapping], CELL_TIMEOUT, fn,
*args, **kwargs)
def scatter_gather_all_cells(context, fn, *args, **kwargs):
"""Target all cells in parallel and return their results.
The first parameter in the signature of the function to call for
each cell should be of type RequestContext. There is a timeout for
waiting on all results to be gathered.
:param context: The RequestContext for querying cells
:param fn: The function to call for each cell
:param args: The args for the function to call for each cell, not including
the RequestContext
:param kwargs: The kwargs for the function to call for each cell
:returns: A dict {cell_uuid: result} containing the joined results. The
did_not_respond_sentinel will be returned if a cell did not
respond within the timeout. The raised_exception_sentinel will
be returned if the call to a cell raised an exception. The
exception will be logged.
"""
load_cells()
return scatter_gather_cells(context, CELLS, CELL_TIMEOUT,
fn, *args, **kwargs)
| apache-2.0 | -4,422,274,522,955,957,000 | 39.763441 | 81 | 0.636859 | false |
buzztroll/staccato | staccato/scheduler/simple_thread.py | 1 | 1673 | import time
import staccato.openstack.common.service as os_service
import staccato.xfer.events as s_events
import staccato.xfer.executor as s_executor
import staccato.xfer.constants as s_constants
from staccato.xfer.constants import Events
import staccato.db as s_db
class SimpleCountSchedler(os_service.Service):
def __init__(self, conf):
super(SimpleCountSchedler, self).__init__()
self.max_at_once = 1
self.db_obj = s_db.StaccatoDB(conf)
self.executor = s_executor.SimpleThreadExecutor(conf)
self.state_machine = s_events.XferStateMachine(self.executor)
self.running = 0
self.done = False
self._started_ids = []
def _poll_db(self):
while not self.done:
time.sleep(1)
self._check_for_transfers()
def _new_transfer(self, request):
self.running += 1
self._started_ids.append(request.id)
self.state_machine.event_occurred(Events.EVENT_START,
xfer_request=request,
db=self.db_obj)
def _transfer_complete(self):
self.running -= 1
def _check_for_transfers(self):
requests = self.db_obj.get_xfer_requests(self._started_ids)
for r in requests:
if s_constants.is_state_done_running(r.state):
self._started_ids.remove(r.id)
avail = self.max_at_once - len(self._started_ids)
xfer_request_ready = self.db_obj.get_all_ready(limit=avail)
for request in xfer_request_ready:
self._new_transfer(request)
def start(self):
self.tg.add_thread(self._poll_db)
| apache-2.0 | 3,352,977,460,077,363,700 | 33.142857 | 69 | 0.616856 | false |
smlng/bgp-stats | src/python/bgp-rib-stats.py | 1 | 13070 | #!/usr/bin/python
from __future__ import print_function
import argparse
import gzip
import os
import radix
import re
import sys
from bz2 import BZ2File
from datetime import datetime, timedelta
from multiprocessing import Process, Queue, cpu_count, current_process
from netaddr import IPSet
# own imports
import mrtx
verbose = False
warning = False
logging = False
re_file_rv = re.compile('rib.(\d+).(\d\d\d\d).bz2')
re_file_rr = re.compile('bview.(\d+).(\d\d\d\d).gz')
re_path_rv = re.compile('.*/([a-z0-9\.-]+)/bgpdata/\d\d\d\d.\d\d/RIBS.*')
re_path_rr = re.compile('.*/(rrc\d\d)/\d\d\d\d.\d\d.*')
reserved_ipv4 = IPSet (['0.0.0.0/8', # host on this network (RFC1122)
'10.0.0.0/8','172.16.0.0/12','192.168.0.0/16', # private address space (RFC1918)
'100.64.0.0/10', # shared address space (RFC6598)
'127.0.0.0/8', # loopback (RFC1122)
'169.254.0.0/16', # linklocal (RFC3927)
'192.0.0.0/24', # special purpose (RFC6890)
'192.0.0.0/29', # DS-lite (RFC6333)
'192.0.2.0/24','198.51.100.0/24','203.0.113.0/24', # test net 1-3 (RFC5737)
'224.0.0.0/4', # multicast address space
'240.0.0.0/4', # future use (RFC1122)
'255.255.255.255/32' # limited broadcast
])
existing_data = list()
'''
OUTPUT FORMAT:
timestamp|date ; input type (RIB|UPDATE) ; source (route-views.xyz| rrcXY) ; \
#ipv4-prefixes/pfxlength (1..32) ; #ipv4 moas ; #ipv4 bogus \
[; #ipv6-prefix/pfxlength ; #ipv6 moas ; #ipv6 bogus ]
NOTE:
- #ips covered can be derived from #pfx/pfx_len
'''
def print_log(*objs):
if logging or verbose:
print("[LOGS] .", *objs, file=sys.stdout)
def print_info(*objs):
if verbose:
print("[INFO] ..", *objs, file=sys.stdout)
def print_warn(*objs):
if warning or verbose:
print("[WARN] ", *objs, file=sys.stderr)
def print_error(*objs):
print("[ERROR] ", *objs, file=sys.stderr)
def loadPtree(fin):
print_log("call loadPtree (%s)" % (fin))
    f = gzip.open(fin, 'rb') if fin.lower().endswith('.gz') else BZ2File(fin, 'rb')
data = mrtx.parse_mrt_file(f, print_progress=verbose)
f.close()
ptree = radix.Radix()
for prefix, origins in data.items():
pnode = ptree.add(prefix)
pnode.data['asn'] = list()
for o in list(origins):
if o not in pnode.data['asn']:
pnode.data['asn'].append(str(o))
pnode.data['moas'] = len(pnode.data['asn'])
return ptree
# add num_pfx to stats
def getStats (ptree):
print_log("call getStats")
pfxlen = dict()
asn = dict()
num_pfx_moas = 0
# eval prefix tree
for p in ptree:
pl = int(p.prefixlen)
for a in p.data['asn']:
if a not in asn:
asn[a] = list()
asn[a].append(p.prefix)
if p.data['moas'] > 1:
num_pfx_moas += 1
if pl not in pfxlen:
pfxlen[pl] = list()
pfxlen[pl].append(p.prefix)
# asn results
num_asn = len(asn.keys())
num_asn_pfx = list()
num_asn_ips = list()
for a in asn:
num_asn_pfx.append(len(asn[a]))
num_asn_ips.append(len(IPSet(asn[a])))
# min, max, avg/mean, median
if len(num_asn_pfx) < 1:
num_asn_pfx.append(0)
if len(num_asn_ips) < 1:
num_asn_ips.append(0)
min_asn_pfx = min(num_asn_pfx)
max_asn_pfx = max(num_asn_pfx)
avg_asn_pfx = sum(num_asn_pfx)/len(num_asn_pfx)
med_asn_pfx = sorted(num_asn_pfx)[int(round(len(num_asn_pfx)/2))]
min_asn_ips = min(num_asn_ips)
max_asn_ips = max(num_asn_ips)
avg_asn_ips = sum(num_asn_ips)/len(num_asn_ips)
med_asn_ips = sorted(num_asn_ips)[int(round(len(num_asn_ips)/2))]
# prefix and ip results
pl_dict = dict()
for i in range(32): # init with all 0
pl_dict[i+1] = 0
for pl in pfxlen:
pl_dict[pl] = len(pfxlen[pl])
pkeys = sorted(pfxlen.keys(),reverse=False)
prefixIPs = IPSet()
for pk in pkeys:
print_info ("prefix length: "+str(pk)+", #prefixes: "+ str(len(pfxlen[pk])))
prefixIPs = prefixIPs | IPSet(pfxlen[pk])
num_bogus_ips = len(prefixIPs & reserved_ipv4)
num_pfx_ips = len(prefixIPs)
num_pfx = len(ptree.prefixes())
ret = list()
for i in range(32):
ret.append(pl_dict[i+1])
ret.extend([num_pfx,num_pfx_ips,num_bogus_ips,num_pfx_moas,num_asn])
ret.extend([min_asn_pfx,max_asn_pfx,avg_asn_pfx,med_asn_pfx])
ret.extend([min_asn_ips,max_asn_ips,avg_asn_ips,med_asn_ips])
return ret
stats_header = ["pl01","pl02","pl03","pl04","pl05","pl06","pl07","pl08",
"pl09","pl10","pl11","pl12","pl13","pl14","pl15","pl16",
"pl17","pl18","pl19","pl20","pl21","pl22","pl23","pl24",
"pl25","pl26","pl27","pl28","pl29","pl30","pl31","pl32",
"num_pfx","num_pfx_ips","num_bog_ips","num_pfx_moa","num_asn",
"min_asn_pfx","max_asn_pfx","avg_asn_pfx","med_asn_pfx",
"min_asn_ips","max_asn_ips","avg_asn_ips","med_asn_ips"]
def parseFilename(fin):
print_log("call parseFilename (%s)" % (fin))
maptype = 'none'
subtype = 'none'
pn, fn = os.path.split(fin)
if re_path_rr.match(pn):
m = re_path_rr.match(pn)
maptype = 'riperis'
subtype = m.group(1)
elif re_path_rv.match(pn):
m = re_path_rv.match(pn)
maptype = 'routeviews'
subtype = m.group(1)
else:
print_warn("Unknown BGP data source (pathname).")
date = '19700101'
time = '0000'
if re_file_rr.match(fn):
maptype = 'riperis'
m = re_file_rr.match(fn)
date = m.group(1)
time = m.group(2)
elif re_file_rv.match(fn):
maptype = 'routeviews'
m = re_file_rv.match(fn)
date = m.group(1)
time = m.group(2)
else:
print_warn("Unknown BGP data source (filename).")
dt = "%s-%s-%s %s:%s" % (str(date[0:4]),str(date[4:6]),str(date[6:8]),str(time[0:2]),str(time[2:4]))
ts = int((datetime.strptime(dt, "%Y-%m-%d %H:%M") - datetime(1970, 1, 1)).total_seconds())
return ts, maptype, subtype
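# Illustrative examples of parseFilename() (hypothetical paths; timestamps are
# UTC epoch seconds for 2015-01-01 00:00):
#
#     parseFilename('/data/route-views.xyz/bgpdata/2015.01/RIBS/rib.20150101.0000.bz2')
#         -> (1420070400, 'routeviews', 'route-views.xyz')
#     parseFilename('/data/rrc00/2015.01/bview.20150101.0000.gz')
#         -> (1420070400, 'riperis', 'rrc00')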
def singleWorker(wd, fin):
print_log("call singleWorker(fin: %s)" % (fin))
ts0, mt0, st0 = parseFilename(fin)
if ts0 not in existing_data:
pt0 = loadPtree(fin)
stats = getStats(pt0)
dout = [ts0,mt0,st0]
dout.extend(stats)
outputStats(wd,dout)
else:
print_info("data set exists, skipping ...")
def statsThread(inq, outq):
print_log("start statsThread")
for fin in iter(inq.get, 'DONE'):
try:
ts0, mt0, st0 = parseFilename(fin)
if ts0 not in existing_data:
pt0 = loadPtree(fin)
stats = getStats(pt0)
dout = [ts0,mt0,st0]
dout.extend(stats)
outq.put(dout)
else:
print_info("data set exists, skipping ...")
        except Exception, e:
            print_error("%s failed on %s with: %s" % (current_process().name, fin, e.message))
return True
def outputThread(outq, outf):
while True:
odata = outq.get()
if (odata == 'DONE'):
break
try:
outputStats(outf,odata)
except Exception, e:
            print_error("%s failed on %s with: %s" % (current_process().name, odata, e.message))
return True
output_header = ["# timestamp","maptype","subtype"]
output_header.extend(stats_header)
def outputStats (fout, dout):
output = ';'.join(str(x) for x in dout)
if fout:
with open(fout, "a+") as f:
f.write(output+'\n')
else:
print(output)
sys.stdout.flush()
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--logging', help='Output logging.', action='store_true')
parser.add_argument('-w', '--warning', help='Output warnings.', action='store_true')
parser.add_argument('-v', '--verbose', help='Verbose output with debug info, logging, and warnings.', action='store_true')
parser.add_argument('-t', '--threads', help='Use threads for parallel and faster processing.', action='store_true', default=False)
parser.add_argument('-n', '--numthreads', help='Set number of threads.', type=int, default=None)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-s', '--single', help='Process a single file, results are printed to STDOUT.')
    group.add_argument('-b', '--bulk', help='Process a bunch of files in a given directory (optionally recursive).')
    parser.add_argument('-r', '--recursive', help='Search directories recursively if in bulk mode.', action='store_true')
parser.add_argument('-f', '--file', help='Write results to file.', default=None)
args = vars(parser.parse_args())
global verbose
verbose = args['verbose']
global warning
warning = args['warning']
global logging
logging = args['logging']
writedata = args['file']
if writedata and os.path.isfile(writedata): # read already written data
with open(writedata, "r") as f:
global existing_data
for line in f:
if line.startswith('#'):
continue
ts = line.split(';')[0].strip()
try:
existing_data.append(int(ts))
except:
print_error("Failure converting timestamp to integer!")
        if existing_data:
            print_info(existing_data[0])
print_log("read %d data sets." % (len(existing_data)))
recursive = args['recursive']
threads = args['threads']
workers = args['numthreads']
if not workers:
workers = cpu_count() / 2
bulk = args['bulk']
single = args['single']
start_time = datetime.now()
print_log("START: " + start_time.strftime('%Y-%m-%d %H:%M:%S'))
if bulk:
print_log('mode: bulk')
if not (os.path.isdir(bulk)):
print_error("Invalid path for bulk processing!")
exit(1)
all_files = []
if recursive:
for dirpath, dirnames, filenames in os.walk(bulk):
for filename in [f for f in filenames if (re_file_rv.match(f) or re_file_rr.match(f))]:
all_files.append(os.path.join(dirpath, filename))
else:
for filename in [f for f in os.listdir(bulk) if (re_file_rv.match(f) or re_file_rr.match(f))]:
all_files.append(os.path.join(bulk, filename))
all_files.sort()
print_log("matching files: %d" % (len(all_files)))
if threads:
input_queue = Queue()
output_queue = Queue()
if len(existing_data) == 0: # write header if no existing data
output_queue.put(output_header)
processes = []
# fill input queue
for f in all_files:
input_queue.put(f)
# start workers to calc stats
for w in xrange(workers):
p = Process(target=statsThread, args=(input_queue,output_queue))
p.start()
processes.append(p)
input_queue.put('DONE')
# start output process to
output_p = Process(target=outputThread, args=(output_queue,writedata))
output_p.start()
for p in processes:
p.join()
output_queue.put('DONE')
output_p.join()
else:
for w in all_files:
singleWorker(writedata, w)
elif single:
print_log("mode: single")
if os.path.isfile(single):
ts0, mt0, st0 = parseFilename(os.path.abspath(single))
if ts0 not in existing_data:
pt0 = loadPtree(single)
stats = getStats(pt0)
dout = [ts0,mt0,st0]
dout.extend(stats)
outputStats(writedata, output_header)
outputStats(writedata, dout)
else:
print_info("data set exists, skipping ...")
else:
print_error("File not found (%s)!" % (single))
else:
print_error("Missing parameter: choose bulk or single mode!")
exit(1)
end_time = datetime.now()
print_log("FINISH: " + end_time.strftime('%Y-%m-%d %H:%M:%S'))
done_time = end_time - start_time
print_log(" processing time [s]: " + str(done_time.total_seconds()))
if __name__ == "__main__":
main() | mit | 6,659,661,468,522,400,000 | 34.137097 | 139 | 0.53619 | false |
jtb0/myPiProject | editConffile.py | 1 | 3739 | #!/usr/bin/env python
#coding: utf8
###############################################################################
# #
# python editConffile.py </location/to/conffile.conf> <set|get|append|delete> <section> <variable> <value> #
# #
###############################################################################
import sys
import ConfigParser
DEBUG="false"
true="true"
conffile=sys.argv[1]
if (DEBUG == true) : print "conffile:"
if (DEBUG == true) : print conffile
option=sys.argv[2]
if (DEBUG == true) : print "option:"
if (DEBUG == true) : print option
section=sys.argv[3]
if (DEBUG == true) : print "section"
if (DEBUG == true) : print section
variable=sys.argv[4]
if (DEBUG == true) : print "variable"
if (DEBUG == true) : print variable
value=sys.argv[5]
if (DEBUG == true) : print "value"
if (DEBUG == true) : print value
cp = ConfigParser.ConfigParser()
cp.read(conffile)
def optionSet(conffile, section, variable, value):
if (DEBUG == true) : print "set-Block:"
if (cp.has_section(section)):
cp.set(str(section), str(variable), str(value))
with open(str(conffile), 'w') as configfile:
cp.write(configfile)
else :
cp.add_section(section)
cp.set(str(section), str(variable), str(value))
with open(str(conffile), 'w') as configfile:
cp.write(configfile)
if (option == "set"): optionSet(conffile, section, variable, value)
def optionGet(conffile, section, variable):
if (DEBUG == true) : print "get-Block:"
print cp.get(str(section), str(variable))
return cp.get(str(section), str(variable))
if (DEBUG == true) : print "end"
if (option == "get"): optionGet(conffile, section, variable)
def optionAppend(conffile, section, variable, value):
if (DEBUG == true) : print "append-Block:"
try:
if (DEBUG == true) : print "try NoOptionError"
#try if there is already an entry at the configfile
cp.has_option(section, variable)
#if there is an entry read the list into the entity list1
list1 = list(eval(cp.get(section, variable), {}, {}))
if (DEBUG == true) : print "Hier kommt die Liste:"
if (DEBUG == true) : print list1
#append the value to the existing list
list1 = list(list1) + list([value])
if (DEBUG == true) : print list1
#persist the new list in the configfile
cp.set(str(section), str(variable), str(list1))
with open(str(conffile), 'w') as configfile:
cp.write(configfile)
except ConfigParser.NoOptionError:
if (DEBUG == true) : print "NoOptionError raised"
#if there is no entry for the variable at the conffile the entry will be done by the optionSet method with the value given as list object
optionSet(conffile, section, variable, list([value]))
if (DEBUG == true) : print "NoOptionError raised"
#optionAppend(conffile, section, variable, value)
#else:
if (option == "append"): optionAppend(conffile, section, variable, value)
if (option == "delete") :
if (DEBUG == true) : print "delete-Block:"
deleteList = [value]
if (cp.has_option(section, variable)):
list1 = eval(cp.get(section, variable), {}, {})
if (DEBUG == true) : print "Hier kommt die Liste:"
if (DEBUG == true) : print list1
for index, item in enumerate(list1):
if item in deleteList :
list1.pop(index)
if (DEBUG == true) : print list1
cp.set(str(section), str(variable), str(list1))
with open(str(conffile), 'w') as configfile:
cp.write(configfile)
| gpl-3.0 | -3,766,158,335,119,960,000 | 35.656863 | 138 | 0.577695 | false |
shadowgamefly/news-Digest | web/web/settings.py | 1 | 2732 | """
Django settings for web project.
Generated by 'django-admin startproject' using Django 1.8.14.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '07n8$i4x*gdwpwux6ehv*^598i6d=&4w@&di!gk$y^s+pe#x0='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'web',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'web.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'static/templates/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
| mit | 1,399,339,844,919,125,500 | 25.269231 | 71 | 0.689239 | false |
Azure/azure-sdk-for-python | sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_authentication.py | 1 | 4537 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_authentication.py
DESCRIPTION:
This sample demonstrates how to authenticate to the Form Recognizer service.
There are two supported methods of authentication:
1) Use a Form Recognizer API key with AzureKeyCredential from azure.core.credentials
2) Use a token credential from azure-identity to authenticate with Azure Active Directory
See more details about authentication here:
https://docs.microsoft.com/azure/cognitive-services/authentication
USAGE:
python sample_authentication.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Form Recognizer resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
3) AZURE_CLIENT_ID - the client ID of your active directory application.
4) AZURE_TENANT_ID - the tenant ID of your active directory application.
5) AZURE_CLIENT_SECRET - the secret of your active directory application.
"""
import os
class AuthenticationSample(object):
url = "https://raw.githubusercontent.com/Azure/azure-sdk-for-python/main/sdk/formrecognizer/azure-ai-formrecognizer/tests/sample_forms/forms/Form_1.jpg"
def authentication_with_api_key_credential_form_recognizer_client(self):
# [START create_fr_client_with_key]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormRecognizerClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(endpoint, AzureKeyCredential(key))
# [END create_fr_client_with_key]
poller = form_recognizer_client.begin_recognize_content_from_url(self.url)
result = poller.result()
def authentication_with_azure_active_directory_form_recognizer_client(self):
# [START create_fr_client_with_aad]
"""DefaultAzureCredential will use the values from these environment
variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET
"""
from azure.ai.formrecognizer import FormRecognizerClient
from azure.identity import DefaultAzureCredential
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
credential = DefaultAzureCredential()
form_recognizer_client = FormRecognizerClient(endpoint, credential)
# [END create_fr_client_with_aad]
poller = form_recognizer_client.begin_recognize_content_from_url(self.url)
result = poller.result()
def authentication_with_api_key_credential_form_training_client(self):
# [START create_ft_client_with_key]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormTrainingClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_training_client = FormTrainingClient(endpoint, AzureKeyCredential(key))
# [END create_ft_client_with_key]
properties = form_training_client.get_account_properties()
def authentication_with_azure_active_directory_form_training_client(self):
# [START create_ft_client_with_aad]
"""DefaultAzureCredential will use the values from these environment
variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET
"""
from azure.ai.formrecognizer import FormTrainingClient
from azure.identity import DefaultAzureCredential
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
credential = DefaultAzureCredential()
form_training_client = FormTrainingClient(endpoint, credential)
# [END create_ft_client_with_aad]
properties = form_training_client.get_account_properties()
if __name__ == '__main__':
sample = AuthenticationSample()
sample.authentication_with_api_key_credential_form_recognizer_client()
sample.authentication_with_azure_active_directory_form_recognizer_client()
sample.authentication_with_api_key_credential_form_training_client()
sample.authentication_with_azure_active_directory_form_training_client()
| mit | 5,170,395,117,075,362,000 | 44.37 | 156 | 0.707075 | false |
Miserlou/Zappa | tests/test_handler.py | 1 | 14534 | from mock import Mock
import sys
import unittest
from zappa.handler import LambdaHandler
from zappa.utilities import merge_headers
def no_args():
return
def one_arg(first):
return first
def two_args(first, second):
return first, second
def var_args(*args):
return args
def var_args_with_one(first, *args):
return first, args[0]
def unsupported(first, second, third):
return first, second, third
def raises_exception(*args, **kwargs):
raise Exception('app exception')
def handle_bot_intent(event, context):
return "Success"
mocked_exception_handler = Mock()
class TestZappa(unittest.TestCase):
def setUp(self):
mocked_exception_handler.reset_mock()
def tearDown(self):
LambdaHandler._LambdaHandler__instance = None
LambdaHandler.settings = None
LambdaHandler.settings_name = None
def test_run_function(self):
self.assertIsNone(LambdaHandler.run_function(no_args, 'e', 'c'))
self.assertEqual(LambdaHandler.run_function(one_arg, 'e', 'c'), 'e')
self.assertEqual(LambdaHandler.run_function(two_args, 'e', 'c'), ('e', 'c'))
self.assertEqual(LambdaHandler.run_function(var_args, 'e', 'c'), ('e', 'c'))
self.assertEqual(LambdaHandler.run_function(var_args_with_one, 'e', 'c'), ('e', 'c'))
try:
LambdaHandler.run_function(unsupported, 'e', 'c')
self.fail('Exception expected')
except RuntimeError as e:
pass
    def test_run_function_with_type_hint(self):
scope = {}
exec('def f_with_type_hint() -> None: return', scope)
f_with_type_hint = scope['f_with_type_hint']
self.assertIsNone(LambdaHandler.run_function(f_with_type_hint, 'e', 'c'))
def test_wsgi_script_name_on_aws_url(self):
"""
Ensure that requests to the amazonaws.com host for an API with a
domain have the correct request.url
"""
lh = LambdaHandler('tests.test_wsgi_script_name_settings')
event = {
'body': '',
'resource': '/{proxy+}',
'requestContext': {},
'queryStringParameters': {},
'headers': {
'Host': '1234567890.execute-api.us-east-1.amazonaws.com',
},
'pathParameters': {
'proxy': 'return/request/url'
},
'httpMethod': 'GET',
'stageVariables': {},
'path': '/return/request/url'
}
response = lh.handler(event, None)
self.assertEqual(response['statusCode'], 200)
self.assertEqual(
response['body'],
'https://1234567890.execute-api.us-east-1.amazonaws.com/dev/return/request/url'
)
def test_wsgi_script_name_on_domain_url(self):
"""
        Ensure that requests to a custom domain host for the API have the
        correct request.url
"""
lh = LambdaHandler('tests.test_wsgi_script_name_settings')
event = {
'body': '',
'resource': '/{proxy+}',
'requestContext': {},
'queryStringParameters': {},
'headers': {
'Host': 'example.com',
},
'pathParameters': {
'proxy': 'return/request/url'
},
'httpMethod': 'GET',
'stageVariables': {},
'path': '/return/request/url'
}
response = lh.handler(event, None)
self.assertEqual(response['statusCode'], 200)
self.assertEqual(
response['body'],
'https://example.com/return/request/url'
)
def test_wsgi_script_name_with_multi_value_header(self):
"""
Ensure that requests generated with multivalued headers (such as
from an ALB with Multi Valued Headers enabled) succeed.
"""
lh = LambdaHandler('tests.test_wsgi_script_name_settings')
event = {
'body': '',
'resource': '/{proxy+}',
'requestContext': {},
'queryStringParameters': {},
'multiValueHeaders': {
'Host': ['example.com'],
},
'pathParameters': {
'proxy': 'return/request/url'
},
'httpMethod': 'GET',
'stageVariables': {},
'path': '/return/request/url'
}
response = lh.handler(event, None)
self.assertEqual(response['statusCode'], 200)
self.assertIn('multiValueHeaders', response)
def test_wsgi_script_name_with_multi_value_querystring(self):
"""
Ensure that requests generated with multivalue querystrings succeed.
"""
lh = LambdaHandler('tests.test_wsgi_script_name_settings')
event = {
'body': '',
'resource': '/{proxy+}',
'requestContext': {},
'multiValueQueryStringParameters': {
'multi': ['value', 'qs']
},
'multiValueHeaders': {
'Host': ['example.com'],
},
'pathParameters': {
'proxy': 'return/request/url'
},
'httpMethod': 'GET',
'stageVariables': {},
'path': '/return/request/url'
}
response = lh.handler(event, None)
self.assertEqual(response['statusCode'], 200)
self.assertEqual(
response['body'],
'https://example.com/return/request/url?multi=value&multi=qs'
)
def test_wsgi_script_name_on_test_request(self):
"""
        Ensure that requests sent by the "Send test request" button behave
sensibly
"""
lh = LambdaHandler('tests.test_wsgi_script_name_settings')
event = {
'body': '',
'resource': '/{proxy+}',
'requestContext': {},
'queryStringParameters': {},
'headers': {},
'pathParameters': {
'proxy': 'return/request/url'
},
'httpMethod': 'GET',
'stageVariables': {},
'path': '/return/request/url'
}
response = lh.handler(event, None)
self.assertEqual(response['statusCode'], 200)
self.assertEqual(
response['body'],
'https://zappa:80/return/request/url'
)
def test_exception_handler_on_web_request(self):
"""
Ensure that app exceptions triggered by web requests use the exception_handler.
"""
lh = LambdaHandler('tests.test_exception_handler_settings')
event = {
'body': '',
'resource': '/{proxy+}',
'requestContext': {},
'queryStringParameters': {},
'headers': {
'Host': '1234567890.execute-api.us-east-1.amazonaws.com',
},
'pathParameters': {
'proxy': 'return/request/url'
},
'httpMethod': 'GET',
'stageVariables': {},
'path': '/return/request/url'
}
mocked_exception_handler.assert_not_called()
response = lh.handler(event, None)
self.assertEqual(response['statusCode'], 500)
mocked_exception_handler.assert_called()
def test_wsgi_script_on_cognito_event_request(self):
"""
Ensure that requests sent by cognito behave sensibly
"""
lh = LambdaHandler('tests.test_wsgi_script_name_settings')
event = {'version': '1',
'region': 'eu-west-1',
'userPoolId': 'region_poolID',
'userName': 'uuu-id-here',
'callerContext': {'awsSdkVersion': 'aws-sdk-js-2.149.0',
'clientId': 'client-id-here'},
'triggerSource': 'PreSignUp_SignUp',
'request': {'userAttributes':
{'email': '[email protected]'}, 'validationData': None},
'response': {'autoConfirmUser': False,
'autoVerifyEmail': False,
'autoVerifyPhone': False}}
response = lh.handler(event, None)
self.assertEqual(response['response']['autoConfirmUser'], False)
def test_bot_triggered_event(self):
"""
Ensure that bot triggered events are handled as in the settings
"""
lh = LambdaHandler('tests.test_bot_handler_being_triggered')
# from : https://docs.aws.amazon.com/lambda/latest/dg/eventsources.html#eventsources-lex
event = {
"messageVersion": "1.0",
"invocationSource": "DialogCodeHook",
"userId": "user-id specified in the POST request to Amazon Lex.",
"sessionAttributes": {
"key1": "value1",
"key2": "value2",
},
"bot": {
"name": "bot-name",
"alias": "bot-alias",
"version": "bot-version"
},
"outputDialogMode": "Text or Voice, based on ContentType request header in runtime API request",
"currentIntent": {
"name": "intent-name",
"slots": {
"slot-name": "value",
"slot-name": "value",
"slot-name": "value"
},
"confirmationStatus": "None, Confirmed, or Denied (intent confirmation, if configured)"
}
}
response = lh.handler(event, None)
self.assertEqual(response, 'Success')
def test_exception_in_bot_triggered_event(self):
"""
Ensure that bot triggered exceptions are handled as defined in the settings.
"""
lh = LambdaHandler('tests.test_bot_exception_handler_settings')
# from : https://docs.aws.amazon.com/lambda/latest/dg/eventsources.html#eventsources-lex
event = {
"messageVersion": "1.0",
"invocationSource": "DialogCodeHook",
"userId": "user-id specified in the POST request to Amazon Lex.",
"sessionAttributes": {
"key1": "value1",
"key2": "value2",
},
"bot": {
"name": "bot-name",
"alias": "bot-alias",
"version": "bot-version"
},
"outputDialogMode": "Text or Voice, based on ContentType request header in runtime API request",
"currentIntent": {
"name": "intent-name",
"slots": {
"slot-name": "value",
"slot-name": "value",
"slot-name": "value"
},
"confirmationStatus": "None, Confirmed, or Denied (intent confirmation, if configured)"
}
}
response = lh.lambda_handler(event, None)
        mocked_exception_handler.assert_called()
def test_wsgi_script_name_on_alb_event(self):
"""
Ensure ALB-triggered events are properly handled by LambdaHandler
ALB-forwarded events have a slightly different request structure than API-Gateway
https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html
"""
lh = LambdaHandler('tests.test_wsgi_script_name_settings')
event = {
'requestContext': {
'elb': {
'targetGroupArn': 'arn:aws:elasticloadbalancing:region:123456789012:targetgroup/my-target-group/6d0ecf831eec9f09'
}
},
'httpMethod': 'GET',
'path': '/return/request/url',
'queryStringParameters': {},
'headers': {
'accept': 'text/html,application/xhtml+xml',
'accept-language': 'en-US,en;q=0.8',
'content-type': 'text/plain',
'cookie': 'cookies',
'host': '1234567890.execute-api.us-east-1.amazonaws.com',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6)',
'x-amzn-trace-id': 'Root=1-5bdb40ca-556d8b0c50dc66f0511bf520',
'x-forwarded-for': '72.21.198.66',
'x-forwarded-port': '443',
'x-forwarded-proto': 'https'
},
'isBase64Encoded': False,
'body': ''
}
response = lh.handler(event, None)
self.assertEqual(response['statusCode'], 200)
self.assertEqual(response['statusDescription'], '200 OK')
self.assertEqual(response['isBase64Encoded'], False)
self.assertEqual(
response['body'],
'https://1234567890.execute-api.us-east-1.amazonaws.com/return/request/url'
)
def test_merge_headers_no_multi_value(self):
event = {
'headers': {
'a': 'b'
}
}
merged = merge_headers(event)
self.assertEqual(merged['a'], 'b')
def test_merge_headers_combine_values(self):
event = {
'headers': {
'a': 'b',
'z': 'q'
},
'multiValueHeaders': {
'a': ['c'],
'x': ['y']
}
}
merged = merge_headers(event)
self.assertEqual(merged['a'], 'c')
self.assertEqual(merged['x'], 'y')
self.assertEqual(merged['z'], 'q')
def test_merge_headers_no_single_value(self):
event = {
'multiValueHeaders': {
'a': ['c', 'd'],
'x': ['y', 'z', 'f']
}
}
merged = merge_headers(event)
self.assertEqual(merged['a'], 'c, d')
self.assertEqual(merged['x'], 'y, z, f')
def test_cloudwatch_subscription_event(self):
"""
Test that events sent in the format used by CloudWatch logs via
subscription filters are handled properly.
The actual payload that Lambda receives is in the following format
{ "awslogs": {"data": "BASE64ENCODED_GZIP_COMPRESSED_DATA"} }
https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/SubscriptionFilters.html
"""
lh = LambdaHandler('tests.test_event_script_settings')
event = {
'awslogs': {
'data': "some-data-not-important-for-test"
}
}
response = lh.handler(event, None)
self.assertEqual(response, True)
| mit | 5,811,482,622,813,922,000 | 32.488479 | 133 | 0.521605 | false |
christiancg/indoor | authenticationdecorator.py | 1 | 1123 | from functools import wraps
from flask import request, Response
from modelos import Usuario
from logger import Logger
log = Logger(__name__)
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
try:
usr = Usuario.query.filter(Usuario.nombre == username).first()
if usr is None:
return False
elif usr.password != password:
return False
else:
return True
except Exception, ex:
log.exception(ex)
return False
def authenticate():
"""Sends a 401 response that enables basic auth"""
    return Response(response=json.dumps({'status':
        'Could not verify your access level for that URL.\n'
        'You have to login with proper credentials'}), status=401,
        headers={'WWW-Authenticate': 'Basic realm="Login Required"'},
        mimetype='application/json')
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
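# Illustrative usage sketch (assumes a Flask `app` object defined elsewhere):
#
#     @app.route('/private')
#     @requires_auth
#     def private_view():
#         return 'only visible with valid HTTP Basic credentials'
#
# Requests whose credentials do not match a Usuario row receive the 401
# response produced by authenticate() above.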
| apache-2.0 | 454,912,826,066,127,740 | 27.794872 | 68 | 0.680321 | false |
hyperized/ansible | lib/ansible/modules/cloud/azure/azure_rm_lock_info.py | 1 | 8055 | #!/usr/bin/python
#
# Copyright (c) 2019 Yuwei Zhou, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_lock_info
version_added: "2.9"
short_description: Manage Azure locks
description:
- Create, delete an Azure lock.
options:
name:
description:
- Name of the lock.
type: str
required: true
managed_resource_id:
description:
            - ID of the resource where the lock needs to be managed.
            - Get this via facts module.
            - Cannot be set together with I(resource_group).
            - The subscription is managed if neither I(managed_resource_id) nor I(resource_group) is defined.
- "'/subscriptions/{subscriptionId}' for subscriptions."
- "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups."
- "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources."
- Can get all locks with 'child scope' for this resource, use I(managed_resource_id) in response for further management.
type: str
resource_group:
description:
            - Name of the resource group where the lock needs to be managed.
            - The lock is at the resource group level.
            - Cannot be set together with I(managed_resource_id).
            - The subscription is queried if neither I(managed_resource_id) nor I(resource_group) is defined.
- Can get all locks with 'child scope' in this resource group, use the I(managed_resource_id) in response for further management.
type: str
extends_documentation_fragment:
- azure
author:
- Yuwei Zhou (@yuwzho)
'''
EXAMPLES = '''
- name: Get myLock details of myVM
azure_rm_lock_info:
name: myLock
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM
- name: List locks of myVM
azure_rm_lock_info:
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM
- name: List locks of myResourceGroup
azure_rm_lock_info:
resource_group: myResourceGroup
- name: List locks of myResourceGroup
azure_rm_lock_info:
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup
- name: List locks of mySubscription
azure_rm_lock_info:
- name: List locks of mySubscription
azure_rm_lock_info:
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
'''
RETURN = '''
locks:
description:
- List of locks dicts.
returned: always
type: complex
contains:
id:
description:
- ID of the Lock.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Authorization/locks/myLock"
name:
description:
- Name of the lock.
returned: always
type: str
sample: myLock
level:
description:
- Type level of the lock.
returned: always
type: str
sample: can_not_delete
notes:
description:
- Notes of the lock added by creator.
returned: always
type: str
sample: "This is a lock"
''' # NOQA
import json
import re
from ansible.module_utils.common.dict_transformations import _camel_to_snake
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMLockInfo(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
managed_resource_id=dict(type='str')
)
self.results = dict(
changed=False,
locks=[]
)
mutually_exclusive = [['resource_group', 'managed_resource_id']]
self.name = None
self.resource_group = None
self.managed_resource_id = None
self._mgmt_client = None
self._query_parameters = {'api-version': '2016-09-01'}
self._header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
super(AzureRMLockInfo, self).__init__(self.module_arg_spec, facts_module=True, mutually_exclusive=mutually_exclusive, supports_tags=False)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_lock_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_lock_facts' module has been renamed to 'azure_rm_lock_info'", version='2.13')
for key in self.module_arg_spec.keys():
setattr(self, key, kwargs[key])
self._mgmt_client = self.get_mgmt_svc_client(GenericRestClient, base_url=self._cloud_environment.endpoints.resource_manager)
changed = False
# construct scope id
scope = self.get_scope()
url = '/{0}/providers/Microsoft.Authorization/locks'.format(scope)
if self.name:
url = '{0}/{1}'.format(url, self.name)
locks = self.list_locks(url)
resp = locks.get('value') if 'value' in locks else [locks]
self.results['locks'] = [self.to_dict(x) for x in resp]
return self.results
def to_dict(self, lock):
resp = dict(
id=lock['id'],
name=lock['name'],
level=_camel_to_snake(lock['properties']['level']),
managed_resource_id=re.sub('/providers/Microsoft.Authorization/locks/.+', '', lock['id'])
)
if lock['properties'].get('notes'):
resp['notes'] = lock['properties']['notes']
if lock['properties'].get('owners'):
resp['owners'] = [x['application_id'] for x in lock['properties']['owners']]
return resp
def list_locks(self, url):
try:
resp = self._mgmt_client.query(url=url,
method='GET',
query_parameters=self._query_parameters,
header_parameters=self._header_parameters,
body=None,
expected_status_codes=[200],
polling_timeout=None,
polling_interval=None)
return json.loads(resp.text)
except CloudError as exc:
self.fail('Error when finding locks {0}: {1}'.format(url, exc.message))
def get_scope(self):
'''
Get the resource scope of the lock management.
'/subscriptions/{subscriptionId}' for subscriptions,
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups,
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources.
'''
if self.managed_resource_id:
return self.managed_resource_id
elif self.resource_group:
return '/subscriptions/{0}/resourcegroups/{1}'.format(self.subscription_id, self.resource_group)
else:
return '/subscriptions/{0}'.format(self.subscription_id)
def main():
AzureRMLockInfo()
if __name__ == '__main__':
main()
| gpl-3.0 | 6,295,349,006,659,803,000 | 35.121076 | 156 | 0.611546 | false |
CanaimaGNULinux/canaimagnulinux.userdata | canaimagnulinux/userdata/userdataschema.py | 1 | 6237 | # -*- coding: utf-8 -*-
from canaimagnulinux.userdata import _
from plone.app.users.userdataschema import IUserDataSchema
from plone.app.users.userdataschema import IUserDataSchemaProvider
from zope import schema
from zope.interface import implements
from zope.schema import ValidationError
class TermsNotAccepted(ValidationError):
__doc__ = _(u'You must accept the terms and conditions for registering an account!')
def validateAccept(value):
""" Validate if accepted the terms of use for this site. """
# if value is not True:
# return False
if not value:
raise TermsNotAccepted(value)
return True
def getCommonTerms():
""" Get the common terms of use for this site. """
commonterms_url = 'terminos-y-convenios/condiciones-generales-miembros'
return commonterms_url
class UserDataSchemaProvider(object):
implements(IUserDataSchemaProvider)
def getSchema(self):
""" Get custom User Data Squema """
return IEnhancedUserDataSchema
class IEnhancedUserDataSchema(IUserDataSchema):
""" Use all the fields from the default user data schema,
and add various extra fields.
"""
firstname = schema.TextLine(
title=_(u'label_firstname', default=u'First name'),
description=_(u'help_firstname',
default=u'Fill in your given name.'),
required=True,)
lastname = schema.TextLine(
title=_(u'label_lastname', default=u'Last name'),
description=_(u'help_lastname',
default=u'Fill in your surname or your family name.'),
required=True,)
gender = schema.Choice(
title=_(u'label_gender', default=u'Gender'),
description=_(u'help_gender',
default=u'Male / Female?'),
values=['Male', 'Female'],
required=True,)
birthdate = schema.Date(
title=_(u'label_birthdate', default=u'Birthdate'),
description=_(u'help_birthdate',
default=u'Your date of birth, in the format dd-mm-yyyy'),
required=False,)
mobile = schema.TextLine(
title=_(u'label_mobile', default=u'Mobile'),
description=_(u'help_mobile',
default=u'Fill in your mobile number.'),
required=False,)
officephone = schema.TextLine(
title=_(u'label_officephone', default=u'Office number'),
description=_(u'help_officephone',
default=u'Fill in your office number.'),
required=False,)
irc = schema.TextLine(
title=_(u'label_irc', default=u'IRC nickname'),
description=_(u'help_irc',
default=u'Fill in your IRC nickname.'),
required=False,)
telegram = schema.TextLine(
title=_(u'label_telegram', default=u'Telegram account'),
description=_(u'help_telegram',
default=u'Fill in your Telegram account, in the format @telegram-nickname'),
required=False,)
skype = schema.TextLine(
title=_(u'label_skype', default=u'Skype account'),
description=_(u'help_skype',
default=u'Fill in your skype account.'),
required=False,)
twitter = schema.TextLine(
title=_(u'label_twitter', default=u'Twitter account'),
description=_(u'help_twitter',
default=u'Fill in your Twitter account.'),
required=False,)
instagram = schema.TextLine(
title=_(u'label_instagram', default=u'Instagram account'),
description=_(u'help_instagram',
default=u'Fill in your Instagram account.'),
required=False,)
facebook = schema.TextLine(
title=_(u'label_facebook', default=u'Facebook account'),
description=_(u'help_facebook',
default=u'Fill in your Facebook account.'),
required=False,)
country = schema.TextLine(
title=_(u'label_country', default=u'Country'),
description=_(u'help_country',
default=u'Fill in the country you live in.'),
required=False,)
city = schema.TextLine(
title=_(u'label_city', default=u'City'),
description=_(u'help_city',
default=u'Fill in the city you live in.'),
required=False,)
institution = schema.TextLine(
title=_(u'label_institution', default=u'Institution / Organization'),
description=_(u'help_institution',
default=u'Fill in the institution where you work.'),
required=False,)
instadd = schema.TextLine(
title=_(u'label_instadd', default=u'Institution address'),
description=_(u'help_instadd',
default=u'Fill in the address of the institution where you work.'),
required=False,)
position = schema.TextLine(
title=_(u'label_position', default=u'Current position'),
description=_(u'help_instadd',
default=u'Fill in the current position.'),
required=False,)
profession = schema.TextLine(
title=_(u'label_profession', default=u'Profession'),
description=_(u'help_profession',
default=u'Fill in your profession.'),
required=False,)
# newsletter = schema.Bool(
# title=_(u'label_newsletter', default=u'Subscribe to newsletter'),
# description=_(u'help_newsletter',
# default=u'If you tick this box, we'll subscribe you to "
# "our newsletter.'),
# required=False,)
accept = schema.Bool(
title=_(u'label_accept', default=u'Accept terms of use'),
description=_(u'help_accept',
default=u'Tick this box to indicate that you have found, read and accepted the '
'<a id=\'commonterms\' target=\'_blank\' href=\'' + getCommonTerms() + '\' title=\'Terms of use for this site.\'>terms of use</a> for this site.'),
# description=_(u'help_accept',
# default=u'Tick this box to indicate that you have found,'
# ' read and accepted the terms of use for this site. '),
required=False,
constraint=validateAccept,)
| gpl-2.0 | -6,964,262,227,185,304,000 | 39.764706 | 169 | 0.601571 | false |
jessepeterson/commandment | commandment/dep/__init__.py | 1 | 3899 | from typing import Set, Dict
from enum import Enum
class SetupAssistantStep(Enum):
"""This enumeration contains all possible steps of Setup Assistant that can be skipped.
See Also:
- `DEP Web Services: Define Profile <https://developer.apple.com/library/content/documentation/Miscellaneous/Reference/MobileDeviceManagementProtocolRef/4-Profile_Management/ProfileManagement.html#//apple_ref/doc/uid/TP40017387-CH7-SW30>`_.
"""
"""Skips Apple ID setup."""
AppleID = 'AppleID'
"""Skips Touch ID setup."""
Biometric = 'Biometric'
"""Disables automatically sending diagnostic information."""
Diagnostics = 'Diagnostics'
"""Skips DisplayTone setup."""
DisplayTone = 'DisplayTone'
"""Disables Location Services."""
Location = 'Location'
"""Hides and disables the passcode pane."""
Passcode = 'Passcode'
"""Skips Apple Pay setup."""
Payment = 'Payment'
"""Skips privacy pane."""
Privacy = 'Privacy'
"""Disables restoring from backup."""
Restore = 'Restore'
SIMSetup = 'SIMSetup'
"""Disables Siri."""
Siri = 'Siri'
"""Skips Terms and Conditions."""
TOS = 'TOS'
"""Skips zoom setup."""
Zoom = 'Zoom'
"""If the Restore pane is not skipped, removes Move from Android option from it."""
Android = 'Android'
"""Skips the Home Button screen in iOS."""
HomeButtonSensitivity = 'HomeButtonSensitivity'
"""Skips on-boarding informational screens for user education (“Cover Sheet, Multitasking & Control Center”,
for example) in iOS."""
iMessageAndFaceTime = 'iMessageAndFaceTime'
"""Skips the iMessage and FaceTime screen in iOS."""
OnBoarding = 'OnBoarding'
"""Skips the screen for Screen Time in iOS."""
ScreenTime = 'ScreenTime'
"""Skips the mandatory software update screen in iOS."""
SoftwareUpdate = 'SoftwareUpdate'
"""Skips the screen for watch migration in iOS."""
WatchMigration = 'WatchMigration'
"""Skips the Choose Your Look screen in macOS."""
Appearance = 'Appearance'
"""Disables FileVault Setup Assistant screen in macOS."""
FileVault = 'FileVault'
"""Skips iCloud Analytics screen in macOS."""
iCloudDiagnostics = 'iCloudDiagnostics'
"""Skips iCloud Documents and Desktop screen in macOS."""
iCloudStorage = 'iCloudStorage'
"""Disables registration screen in macOS"""
Registration = 'Registration'
# ATV
"""Skips the tvOS screen about using aerial screensavers in ATV."""
ScreenSaver = 'ScreenSaver'
"""Skips the Tap To Set Up option in ATV about using an iOS device to set up your ATV (instead of entering all
your account information and setting choices separately)."""
TapToSetup = 'TapToSetup'
"""Skips TV home screen layout sync screen in tvOS."""
TVHomeScreenSync = 'TVHomeScreenSync'
"""Skips the TV provider sign in screen in tvOS."""
TVProviderSignIn = 'TVProviderSignIn'
"""Skips the “Where is this Apple TV?” screen in tvOS."""
TVRoom = 'TVRoom'
SkipSetupSteps = Set[SetupAssistantStep]
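# Illustrative sketch: a SkipSetupSteps value for an enrollment profile that
# hides the Apple ID, Siri and registration panes (chosen here only as an
# example).
#
#     skipped_panes: SkipSetupSteps = {
#         SetupAssistantStep.AppleID,
#         SetupAssistantStep.Siri,
#         SetupAssistantStep.Registration,
#     }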
class DEPProfileRemovalStatus(Enum):
SUCCESS = "SUCCESS"
NOT_ACCESSIBLE = "NOT_ACCESSIBLE"
FAILED = "FAILED"
SerialNumber = str
DEPProfileRemovals = Dict[SerialNumber, DEPProfileRemovalStatus]
class DEPOrgType(Enum):
"""This enum specifies allowable values for the ``org_type`` field of the dep /account endpoint."""
Education = 'edu'
Organization = 'org'
class DEPOrgVersion(Enum):
"""This enum specifies allowable values for the ``org_version`` field of the dep /account endpoint."""
v1 = 'v1' # Apple Deployment Programmes
v2 = 'v2' # Apple School Manager
class DEPOperationType(Enum):
"""This enum describes the types of operations returned in a DEP Sync Devices result."""
Added = 'added'
Modified = 'modified'
Deleted = 'deleted'
| mit | -1,331,331,849,121,758,200 | 36.057143 | 250 | 0.682087 | false |
ad510/find_pennapps_hackers | find_pennapps_hackers.py | 1 | 3884 | #!/usr/bin/env python3
import http.client
import re
import sys
import time
def main():
# print info about me :)
print("Andrew Downing")
print("website: http://andrewd.50webs.com")
print("github: ad510")
print()
# find twitter usernames
twitterUsers = set(findbetween(gethttp("twitter.com", "/search?q=%23PennApps", True), "data-screen-name=\"", "\""))
for twitterUser in twitterUsers:
name = ""
domains = set()
githubUsers = set()
html = gethttp("twitter.com", "/" + twitterUser, True)
# find real name
nameFields = findbetween(html, "<span class=\"profile-field\">", "</span>")
if len(nameFields) > 0:
name = nameFields[0]
print(name)
print("twitter: " + twitterUser)
# find website domains
for url in findurls(html):
url2 = url[:len(url) - 1] if url.endswith("/") else url
if url2.find("twitter.com") == -1 and url2.find("twimg.com") == -1 and (url2.endswith(".com") or url2.endswith(".org") or url2.endswith(".net")):
domains.add(url2)
elif url.find("github.com") != -1:
githubUsers.add(url)
if len(domains) > 0:
print("website: " + str(domains))
# find github accounts
if "--duckduckgo" in sys.argv:
# duckduckgo
html = ""
try:
html = gethttp("duckduckgo.com", "/html/?q=site:github.com " + name, True)
except:
print("error searching 'site:github.com " + name + "'")
for url in findlinks(html):
if url.find("https://github.com/") != -1 and url.count("/") == 3:
githubUsers.add(url.split("github.com/")[1].split("/")[0])
time.sleep(2)
else:
# google
for url in findlinks(gethttp("www.google.com", "/search?q=site:github.com+" + name.replace(" ", "+"), True)):
if url.startswith("/url?q=https://github.com/") and url.count("/") == 4:
githubUsers.add(findbetween(url, "/url?q=https://github.com/", "&")[0].split("%")[0])
# find in website
for domain in domains:
for url in findlinks(gethttpsmart(domain)):
if (url.find("github.com/") != -1):
githubUsers.add(url.split("github.com/")[1].split("/")[0])
if len(githubUsers) > 0:
print("github: " + str(githubUsers))
print()
def gethttpsmart(url):
minusProtocol = url[url.find("//") + 2 : ]
if minusProtocol.find("/") == -1:
minusProtocol += "/"
return gethttp(minusProtocol.split("/")[0], "/" + minusProtocol.split("/")[1], url.startswith("https"))
def gethttp(domain, url, https):
#print(domain, url, https)
conn = http.client.HTTPSConnection(domain) if https else http.client.HTTPConnection(domain)
conn.request("GET", url)
r1 = conn.getresponse()
if (r1.status == 301 or r1.status == 302) and url != "/sorry":
return gethttpsmart(r1.getheader("Location")) # got a "moved permanently" error
elif r1.status != 200:
print("non-normal status connecting to", domain, url, r1.status, r1.reason)
r1str = str(r1.read())
conn.close()
return r1str
def findbetween(string, before, after):
ret = []
for match in re.finditer(re.escape(before), string):
ret.append(string[match.start() + len(before) : string.find(after, match.start() + len(before))])
return ret
def findurls(string): # thanks to https://stackoverflow.com/questions/6883049/regex-to-find-urls-in-string-in-python
return re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', string)
def findlinks(string):
return re.findall('href="?\'?([^"\'>]*)', string)
if __name__ == "__main__":
main()
| mit | -4,278,235,120,225,218,000 | 40.319149 | 157 | 0.558445 | false |
dhowland/EasyAVR | keymapper/easykeymap/kleparse.py | 1 | 9205 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#
# Easy AVR USB Keyboard Firmware Keymapper
# Copyright (C) 2013-2017 David Howland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Parse JSON files from http://www.keyboard-layout-editor.com and convert
to an EasyAVR layout data structure. The conversion is not complete, because
layouts from KLE don't contain enough information to completely define a board.
This whole thing is a total hack-job. It is not meant to be a perfect solution,
it is only meant to be a quick way to start adding support for a new board.
"""
import json
from .build import NULL_SYMBOL
# All default legends from the ANSI104 and ISO105 predefined layouts.
conversion_table = {
"A": "HID_KEYBOARD_SC_A",
"B": "HID_KEYBOARD_SC_B",
"C": "HID_KEYBOARD_SC_C",
"D": "HID_KEYBOARD_SC_D",
"E": "HID_KEYBOARD_SC_E",
"F": "HID_KEYBOARD_SC_F",
"G": "HID_KEYBOARD_SC_G",
"H": "HID_KEYBOARD_SC_H",
"I": "HID_KEYBOARD_SC_I",
"J": "HID_KEYBOARD_SC_J",
"K": "HID_KEYBOARD_SC_K",
"L": "HID_KEYBOARD_SC_L",
"M": "HID_KEYBOARD_SC_M",
"N": "HID_KEYBOARD_SC_N",
"O": "HID_KEYBOARD_SC_O",
"P": "HID_KEYBOARD_SC_P",
"Q": "HID_KEYBOARD_SC_Q",
"R": "HID_KEYBOARD_SC_R",
"S": "HID_KEYBOARD_SC_S",
"T": "HID_KEYBOARD_SC_T",
"U": "HID_KEYBOARD_SC_U",
"V": "HID_KEYBOARD_SC_V",
"W": "HID_KEYBOARD_SC_W",
"X": "HID_KEYBOARD_SC_X",
"Y": "HID_KEYBOARD_SC_Y",
"Z": "HID_KEYBOARD_SC_Z",
"!\n1": "HID_KEYBOARD_SC_1_AND_EXCLAMATION",
"@\n2": "HID_KEYBOARD_SC_2_AND_AT",
"\"\n2": "HID_KEYBOARD_SC_2_AND_AT",
"#\n3": "HID_KEYBOARD_SC_3_AND_HASHMARK",
"£\n3": "HID_KEYBOARD_SC_3_AND_HASHMARK",
"$\n4": "HID_KEYBOARD_SC_4_AND_DOLLAR",
"%\n5": "HID_KEYBOARD_SC_5_AND_PERCENTAGE",
"^\n6": "HID_KEYBOARD_SC_6_AND_CARET",
"&\n7": "HID_KEYBOARD_SC_7_AND_AND_AMPERSAND",
"*\n8": "HID_KEYBOARD_SC_8_AND_ASTERISK",
"(\n9": "HID_KEYBOARD_SC_9_AND_OPENING_PARENTHESIS",
")\n0": "HID_KEYBOARD_SC_0_AND_CLOSING_PARENTHESIS",
"Enter": "HID_KEYBOARD_SC_ENTER",
"Esc": "HID_KEYBOARD_SC_ESCAPE",
"Backspace": "HID_KEYBOARD_SC_BACKSPACE",
"Tab": "HID_KEYBOARD_SC_TAB",
" ": "HID_KEYBOARD_SC_SPACE",
"_\n-": "HID_KEYBOARD_SC_MINUS_AND_UNDERSCORE",
"+\n=": "HID_KEYBOARD_SC_EQUAL_AND_PLUS",
"{\n[": "HID_KEYBOARD_SC_OPENING_BRACKET_AND_OPENING_BRACE",
"}\n]": "HID_KEYBOARD_SC_CLOSING_BRACKET_AND_CLOSING_BRACE",
"|\n\\": "HID_KEYBOARD_SC_BACKSLASH_AND_PIPE",
"~\n#": "HID_KEYBOARD_SC_NON_US_HASHMARK_AND_TILDE",
":\n;": "HID_KEYBOARD_SC_SEMICOLON_AND_COLON",
"\"\n'": "HID_KEYBOARD_SC_APOSTROPHE_AND_QUOTE",
"@\n'": "HID_KEYBOARD_SC_APOSTROPHE_AND_QUOTE",
"~\n`": "HID_KEYBOARD_SC_GRAVE_ACCENT_AND_TILDE",
"¬\n`": "HID_KEYBOARD_SC_GRAVE_ACCENT_AND_TILDE",
"<\n,": "HID_KEYBOARD_SC_COMMA_AND_LESS_THAN_SIGN",
">\n.": "HID_KEYBOARD_SC_DOT_AND_GREATER_THAN_SIGN",
"?\n/": "HID_KEYBOARD_SC_SLASH_AND_QUESTION_MARK",
"Caps Lock": "HID_KEYBOARD_SC_CAPS_LOCK",
"F1": "HID_KEYBOARD_SC_F1",
"F2": "HID_KEYBOARD_SC_F2",
"F3": "HID_KEYBOARD_SC_F3",
"F4": "HID_KEYBOARD_SC_F4",
"F5": "HID_KEYBOARD_SC_F5",
"F6": "HID_KEYBOARD_SC_F6",
"F7": "HID_KEYBOARD_SC_F7",
"F8": "HID_KEYBOARD_SC_F8",
"F9": "HID_KEYBOARD_SC_F9",
"F10": "HID_KEYBOARD_SC_F10",
"F11": "HID_KEYBOARD_SC_F11",
"F12": "HID_KEYBOARD_SC_F12",
"PrtSc": "HID_KEYBOARD_SC_PRINT_SCREEN",
"Scroll Lock": "HID_KEYBOARD_SC_SCROLL_LOCK",
"Pause\nBreak": "HID_KEYBOARD_SC_PAUSE",
"Insert": "HID_KEYBOARD_SC_INSERT",
"Home": "HID_KEYBOARD_SC_HOME",
"PgUp": "HID_KEYBOARD_SC_PAGE_UP",
"Delete": "HID_KEYBOARD_SC_DELETE",
"End": "HID_KEYBOARD_SC_END",
"PgDn": "HID_KEYBOARD_SC_PAGE_DOWN",
"→": "HID_KEYBOARD_SC_RIGHT_ARROW",
"←": "HID_KEYBOARD_SC_LEFT_ARROW",
"↓": "HID_KEYBOARD_SC_DOWN_ARROW",
"↑": "HID_KEYBOARD_SC_UP_ARROW",
"Num Lock": "HID_KEYBOARD_SC_NUM_LOCK",
"/": "HID_KEYBOARD_SC_KEYPAD_SLASH",
"*": "HID_KEYBOARD_SC_KEYPAD_ASTERISK",
"-": "HID_KEYBOARD_SC_KEYPAD_MINUS",
"+": "HID_KEYBOARD_SC_KEYPAD_PLUS",
"kpEnter": "HID_KEYBOARD_SC_KEYPAD_ENTER",
"1\nEnd": "HID_KEYBOARD_SC_KEYPAD_1_AND_END",
"2\n↓": "HID_KEYBOARD_SC_KEYPAD_2_AND_DOWN_ARROW",
"3\nPgDn": "HID_KEYBOARD_SC_KEYPAD_3_AND_PAGE_DOWN",
"4\n←": "HID_KEYBOARD_SC_KEYPAD_4_AND_LEFT_ARROW",
"5": "HID_KEYBOARD_SC_KEYPAD_5",
"6\n→": "HID_KEYBOARD_SC_KEYPAD_6_AND_RIGHT_ARROW",
"7\nHome": "HID_KEYBOARD_SC_KEYPAD_7_AND_HOME",
"8\n↑": "HID_KEYBOARD_SC_KEYPAD_8_AND_UP_ARROW",
"9\nPgUp": "HID_KEYBOARD_SC_KEYPAD_9_AND_PAGE_UP",
"0\nIns": "HID_KEYBOARD_SC_KEYPAD_0_AND_INSERT",
".\nDel": "HID_KEYBOARD_SC_KEYPAD_DOT_AND_DELETE",
# "|\n\\": "HID_KEYBOARD_SC_NON_US_BACKSLASH_AND_PIPE",
"Menu": "HID_KEYBOARD_SC_APPLICATION",
"=": "HID_KEYBOARD_SC_KEYPAD_EQUAL_SIGN",
"Ctrl": "HID_KEYBOARD_SC_LEFT_CONTROL",
"Shift": "HID_KEYBOARD_SC_LEFT_SHIFT",
"Alt": "HID_KEYBOARD_SC_LEFT_ALT",
"Win": "HID_KEYBOARD_SC_LEFT_GUI",
"rCtrl": "HID_KEYBOARD_SC_RIGHT_CONTROL",
"rShift": "HID_KEYBOARD_SC_RIGHT_SHIFT",
"AltGr": "HID_KEYBOARD_SC_RIGHT_ALT",
"rWin": "HID_KEYBOARD_SC_RIGHT_GUI",
}
def convert(s, legend, width=4, height=4):
"""Utility function to make legends less ambiguous."""
if legend == 'Enter' and width == 4 and height == 8:
legend = 'kpEnter'
elif legend == '' and width > 8:
legend = ' '
elif legend == 'Ctrl':
if s['ctrl']:
legend = 'rCtrl'
else:
s['ctrl'] = True
elif legend == 'Shift':
if s['shift']:
legend = 'rShift'
else:
s['shift'] = True
elif legend == 'Alt':
if s['alt']:
legend = 'AltGr'
else:
s['alt'] = True
elif legend == 'Win':
if s['win']:
legend = 'rWin'
else:
s['win'] = True
try:
return conversion_table[legend]
except KeyError:
return NULL_SYMBOL
def parse(path):
"""Open the JSON file at `path` and return a structure of the layout for
use in EasyAVR board config files.
"""
with open(path, encoding="utf8") as fp:
jslayout = json.load(fp)
state = {
'ctrl': False,
'shift': False,
'alt': False,
'win': False,
}
width = 4
height = 4
maxwidth = 0
totalwidth = 0
totalheight = 0
rownum = 0
colnum = 0
maxcols = 0
overhang = False
lastoverhang = False
layout = []
for row in jslayout:
newrow = []
if totalwidth > maxwidth:
maxwidth = totalwidth
totalwidth = 0
if colnum > maxcols:
maxcols = colnum
colnum = 0
overhang = False
for item in row:
if isinstance(item, str):
scancode = convert(state, item, width, height)
newrow.append(((width, height), (rownum, colnum), scancode))
totalwidth += width
width = 4
height = 4
colnum += 1
elif isinstance(item, dict):
for param, val in item.items():
if param == 'w':
width = int(val * 4)
elif param == 'h':
height = int(val * 4)
if height != 8:
raise Exception("Only heights of 1u or 2u are supported.")
overhang = True
elif param == 'x':
if lastoverhang:
# total hack to prevent overlaps in ISO enter
newrow.append((int(val * -4), None, NULL_SYMBOL))
else:
newrow.append((int(val * 4), None, NULL_SYMBOL))
totalwidth += int(val * 4)
elif param == 'y':
layout.append(int(val * 4))
totalheight += int(val * 4)
else:
continue
else:
raise TypeError("Unrecognized object in row array.")
layout.append(newrow)
totalheight += 4
rownum += 1
lastoverhang = overhang
return {
'display_height': totalheight,
'display_width': maxwidth,
'num_rows': rownum,
'num_cols': maxcols,
'layout': layout,
}
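# Illustrative usage sketch (not part of the original module); the JSON file
# name below is hypothetical:
#
#   from easykeymap import kleparse
#   layout = kleparse.parse('my_kle_layout.json')
#   print(layout['num_rows'], layout['num_cols'], layout['display_width'])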
| gpl-2.0 | 988,168,901,522,469,400 | 34.747082 | 86 | 0.560684 | false |
markvoelker/refstack | refstack/db/utils.py | 1 | 1872 | # Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for database."""
from oslo_config import cfg
from oslo_log import log
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class PluggableBackend(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
"""Init."""
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
"""Get backend."""
if not self.__backend:
backend_name = CONF[self.__pivot]
if backend_name not in self.__backends: # pragma: no cover
raise Exception('Invalid backend: %s' % backend_name)
backend = self.__backends[backend_name]
if isinstance(backend, tuple): # pragma: no cover
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
LOG.debug('backend %s', self.__backend)
return self.__backend
def __getattr__(self, key):
"""Proxy interface to backend."""
backend = self.__get_backend()
return getattr(backend, key)
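# Illustrative usage sketch (not part of the original module); the option name,
# module path and method below are hypothetical:
#
#   IMPL = PluggableBackend('db_backend',
#                           sqlalchemy='refstack.db.sqlalchemy.api')
#   IMPL.store_results(...)  # backend module is imported lazily on first use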
| apache-2.0 | -6,631,642,058,597,606,000 | 32.428571 | 78 | 0.608974 | false |
Murkantilism/LoL_API_Research | Summoner_Data_Retrieval/DEPRECATED/Generate_Summoners_Hourly.py | 1 | 1818 | __author__ = 'Deniz'
import time, subprocess, argparse, getopt
from sys import argv
import sys, os
DEFAULT_NUM_SUMMONERS = 250
DEFAULT_LOCATION = os.curdir + "\_out\Random_Summoners_run_"+str(time.time())
def main():
parser = argparse.ArgumentParser(description='Attempt to generate X number'
' of random summoners hourly.')
parser.add_argument('-out', metavar='o', type=str, default=DEFAULT_LOCATION, help='the output location ' + str(DEFAULT_LOCATION))
parser.add_argument('-num', metavar='n', type=int, default=DEFAULT_NUM_SUMMONERS,
help='number of summoners (default: ' +
str(DEFAULT_NUM_SUMMONERS) + ')',)
args = parser.parse_args()
#print vars(args).values()
    # Assign the number of summoners (read the parsed attributes directly;
    # indexing vars(args).values() depends on dict ordering and is fragile)
    numSummoners = args.num
    # Assign the output path
    outputLocation = args.out
subprocess.check_call('python Generate_Summoners.py' + ' -out ' +
str(outputLocation) + ' -num ' +
str(numSummoners), shell=True)
subprocess.check_call('python Get_Most_Used_Champion.py' + ' -out ' +
str(outputLocation), shell=True)
subprocess.check_call('python Check_Duplicate_Summoners.py' + ' -out ' +
str(outputLocation), shell=True)
subprocess.check_call('python Scrub_Useless_Summoners.py' + ' -out ' +
str(outputLocation), shell=True)
time.sleep(3600-time.time()%3600)
main()
# The usage information returned when -h parameter is given
def usage():
print "\nThis is the CLI for the dan audio matcher program\n"
print 'Usage: ' + argv[0] + ' -f <set1> -f <set2>'
if __name__ == "__main__":
main() | mit | 5,609,995,969,163,271,000 | 38.543478 | 133 | 0.59516 | false |
Intelimetrica/coati | coati/generator.py | 1 | 3847 | from coati.powerpoint import open_pptx, runpowerpoint
import errno
import os
import sys
import logging
from shutil import copyfile
from colorlog import ColoredFormatter
LOG_LEVEL = logging.DEBUG
LOGFORMAT = "%(asctime)s - %(log_color)s%(message)s"
logging.root.setLevel(LOG_LEVEL)
formatter = ColoredFormatter(LOGFORMAT)
stream = logging.StreamHandler()
stream.setLevel(LOG_LEVEL)
stream.setFormatter(formatter)
log = logging.getLogger('pythonConfig')
log.setLevel(LOG_LEVEL)
log.addHandler(stream)
this_dir = os.path.dirname(__file__)
template_path = os.path.join(this_dir, 'templates/slide_template.txt')
config_template_path = os.path.join(this_dir, 'templates/config_template.txt')
init_template_path = os.path.join(this_dir, 'templates/init_template.txt')
def _get_slides_shapes(ppt_path):
pptapp = runpowerpoint()
pptFile = open_pptx(pptapp, ppt_path)
log.debug('Open Template successfully...')
all_slide_shapes = []
for slide in pptFile.Slides:
shapes_in_slide = _get_shapes_in_slide(slide)
all_slide_shapes.append(shapes_in_slide)
pptFile.close()
pptapp.Quit()
log.debug('Finish reading template...')
return all_slide_shapes
def _get_shapes_in_slide(slide):
shapes_in_slide = {each_shape.name: () for each_shape in slide.shapes}
return shapes_in_slide
def _generate_path(p):
if not os.path.exists(os.path.dirname(p)):
try:
os.makedirs(os.path.dirname(p))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
def _cp(src, dst, fn):
source = open(src, 'r')
result = fn(source.read())
destination = open(dst, 'w')
destination.write(result)
    source.close()
    destination.close()
def _insert_code(complete_text, text_to_insert, text_to_replace):
ans = complete_text.replace(text_to_replace, text_to_insert)
return ans
def _file_exists(ppt_path):
if not (ppt_path.endswith('.pptx') or ppt_path.endswith('.ppt')):
sys.exit('The file provided is not a PPT file')
elif not os.path.isfile(ppt_path):
sys.exit('The PPT file provided doesnt exist or is damaged')
pass
def generate(project_name, ppt_path):
_file_exists(ppt_path)
path = os.path.abspath(project_name)
spaces = " " * 12
slide_tuples = '['
#Generate builders/ folder prior slides creation
path_builders = os.path.join(path, 'builders/')
_generate_path(path_builders)
log.info('create folder %s', "./builders/")
for i, slide in enumerate(_get_slides_shapes(ppt_path)):
slide_name = 'slide' + str(i+1)
filename = path_builders + slide_name + '.py';
#Create slide#.py with the template info
_cp(template_path, filename, lambda source: _insert_code(
source,
str(slide).replace(", ",",\n" + spaces),
'"_-{}-_"'))
log.info('create %s', filename)
#This line is in the for loop cause is gathering info for the config.py
slide_tuples += ('\n' + spaces if i != 0 else '') + '(' + str(i+1) + ', ' + slide_name + '.build()),'
#Generate config.py with already gathered info in slide_tuples
config_filename = path + '/config.py'
_cp(config_template_path, config_filename, lambda source: _insert_code(
source,
(slide_tuples[:-1] + ']'),
'"_-{}-_"'))
log.info('create %s', config_filename)
#Create __init__ in builders
init_file = path + '/builders/__init__.py'
copyfile(init_template_path, init_file)
log.info('create %s', init_file)
#Copy original template file
copy_ppt = path + '/' + str(os.path.split(ppt_path)[-1])
_cp(ppt_path, copy_ppt , lambda source: source)
log.info('copy %s', copy_ppt)
#Add images folder
_generate_path(os.path.join(path, 'images/'))
log.info('create folder %s', "./images/")
| mit | -2,483,990,554,525,858,300 | 32.452174 | 109 | 0.643878 | false |
samuelpych/Final-Project | finalproject.py | 1 | 5690 | """
Finalproject.py
Author: Sam Pych
Credit: Thomas Kyle Postans, Hagin, My Space Game, David Wilson
Assignment: Create a pong game with two movable blocks and the ball either bounces off the wall
or appears on the other side.
optional: keep score
bounde=self.collidingWithSprites(Pongblock1)
"""
from ggame import App, Sprite, ImageAsset, Frame
from ggame import SoundAsset, Sound, TextAsset, Color
import math
from time import time
from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset
from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame
class Pongblock(Sprite):
black = Color(0x000000, 1.0)
thinline= LineStyle(1, black)
rectangle_asset=RectangleAsset(50, 100, thinline, black)
#rectangle1 = Sprite(rectangle_asset, (1100,250))
def __init__(self, position):
        super().__init__(Pongblock.rectangle_asset, position)
        self.vx = 0  # horizontal speed; needed by the left/right key handlers below
        self.vy = 1
ponggame.listenKeyEvent("keydown", "up arrow", self.up)
ponggame.listenKeyEvent("keydown","left arrow",self.left)
ponggame.listenKeyEvent("keydown","down arrow",self.down)
ponggame.listenKeyEvent("keydown","right arrow",self.right)
self.fxcenter = self.fycenter = 0.5
def step(self):
self.y += self.vy
#self.y += self.vy
if self.y >480:
self.y=479
if self.y <-1:
self.y=0
def up(self, event):
self.vy -=2
def down(self, event):
self.vy +=2
def left(self, event):
self.vx -=2
def right(self, event):
self.vx +=2
class Pongblock1(Sprite):
black = Color(0x000000, 1.0)
thinline= LineStyle(1, black)
rectangle_asset=RectangleAsset(50, 100, thinline, black)
#rectangle1 = Sprite(rectangle_asset, (1100,250))
def __init__(self, position):
super().__init__(Pongblock1.rectangle_asset, position)
        self.vx = 0  # horizontal speed; needed by the left/right key handlers below
        self.vy = 1
ponggame.listenKeyEvent("keydown", "w", self.up)
ponggame.listenKeyEvent("keydown","a",self.left)
ponggame.listenKeyEvent("keydown","s",self.down)
ponggame.listenKeyEvent("keydown","d",self.right)
self.fxcenter = self.fycenter = 0.5
def step(self):
self.y += self.vy
#self.y += self.vy
if self.y >480:
self.y=479
if self.y <-1:
self.y=0
def up(self, event):
self.vy -=2
def down(self, event):
self.vy +=2
def left(self, event):
self.vx -=2
def right(self, event):
self.vx +=2
class pongball(Sprite):
red = Color(0xff0000, 1.0)
thinline= LineStyle(1, red)
circle_asset=CircleAsset(25, thinline, red)
#circle1 = Sprite(circle_asset, (600,300))
circle=CircleAsset(1500, thinline, red)
def __init__(self, position):
super().__init__(pongball.circle_asset, position)
self.vx = 2
self.vy = 10
previousY = self.vy
self.fxcenter = self.fycenter = 0.5
def step(self):
self.x += self.vx
self.y += self.vy
if self.y >500:
self.vy=-7
if self.y <-1:
self.vy=7
if self.visible:
collides = self.collidingWithSprites(Scoreline)
if len(collides):
if collides[0].visible:
print("arrow keys win")
self.x += self.vx
self.x += self.vx
return True
if self.visible:
collides2 = self.collidingWithSprites(Scoreline2)
if len(collides2):
if collides2[0].visible:
print("wasd wins")
self.x += self.vx
self.x += self.vx
if self.visible:
collides3 = self.collidingWithSprites(Pongblock1)
if len(collides3):
if collides3[0].visible:
self.vx = 6
self.vy = 6
self.x += self.vx
self.y += self.vy
if self.visible:
collides4 = self.collidingWithSprites(Pongblock)
if len(collides4):
if collides4[0].visible:
self.vx = -6
self.vy = -4
self.x += self.vx
self.y += self.vy
class Scoreline(Sprite):
blue = Color(0x0000ff, 1.0)
thinline= LineStyle(1, blue)
rectangle_asset=RectangleAsset(10, 2000, thinline, blue)
#rectangle = Sprite(rectangle_asset, (00,-100))
def __init__(self, position):
super().__init__(Scoreline.rectangle_asset, position)
class Scoreline2(Sprite):
blue = Color(0x0000ff, 1.0)
thinline= LineStyle(1, blue)
rectangle_asset=RectangleAsset(10, 2000, thinline, blue)
#rectangle = Sprite(rectangle_asset, (1200,-100))
def __init__(self, position):
super().__init__(Scoreline2.rectangle_asset, position)
#class Scoreboard:
# Not enough time to do it
class ponggame(App):
def __init__(self, width, height):
super().__init__(width, height)
Pongblock1((100,10))
Scoreline((00,-100))
Pongblock((1100,250))
Scoreline2((1200,-100))
pongball((1000,100))
print(self.getSpritesbyClass(pongball))
def step(self):
for x in self.getSpritesbyClass(Pongblock1):
x.step()
for x in self.getSpritesbyClass(Pongblock):
x.step()
for x in self.getSpritesbyClass(pongball):
x.step()
def restart(self):
ponggame.listenKeyEvent("keydown","spacebar",self.restart)
app = ponggame(0,0)
app.run() | mit | -5,657,514,026,309,861,000 | 33.70122 | 104 | 0.577153 | false |
zasdfgbnm/tensorflow | tensorflow/contrib/quantize/python/common.py | 1 | 4098 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities used across this package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
# Skip all operations that are backprop related or export summaries.
SKIPPED_PREFIXES = (
'gradients/', 'RMSProp/', 'Adagrad/', 'Const_', 'HistogramSummary',
'ScalarSummary')
# Valid activation ops for quantization end points.
_ACTIVATION_OP_SUFFIXES = ['/Relu6', '/Relu', '/Identity']
# Regular expression for recognizing nodes that are part of batch norm group.
_BATCHNORM_RE = re.compile(r'^(.*)/BatchNorm/batchnorm')
def BatchNormGroups(graph):
"""Finds batch norm layers, returns their prefixes as a list of strings.
Args:
graph: Graph to inspect.
Returns:
List of strings, prefixes of batch norm group names found.
"""
bns = []
for op in graph.get_operations():
match = _BATCHNORM_RE.search(op.name)
if match:
bn = match.group(1)
if not bn.startswith(SKIPPED_PREFIXES):
bns.append(bn)
# Filter out duplicates.
return list(collections.OrderedDict.fromkeys(bns))
def GetEndpointActivationOp(graph, prefix):
"""Returns an Operation with the given prefix and a valid end point suffix.
Args:
graph: Graph where to look for the operation.
prefix: String, prefix of Operation to return.
Returns:
The Operation with the given prefix and a valid end point suffix or None if
there are no matching operations in the graph for any valid suffix
"""
for suffix in _ACTIVATION_OP_SUFFIXES:
activation = _GetOperationByNameDontThrow(graph, prefix + suffix)
if activation:
return activation
return None
def _GetOperationByNameDontThrow(graph, name):
"""Returns an Operation with the given name.
Args:
graph: Graph where to look for the operation.
name: String, name of Operation to return.
Returns:
The Operation with the given name. None if the name does not correspond to
any operation in the graph
"""
try:
return graph.get_operation_by_name(name)
except KeyError:
return None
def CreateOrGetQuantizationStep():
"""Returns a Tensor of the number of steps the quantized graph has run.
Returns:
Quantization step Tensor.
"""
quantization_step_name = 'fake_quantization_step'
quantization_step_tensor_name = quantization_step_name + '/AssignAdd:0'
g = ops.get_default_graph()
try:
return g.get_tensor_by_name(quantization_step_tensor_name)
except KeyError:
# Create in proper graph and base name_scope.
with g.name_scope(None):
quantization_step_tensor = variable_scope.get_variable(
quantization_step_name,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
with g.name_scope(quantization_step_tensor.op.name + '/'):
# We return the incremented variable tensor. Since this is used in conds
# for quant_delay and freeze_bn_delay, it will run once per graph
# execution.
return state_ops.assign_add(quantization_step_tensor, 1)
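# Illustrative usage sketch (not part of the original module): for a graph
# containing contrib/slim-style batch norm layers, the helpers above would
# typically be combined roughly as follows (variable names are hypothetical):
#
#   g = ops.get_default_graph()
#   for prefix in BatchNormGroups(g):
#     activation = GetEndpointActivationOp(g, prefix)
#     # ... rewrite/annotate the ops found under `prefix` ...
#   step = CreateOrGetQuantizationStep()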
| apache-2.0 | -8,926,153,720,807,249,000 | 32.590164 | 80 | 0.704978 | false |
corradio/electricitymap | test_parser.py | 1 | 4107 | #!/usr/bin/env python3
"""
Usage: poetry run test_parser FR production
"""
import time
import sys
import pprint
import datetime
import logging
import arrow
import click
from electricitymap.contrib.parsers.lib.parsers import PARSER_KEY_TO_DICT
from parsers.lib.quality import (
validate_consumption,
validate_production,
validate_exchange,
ValidationError,
)
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
@click.command()
@click.argument("zone")
@click.argument("data-type", default="production")
@click.option("--target_datetime", default=None, show_default=True)
def test_parser(zone, data_type, target_datetime):
"""
Parameters
----------
zone: a two letter zone from the map
    data_type: in ['production', 'exchangeForecast', 'exchange',
'price', 'consumption', 'generationForecast', 'consumptionForecast']
target_datetime: string parseable by arrow, such as 2018-05-30 15:00
Examples
-------
>>> python test_parser.py NO-NO3-\>SE exchange
parser result:
{'netFlow': -51.6563, 'datetime': datetime.datetime(2018, 7, 3, 14, 38, tzinfo=tzutc()), 'source': 'driftsdata.stattnet.no', 'sortedZoneKeys': 'NO-NO3->SE'}
---------------------
took 0.09s
min returned datetime: 2018-07-03 14:38:00+00:00
max returned datetime: 2018-07-03T14:38:00+00:00 UTC -- OK, <2h from now :) (now=2018-07-03T14:39:16.274194+00:00 UTC)
>>> python test_parser.py FR production
parser result:
[... long stuff ...]
---------------------
took 5.38s
min returned datetime: 2018-07-02 00:00:00+02:00
max returned datetime: 2018-07-03T14:30:00+00:00 UTC -- OK, <2h from now :) (now=2018-07-03T14:43:35.501375+00:00 UTC)
"""
if target_datetime:
target_datetime = arrow.get(target_datetime).datetime
start = time.time()
parser = PARSER_KEY_TO_DICT[data_type][zone]
if data_type in ["exchange", "exchangeForecast"]:
args = zone.split("->")
else:
args = [zone]
res = parser(
*args, target_datetime=target_datetime, logger=logging.getLogger(__name__)
)
if not res:
raise ValueError('Error: parser returned nothing ({})'.format(res))
elapsed_time = time.time() - start
if isinstance(res, (list, tuple)):
res_list = list(res)
else:
res_list = [res]
try:
dts = [e["datetime"] for e in res_list]
    except Exception:
        raise ValueError('Parser output lacks `datetime` key for at least some of the '
                         'output. Full output: \n\n{}\n'.format(res))
assert all([type(e['datetime']) is datetime.datetime for e in res_list]), \
'Datetimes must be returned as native datetime.datetime objects'
last_dt = arrow.get(max(dts)).to('UTC')
first_dt = arrow.get(min(dts)).to('UTC')
max_dt_warning = ''
if not target_datetime:
max_dt_warning = (
" :( >2h from now !!!"
if (arrow.utcnow() - last_dt).total_seconds() > 2 * 3600
else " -- OK, <2h from now :) (now={} UTC)".format(arrow.utcnow())
)
print("Parser result:")
pp = pprint.PrettyPrinter(width=120)
pp.pprint(res)
print(
"\n".join(
[
"---------------------",
"took {:.2f}s".format(elapsed_time),
"min returned datetime: {} UTC".format(first_dt),
"max returned datetime: {} UTC {}".format(last_dt, max_dt_warning),
]
)
)
if type(res) == dict:
res = [res]
for event in res:
try:
if data_type == "production":
validate_production(event, zone)
elif data_type == "consumption":
validate_consumption(event, zone)
elif data_type == "exchange":
validate_exchange(event, zone)
except ValidationError as e:
logger.warning('Validation failed @ {}: {}'.format(event['datetime'], e))
if __name__ == "__main__":
# pylint: disable=no-value-for-parameter
print(test_parser())
| gpl-3.0 | -8,112,435,090,802,228,000 | 30.837209 | 160 | 0.590699 | false |
brendanv/nasa-api | nasa/maas.py | 1 | 1543 | from nasa import api
from nasa.base import NasaApiObject
''' Retrieves the most recent MAAS Report '''
def latest():
response = api.external_api_get(
'http://marsweather.ingenology.com/v1/latest/',
{},
)
return MAASReport.from_response(response['report'])
''' Retrieves the set of MAAS Reports that match the filters
provided via keyword args. Most report fields can be used as
filters.
'''
def archived(**kwargs):
return _maas_paginate(
'http://marsweather.ingenology.com/v1/archive/',
**kwargs
)
def _maas_paginate(url, **kwargs):
response = api.external_api_get(url, kwargs)
response['results'] = [
MAASReport.from_response(r) for r in response['results']
]
next_url = response['next']
if next_url is not None:
response['next'] = lambda: _maas_paginate(next_url)
prev_url = response['previous']
if prev_url is not None:
response['previous'] = lambda: _maas_paginate(prev_url)
return response
class MAASReport(NasaApiObject):
"""Mars Atmospheric Aggregation System Report"""
class Meta(object):
properties = ['terrestrial_date', 'sol', 'ls', 'min_temp',
'min_temp_fahrenheit', 'max_temp', 'max_temp_fahrenheit',
'pressure', 'pressure_string', 'abs_humidity',
'wind_speed', 'wind_direction', 'atmo_opacity', 'season',
'sunrise', 'sunset']
def __init__(self, **kwargs):
super(MAASReport, self).__init__(**kwargs)
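# Illustrative usage sketch (not part of the original module); note that the
# third-party marsweather endpoint queried above may no longer be online, and
# the archive filter name shown is hypothetical:
#
#   report = latest()
#   print(report.sol, report.min_temp, report.max_temp)
#   page = archived(terrestrial_date_start='2015-01-01')
#   for r in page['results']:
#       print(r.terrestrial_date, r.season)
#   if page['next'] is not None:
#       page = page['next']()  # lazily fetch the following page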
| gpl-3.0 | 5,404,063,207,014,197,000 | 32.543478 | 79 | 0.615684 | false |
wkerzendorf/chiantipy | chiantipy/chianti/__init__.py | 1 | 2095 | '''the ChiantiPy - CHIANTI Python package
calculates various aspects of emission line and continua from the
CHIANTI atomic database for astrophysical spectroscopy'''
import os
import constants
import filters
import mputil
#
#try:
# chInteractive = int(os.environ['CHIANTIPY_INTERACTIVE'])
#except:
# chInteractive = 1
#if chInteractive:
# import pylab as pl
#else:
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as pl
###
#xuvtop = os.environ['XUVTOP']
##chInteractive=1
#Defaults = util.defaultsRead(verbose = chInteractive)
#Ip = util.ipRead()
#MasterList = util.masterListRead()
#AbundanceAll = util.abundanceRead(abundancename = Defaults['abundfile'])
#IoneqAll = util.ioneqRead(ioneqname = Defaults['ioneqfile'])
#import version
#__version__ = version.__version__
#__version_info__ = version.__version_info__
#import core
import pylab as pl
if pl.rcParams['backend'].lower() == 'qt4agg':
import gui_qt.gui as gui
elif pl.rcParams['backend'].lower() == 'wxagg':
import gui_wx.gui as gui
elif pl.rcParams['backend'].lower() == 'gtkagg':
import gui_cl.gui as gui
elif pl.rcParams['backend'].lower() == 'agg':
import gui_cl.gui as gui
elif pl.rcParams['backend'].lower() == 'macosx':
import gui_cl.gui as gui
else:
print ' - Warning - '
print ' - in order to use the various gui dialogs, the matlpotlib/pylab backend needs'
print ' - to be either Qt4Agg or WXAgg - '
print ' - in order to use the command line dialogs, the matlpotlib/pylab backend needs'
print ' - to be GTKAgg or MacOSX - '
print ' - current backend is ',pl.rcParams['backend']
print ' - the full functionality of the chianti.core.ion class may not be available'
print ' - it would probably be better to set your matplotlib backend to either'
print ' - Qt4Agg, WXAgg, GTKAgg, or MacOSX'
print ' - using the command line dialogs for now but there could be problems -'
import gui_cl.gui as gui
#
# placed here because util needs gui
import util
| gpl-3.0 | 1,139,370,885,466,107,600 | 33.916667 | 91 | 0.702148 | false |
librasungirl/openthread | tests/toranj/wpan.py | 1 | 61707 | #!/usr/bin/env python3
#
# Copyright (c) 2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
import os
import time
import re
import random
import weakref
import subprocess
import socket
import asyncore
import inspect
# ----------------------------------------------------------------------------------------------------------------------
# wpantund properties
WPAN_STATE = 'NCP:State'
WPAN_NAME = 'Network:Name'
WPAN_PANID = 'Network:PANID'
WPAN_XPANID = 'Network:XPANID'
WPAN_KEY = 'Network:Key'
WPAN_KEY_INDEX = 'Network:KeyIndex'
WPAN_CHANNEL = 'NCP:Channel'
WPAN_HW_ADDRESS = 'NCP:HardwareAddress'
WPAN_EXT_ADDRESS = 'NCP:ExtendedAddress'
WPAN_POLL_INTERVAL = 'NCP:SleepyPollInterval'
WPAN_NODE_TYPE = 'Network:NodeType'
WPAN_ROLE = 'Network:Role'
WPAN_PARTITION_ID = 'Network:PartitionId'
WPAN_NCP_VERSION = 'NCP:Version'
WPAN_NCP_MCU_POWER_STATE = "NCP:MCUPowerState"
WPAN_NETWORK_ALLOW_JOIN = 'com.nestlabs.internal:Network:AllowingJoin'
WPAN_NETWORK_PASSTHRU_PORT = 'com.nestlabs.internal:Network:PassthruPort'
WPAN_RCP_VERSION = "POSIXApp:RCPVersion"
WPAN_IP6_LINK_LOCAL_ADDRESS = "IPv6:LinkLocalAddress"
WPAN_IP6_MESH_LOCAL_ADDRESS = "IPv6:MeshLocalAddress"
WPAN_IP6_MESH_LOCAL_PREFIX = "IPv6:MeshLocalPrefix"
WPAN_IP6_ALL_ADDRESSES = "IPv6:AllAddresses"
WPAN_IP6_MULTICAST_ADDRESSES = "IPv6:MulticastAddresses"
WPAN_IP6_INTERFACE_ROUTES = "IPv6:Routes"
WPAN_DAEMON_OFF_MESH_ROUTE_AUTO_ADD_ON_INTERFACE = "Daemon:OffMeshRoute:AutoAddOnInterface"
WPAN_DAEMON_OFF_MESH_ROUTE_FILTER_SELF_AUTO_ADDED = "Daemon:OffMeshRoute:FilterSelfAutoAdded"
WPAN_DAEMON_ON_MESH_PREFIX_AUTO_ADD_AS_INTERFACE_ROUTE = "Daemon:OnMeshPrefix:AutoAddAsInterfaceRoute"
WPAN_THREAD_RLOC16 = "Thread:RLOC16"
WPAN_THREAD_ROUTER_ID = "Thread:RouterID"
WPAN_THREAD_LEADER_ADDRESS = "Thread:Leader:Address"
WPAN_THREAD_LEADER_ROUTER_ID = "Thread:Leader:RouterID"
WPAN_THREAD_LEADER_WEIGHT = "Thread:Leader:Weight"
WPAN_THREAD_LEADER_LOCAL_WEIGHT = "Thread:Leader:LocalWeight"
WPAN_THREAD_LEADER_NETWORK_DATA = "Thread:Leader:NetworkData"
WPAN_THREAD_STABLE_LEADER_NETWORK_DATA = "Thread:Leader:StableNetworkData"
WPAN_THREAD_NETWORK_DATA = "Thread:NetworkData"
WPAN_THREAD_CHILD_TABLE = "Thread:ChildTable"
WPAN_THREAD_CHILD_TABLE_ASVALMAP = "Thread:ChildTable:AsValMap"
WPAN_THREAD_CHILD_TABLE_ADDRESSES = "Thread:ChildTable:Addresses"
WPAN_THREAD_NEIGHBOR_TABLE = "Thread:NeighborTable"
WPAN_THREAD_NEIGHBOR_TABLE_ASVALMAP = "Thread:NeighborTable:AsValMap"
WPAN_THREAD_NEIGHBOR_TABLE_ERR_RATES = "Thread:NeighborTable:ErrorRates"
WPAN_THREAD_NEIGHBOR_TABLE_ERR_RATES_AVVALMAP = "Thread:NeighborTable:ErrorRates:AsValMap"
WPAN_THREAD_ROUTER_TABLE = "Thread:RouterTable"
WPAN_THREAD_ROUTER_TABLE_ASVALMAP = "Thread:RouterTable:AsValMap"
WPAN_THREAD_CHILD_TIMEOUT = "Thread:ChildTimeout"
WPAN_THREAD_PARENT = "Thread:Parent"
WPAN_THREAD_PARENT_ASVALMAP = "Thread:Parent:AsValMap"
WPAN_THREAD_NETWORK_DATA_VERSION = "Thread:NetworkDataVersion"
WPAN_THREAD_STABLE_NETWORK_DATA = "Thread:StableNetworkData"
WPAN_THREAD_STABLE_NETWORK_DATA_VERSION = "Thread:StableNetworkDataVersion"
WPAN_THREAD_PREFERRED_ROUTER_ID = "Thread:PreferredRouterID"
WPAN_THREAD_COMMISSIONER_ENABLED = "Thread:Commissioner:Enabled"
WPAN_THREAD_DEVICE_MODE = "Thread:DeviceMode"
WPAN_THREAD_OFF_MESH_ROUTES = "Thread:OffMeshRoutes"
WPAN_THREAD_ON_MESH_PREFIXES = "Thread:OnMeshPrefixes"
WPAN_THREAD_ROUTER_ROLE_ENABLED = "Thread:RouterRole:Enabled"
WPAN_THREAD_CONFIG_FILTER_RLOC_ADDRESSES = "Thread:Config:FilterRLOCAddresses"
WPAN_THREAD_ROUTER_UPGRADE_THRESHOLD = "Thread:RouterUpgradeThreshold"
WPAN_THREAD_ROUTER_DOWNGRADE_THRESHOLD = "Thread:RouterDowngradeThreshold"
WPAN_THREAD_ACTIVE_DATASET = "Thread:ActiveDataset"
WPAN_THREAD_ACTIVE_DATASET_ASVALMAP = "Thread:ActiveDataset:AsValMap"
WPAN_THREAD_PENDING_DATASET = "Thread:PendingDataset"
WPAN_THREAD_PENDING_DATASET_ASVALMAP = "Thread:PendingDataset:AsValMap"
WPAN_THREAD_ADDRESS_CACHE_TABLE = "Thread:AddressCacheTable"
WPAN_THREAD_ADDRESS_CACHE_TABLE_ASVALMAP = "Thread:AddressCacheTable:AsValMap"
WPAN_OT_LOG_LEVEL = "OpenThread:LogLevel"
WPAN_OT_SLAAC_ENABLED = "OpenThread:SLAAC:Enabled"
WPAN_OT_STEERING_DATA_ADDRESS = "OpenThread:SteeringData:Address"
WPAN_OT_STEERING_DATA_SET_WHEN_JOINABLE = "OpenThread:SteeringData:SetWhenJoinable"
WPAN_OT_MSG_BUFFER_COUNTERS = "OpenThread:MsgBufferCounters"
WPAN_OT_MSG_BUFFER_COUNTERS_AS_STRING = "OpenThread:MsgBufferCounters:AsString"
WPAN_OT_DEBUG_TEST_ASSERT = "OpenThread:Debug:TestAssert"
WPAN_OT_DEBUG_TEST_WATCHDOG = "OpenThread:Debug:TestWatchdog"
WPAN_MAC_WHITELIST_ENABLED = "MAC:Whitelist:Enabled"
WPAN_MAC_WHITELIST_ENTRIES = "MAC:Whitelist:Entries"
WPAN_MAC_WHITELIST_ENTRIES_ASVALMAP = "MAC:Whitelist:Entries:AsValMap"
WPAN_MAC_BLACKLIST_ENABLED = "MAC:Blacklist:Enabled"
WPAN_MAC_BLACKLIST_ENTRIES = "MAC:Blacklist:Entries"
WPAN_MAC_BLACKLIST_ENTRIES_ASVALMAP = "MAC:Blacklist:Entries:AsValMap"
WPAN_MAC_FILTER_FIXED_RSSI = "MAC:Filter:FixedRssi"
WPAN_MAC_FILTER_ENTRIES = "MAC:Filter:Entries"
WPAN_MAC_FILTER_ENTRIES_ASVALMAP = "MAC:Filter:Entries:AsValMap"
WPAN_CHILD_SUPERVISION_INTERVAL = "ChildSupervision:Interval"
WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT = "ChildSupervision:CheckTimeout"
WPAN_JAM_DETECTION_STATUS = "JamDetection:Status"
WPAN_JAM_DETECTION_ENABLE = "JamDetection:Enable"
WPAN_JAM_DETECTION_RSSI_THRESHOLD = "JamDetection:RssiThreshold"
WPAN_JAM_DETECTION_WINDOW = "JamDetection:Window"
WPAN_JAM_DETECTION_BUSY_PERIOD = "JamDetection:BusyPeriod"
WPAN_JAM_DETECTION_DEBUG_HISTORY_BITMAP = "JamDetection:Debug:HistoryBitmap"
WPAN_CHANNEL_MONITOR_SAMPLE_INTERVAL = "ChannelMonitor:SampleInterval"
WPAN_CHANNEL_MONITOR_RSSI_THRESHOLD = "ChannelMonitor:RssiThreshold"
WPAN_CHANNEL_MONITOR_SAMPLE_WINDOW = "ChannelMonitor:SampleWindow"
WPAN_CHANNEL_MONITOR_SAMPLE_COUNT = "ChannelMonitor:SampleCount"
WPAN_CHANNEL_MONITOR_CHANNEL_QUALITY = "ChannelMonitor:ChannelQuality"
WPAN_CHANNEL_MONITOR_CHANNEL_QUALITY_ASVALMAP = "ChannelMonitor:ChannelQuality:AsValMap"
WPAN_CHANNEL_MANAGER_NEW_CHANNEL = "ChannelManager:NewChannel"
WPAN_CHANNEL_MANAGER_DELAY = "ChannelManager:Delay"
WPAN_CHANNEL_MANAGER_CHANNEL_SELECT = "ChannelManager:ChannelSelect"
WPAN_CHANNEL_MANAGER_AUTO_SELECT_ENABLED = "ChannelManager:AutoSelect:Enabled"
WPAN_CHANNEL_MANAGER_AUTO_SELECT_INTERVAL = "ChannelManager:AutoSelect:Interval"
WPAN_CHANNEL_MANAGER_SUPPORTED_CHANNEL_MASK = "ChannelManager:SupportedChannelMask"
WPAN_CHANNEL_MANAGER_FAVORED_CHANNEL_MASK = "ChannelManager:FavoredChannelMask"
WPAN_NCP_COUNTER_ALL_MAC = "NCP:Counter:AllMac"
WPAN_NCP_COUNTER_ALL_MAC_ASVALMAP = "NCP:Counter:AllMac:AsValMap"
WPAN_NCP_COUNTER_TX_PKT_TOTAL = "NCP:Counter:TX_PKT_TOTAL"
WPAN_NCP_COUNTER_TX_PKT_UNICAST = "NCP:Counter:TX_PKT_UNICAST"
WPAN_NCP_COUNTER_TX_PKT_BROADCAST = "NCP:Counter:TX_PKT_BROADCAST"
WPAN_NCP_COUNTER_TX_PKT_ACK_REQ = "NCP:Counter:TX_PKT_ACK_REQ"
WPAN_NCP_COUNTER_TX_PKT_ACKED = "NCP:Counter:TX_PKT_ACKED"
WPAN_NCP_COUNTER_TX_PKT_NO_ACK_REQ = "NCP:Counter:TX_PKT_NO_ACK_REQ"
WPAN_NCP_COUNTER_TX_PKT_DATA = "NCP:Counter:TX_PKT_DATA"
WPAN_NCP_COUNTER_TX_PKT_DATA_POLL = "NCP:Counter:TX_PKT_DATA_POLL"
WPAN_NCP_COUNTER_TX_PKT_BEACON = "NCP:Counter:TX_PKT_BEACON"
WPAN_NCP_COUNTER_TX_PKT_BEACON_REQ = "NCP:Counter:TX_PKT_BEACON_REQ"
WPAN_NCP_COUNTER_TX_PKT_OTHER = "NCP:Counter:TX_PKT_OTHER"
WPAN_NCP_COUNTER_TX_PKT_RETRY = "NCP:Counter:TX_PKT_RETRY"
WPAN_NCP_COUNTER_TX_ERR_CCA = "NCP:Counter:TX_ERR_CCA"
WPAN_NCP_COUNTER_TX_ERR_ABORT = "NCP:Counter:TX_ERR_ABORT"
WPAN_NCP_COUNTER_RX_PKT_TOTAL = "NCP:Counter:RX_PKT_TOTAL"
WPAN_NCP_COUNTER_RX_PKT_UNICAST = "NCP:Counter:RX_PKT_UNICAST"
WPAN_NCP_COUNTER_RX_PKT_BROADCAST = "NCP:Counter:RX_PKT_BROADCAST"
WPAN_NCP_COUNTER_RX_PKT_DATA = "NCP:Counter:RX_PKT_DATA"
WPAN_NCP_COUNTER_RX_PKT_DATA_POLL = "NCP:Counter:RX_PKT_DATA_POLL"
WPAN_NCP_COUNTER_RX_PKT_BEACON = "NCP:Counter:RX_PKT_BEACON"
WPAN_NCP_COUNTER_RX_PKT_BEACON_REQ = "NCP:Counter:RX_PKT_BEACON_REQ"
WPAN_NCP_COUNTER_RX_PKT_OTHER = "NCP:Counter:RX_PKT_OTHER"
WPAN_NCP_COUNTER_RX_PKT_FILT_WL = "NCP:Counter:RX_PKT_FILT_WL"
WPAN_NCP_COUNTER_RX_PKT_FILT_DA = "NCP:Counter:RX_PKT_FILT_DA"
WPAN_NCP_COUNTER_RX_ERR_EMPTY = "NCP:Counter:RX_ERR_EMPTY"
WPAN_NCP_COUNTER_RX_ERR_UKWN_NBR = "NCP:Counter:RX_ERR_UKWN_NBR"
WPAN_NCP_COUNTER_RX_ERR_NVLD_SADDR = "NCP:Counter:RX_ERR_NVLD_SADDR"
WPAN_NCP_COUNTER_RX_ERR_SECURITY = "NCP:Counter:RX_ERR_SECURITY"
WPAN_NCP_COUNTER_RX_ERR_BAD_FCS = "NCP:Counter:RX_ERR_BAD_FCS"
WPAN_NCP_COUNTER_RX_ERR_OTHER = "NCP:Counter:RX_ERR_OTHER"
WPAN_NCP_COUNTER_TX_IP_SEC_TOTAL = "NCP:Counter:TX_IP_SEC_TOTAL"
WPAN_NCP_COUNTER_TX_IP_INSEC_TOTAL = "NCP:Counter:TX_IP_INSEC_TOTAL"
WPAN_NCP_COUNTER_TX_IP_DROPPED = "NCP:Counter:TX_IP_DROPPED"
WPAN_NCP_COUNTER_RX_IP_SEC_TOTAL = "NCP:Counter:RX_IP_SEC_TOTAL"
WPAN_NCP_COUNTER_RX_IP_INSEC_TOTAL = "NCP:Counter:RX_IP_INSEC_TOTAL"
WPAN_NCP_COUNTER_RX_IP_DROPPED = "NCP:Counter:RX_IP_DROPPED"
WPAN_NCP_COUNTER_TX_SPINEL_TOTAL = "NCP:Counter:TX_SPINEL_TOTAL"
WPAN_NCP_COUNTER_RX_SPINEL_TOTAL = "NCP:Counter:RX_SPINEL_TOTAL"
WPAN_NCP_COUNTER_RX_SPINEL_ERR = "NCP:Counter:RX_SPINEL_ERR"
WPAN_NCP_COUNTER_IP_TX_SUCCESS = "NCP:Counter:IP_TX_SUCCESS"
WPAN_NCP_COUNTER_IP_RX_SUCCESS = "NCP:Counter:IP_RX_SUCCESS"
WPAN_NCP_COUNTER_IP_TX_FAILURE = "NCP:Counter:IP_TX_FAILURE"
WPAN_NCP_COUNTER_IP_RX_FAILURE = "NCP:Counter:IP_RX_FAILURE"
# ----------------------------------------------------------------------------------------------------------------------
# Valid state values
STATE_UNINITIALIZED = '"uninitialized"'
STATE_FAULT = '"uninitialized:fault"'
STATE_UPGRADING = '"uninitialized:upgrading"'
STATE_DEEP_SLEEP = '"offline:deep-sleep"'
STATE_OFFLINE = '"offline"'
STATE_COMMISSIONED = '"offline:commissioned"'
STATE_ASSOCIATING = '"associating"'
STATE_CREDENTIALS_NEEDED = '"associating:credentials-needed"'
STATE_ASSOCIATED = '"associated"'
STATE_ISOLATED = '"associated:no-parent"'
STATE_NETWAKE_ASLEEP = '"associated:netwake-asleep"'
STATE_NETWAKE_WAKING = '"associated:netwake-waking"'
# -----------------------------------------------------------------------------------------------------------------------
# MCU Power state from `WPAN_NCP_MCU_POWER_STATE`
MCU_POWER_STATE_ON = '"on"'
MCU_POWER_STATE_LOW_POWER = '"low-power"'
MCU_POWER_STATE_OFF = '"off"'
# -----------------------------------------------------------------------------------------------------------------------
# Node types (from `WPAN_NODE_TYPE` property)
NODE_TYPE_UNKNOWN = '"unknown"'
NODE_TYPE_LEADER = '"leader"'
NODE_TYPE_ROUTER = '"router"'
NODE_TYPE_END_DEVICE = '"end-device"'
NODE_TYPE_SLEEPY_END_DEVICE = '"sleepy-end-device"'
NODE_TYPE_COMMISSIONER = '"commissioner"'
NODE_TYPE_NEST_LURKER = '"nl-lurker"'
# -----------------------------------------------------------------------------------------------------------------------
# Node types used by `Node.join()`
JOIN_TYPE_ROUTER = 'r'
JOIN_TYPE_END_DEVICE = 'e'
JOIN_TYPE_SLEEPY_END_DEVICE = 's'
# -----------------------------------------------------------------------------------------------------------------------
# Address Cache Table Entry States
ADDRESS_CACHE_ENTRY_STATE_CACHED = "cached"
ADDRESS_CACHE_ENTRY_STATE_SNOOPED = "snooped"
ADDRESS_CACHE_ENTRY_STATE_QUERY = "query"
ADDRESS_CACHE_ENTRY_STATE_RETRY_QUERY = "retry-query"
# -----------------------------------------------------------------------------------------------------------------------
# Bit Flags for Thread Device Mode `WPAN_THREAD_DEVICE_MODE`
THREAD_MODE_FLAG_FULL_NETWORK_DATA = (1 << 0)
THREAD_MODE_FLAG_FULL_THREAD_DEV = (1 << 1)
THREAD_MODE_FLAG_SECURE_DATA_REQUEST = (1 << 2)
THREAD_MODE_FLAG_RX_ON_WHEN_IDLE = (1 << 3)
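# For illustration (not part of the original file): a value read from
# WPAN_THREAD_DEVICE_MODE can be tested against these flags; all four bits set
# (0x0f) corresponds to a full, rx-on-when-idle Thread device, while a typical
# sleepy end-device clears THREAD_MODE_FLAG_RX_ON_WHEN_IDLE and
# THREAD_MODE_FLAG_FULL_THREAD_DEV.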
_OT_BUILDDIR = os.getenv('top_builddir', '../..')
_WPANTUND_PREFIX = os.getenv('WPANTUND_PREFIX', '/usr/local')
# -----------------------------------------------------------------------------------------------------------------------
def _log(text, new_line=True, flush=True):
sys.stdout.write(text)
if new_line:
sys.stdout.write('\n')
if flush:
sys.stdout.flush()
# -----------------------------------------------------------------------------------------------------------------------
# Node class
class Node(object):
""" A wpantund OT NCP instance """
# defines the default verbosity setting (can be changed per `Node`)
_VERBOSE = os.getenv('TORANJ_VERBOSE',
'no').lower() in ['true', '1', 't', 'y', 'yes', 'on']
_SPEED_UP_FACTOR = 1 # defines the default time speed up factor
# path to `wpantund`, `wpanctl`, `ot-ncp-ftd`,`ot-ncp` and `ot-rcp`
_WPANTUND = '%s/sbin/wpantund' % _WPANTUND_PREFIX
_WPANCTL = '%s/bin/wpanctl' % _WPANTUND_PREFIX
_OT_NCP_FTD = '%s/examples/apps/ncp/ot-ncp-ftd' % _OT_BUILDDIR
_OT_NCP_FTD_POSIX = '%s/src/posix/ot-ncp' % _OT_BUILDDIR
_OT_RCP = '%s/examples/apps/ncp/ot-rcp' % _OT_BUILDDIR
# Environment variable used to determine how to run OpenThread
# If set to 1, then posix NCP (`ot-ncp`) is used along with a posix RCP `ot-rcp`.
    # Otherwise, the NCP `ot-ncp-ftd` is used directly.
_POSIX_ENV_VAR = 'TORANJ_POSIX_RCP_MODEL'
# determines if the wpantund logs are saved in file or sent to stdout
_TUND_LOG_TO_FILE = True
    # name of wpantund log file (if _TUND_LOG_TO_FILE is True)
_TUND_LOG_FNAME = 'wpantund-logs'
# interface name
_INTFC_NAME_PREFIX = 'utun' if sys.platform == 'darwin' else 'wpan'
_START_INDEX = 4 if sys.platform == 'darwin' else 1
_cur_index = _START_INDEX
_all_nodes = weakref.WeakSet()
def __init__(self, verbose=_VERBOSE):
"""Creates a new `Node` instance"""
index = Node._cur_index
Node._cur_index += 1
self._index = index
self._interface_name = self._INTFC_NAME_PREFIX + str(index)
self._verbose = verbose
# Check if env variable `TORANJ_POSIX_RCP_MODEL` is defined
# and use it to determine if to use operate in "posix-ncp-app".
if self._POSIX_ENV_VAR in os.environ:
self._use_posix_with_rcp = (os.environ[self._POSIX_ENV_VAR] in [
'1', 'yes'
])
else:
self._use_posix_with_rcp = False
if self._use_posix_with_rcp:
ncp_socket_path = 'system:{} -s {} spinel+hdlc+uart://{}?forkpty-arg={}'.format(
self._OT_NCP_FTD_POSIX, self._SPEED_UP_FACTOR, self._OT_RCP,
index)
else:
ncp_socket_path = 'system:{} {} {}'.format(self._OT_NCP_FTD, index,
self._SPEED_UP_FACTOR)
cmd = self._WPANTUND + \
' -o Config:NCP:SocketPath \"{}\"'.format(ncp_socket_path) + \
' -o Config:TUN:InterfaceName {}'.format(self._interface_name) + \
' -o Config:NCP:DriverName spinel' + \
' -o Daemon:SyslogMask \"all -debug\"'
if Node._TUND_LOG_TO_FILE:
self._tund_log_file = open(
self._TUND_LOG_FNAME + str(index) + '.log', 'wb')
else:
self._tund_log_file = None
if self._verbose:
_log('$ Node{}.__init__() cmd: {}'.format(index, cmd))
self._wpantund_process = subprocess.Popen(cmd,
shell=True,
stderr=self._tund_log_file)
self._wpanctl_cmd = self._WPANCTL + ' -I ' + self._interface_name + ' '
# map from local_port to `AsyncReceiver` object
self._recvers = weakref.WeakValueDictionary()
Node._all_nodes.add(self)
def __del__(self):
self._wpantund_process.poll()
if self._wpantund_process.returncode is None:
self._wpantund_process.terminate()
self._wpantund_process.wait()
def __repr__(self):
return 'Node (index={}, interface_name={})'.format(
self._index, self._interface_name)
@property
def index(self):
return self._index
@property
def interface_name(self):
return self._interface_name
@property
def tund_log_file(self):
return self._tund_log_file
@property
def using_posix_with_rcp(self):
return self._use_posix_with_rcp
# ------------------------------------------------------------------------------------------------------------------
# Executing a `wpanctl` command
def wpanctl(self, cmd):
""" Runs a wpanctl command on the given wpantund/OT-NCP instance and returns the output """
if self._verbose:
_log('$ Node{}.wpanctl(\'{}\')'.format(self._index, cmd),
new_line=False)
result = subprocess.check_output(self._wpanctl_cmd + cmd,
shell=True,
stderr=subprocess.STDOUT)
        if len(result) >= 1 and result[-1] == '\n':
            # remove the last char if it is '\n'
            result = result[:-1]
if self._verbose:
if '\n' in result:
_log(':')
for line in result.splitlines():
_log(' ' + line)
else:
_log(' -> \'{}\''.format(result))
return result
# ------------------------------------------------------------------------------------------------------------------
# APIs matching `wpanctl` commands.
def get(self, prop_name, value_only=True):
return self.wpanctl('get ' + ('-v ' if value_only else '') + prop_name)
def set(self, prop_name, value, binary_data=False):
return self._update_prop('set', prop_name, value, binary_data)
def add(self, prop_name, value, binary_data=False):
return self._update_prop('add', prop_name, value, binary_data)
def remove(self, prop_name, value, binary_data=False):
return self._update_prop('remove', prop_name, value, binary_data)
def _update_prop(self, action, prop_name, value, binary_data):
return self.wpanctl(action + ' ' + prop_name + ' ' +
('-d ' if binary_data else '') + '-v ' +
value) # use -v to handle values starting with `-`.
def reset(self):
return self.wpanctl('reset')
def status(self):
return self.wpanctl('status')
def leave(self):
return self.wpanctl('leave')
def form(self,
name,
channel=None,
channel_mask=None,
panid=None,
xpanid=None,
key=None,
key_index=None,
node_type=None,
mesh_local_prefix=None,
legacy_prefix=None):
return self.wpanctl(
'form \"' + name + '\"' +
(' -c {}'.format(channel) if channel is not None else '') +
(' -m {}'.format(channel_mask) if channel_mask is not None else ''
) + (' -p {}'.format(panid) if panid is not None else '') +
(' -x {}'.format(xpanid) if xpanid is not None else '') +
(' -k {}'.format(key) if key is not None else '') +
(' -i {}'.format(key_index) if key_index is not None else '') +
(' -T {}'.format(node_type) if node_type is not None else '') +
(' -M {}'.format(mesh_local_prefix
) if mesh_local_prefix is not None else '') +
(' -L {}'.format(legacy_prefix) if legacy_prefix is not None else ''
))
def join(self,
name,
channel=None,
node_type=None,
panid=None,
xpanid=None,
key=None):
return self.wpanctl(
'join \"' + name + '\"' +
(' -c {}'.format(channel) if channel is not None else '') +
(' -T {}'.format(node_type) if node_type is not None else '') +
(' -p {}'.format(panid) if panid is not None else '') +
(' -x {}'.format(xpanid) if xpanid is not None else '') +
(' -k {}'.format(key) if key is not None else '') + (' -n'))
def active_scan(self, channel=None):
return self.wpanctl(
'scan' + (' -c {}'.format(channel) if channel is not None else ''))
def energy_scan(self, channel=None):
return self.wpanctl('scan -e' + (
' -c {}'.format(channel) if channel is not None else ''))
def discover_scan(self,
channel=None,
joiner_only=False,
enable_filtering=False,
panid_filter=None):
return self.wpanctl(
'scan -d' +
(' -c {}'.format(channel) if channel is not None else '') +
(' -j' if joiner_only else '') +
(' -f' if enable_filtering else '') +
(' -p {}'.format(panid_filter) if panid_filter is not None else ''))
def permit_join(self, duration_sec=None, port=None, udp=True, tcp=True):
if not udp and not tcp: # incorrect use!
return ''
traffic_type = ''
if udp and not tcp:
traffic_type = ' --udp'
if tcp and not udp:
traffic_type = ' --tcp'
if port is not None and duration_sec is None:
duration_sec = '240'
return self.wpanctl(
'permit-join' +
(' {}'.format(duration_sec) if duration_sec is not None else '') +
(' {}'.format(port) if port is not None else '') + traffic_type)
def config_gateway(self, prefix, default_route=False, priority=None):
return self.wpanctl(
'config-gateway ' + prefix + (' -d' if default_route else '') +
(' -P {}'.format(priority) if priority is not None else ''))
def add_prefix(self,
prefix,
prefix_len=None,
priority=None,
stable=True,
on_mesh=False,
slaac=False,
dhcp=False,
configure=False,
default_route=False,
preferred=False):
return self.wpanctl(
'add-prefix ' + prefix +
(' -l {}'.format(prefix_len) if prefix_len is not None else '') +
(' -P {}'.format(priority) if priority is not None else '') +
(' -s' if stable else '') + (' -f' if preferred else '') +
(' -a' if slaac else '') + (' -d' if dhcp else '') +
(' -c' if configure else '') + (' -r' if default_route else '') +
(' -o' if on_mesh else ''))
def remove_prefix(self, prefix, prefix_len=None):
return self.wpanctl('remove-prefix ' + prefix + (
' -l {}'.format(prefix_len) if prefix_len is not None else ''))
def add_route(self,
route_prefix,
prefix_len=None,
priority=None,
stable=True):
"""route priority [(>0 for high, 0 for medium, <0 for low)]"""
return self.wpanctl(
'add-route ' + route_prefix +
(' -l {}'.format(prefix_len) if prefix_len is not None else '') +
(' -p {}'.format(priority) if priority is not None else '') +
('' if stable else ' -n'))
def remove_route(self,
route_prefix,
prefix_len=None,
priority=None,
stable=True):
"""route priority [(>0 for high, 0 for medium, <0 for low)]"""
return self.wpanctl(
'remove-route ' + route_prefix +
(' -l {}'.format(prefix_len) if prefix_len is not None else '') +
(' -p {}'.format(priority) if priority is not None else ''))
def commissioner_start(self):
return self.wpanctl('commissioner start')
def commissioner_add_joiner(self, eui64, pskd, timeout='100'):
return self.wpanctl('commissioner joiner-add {} {} {}'.format(
eui64, timeout, pskd))
def joiner_join(self, pskd):
return self.wpanctl('joiner --join {}'.format(pskd))
def joiner_attach(self):
return self.wpanctl('joiner --attach')
# ------------------------------------------------------------------------------------------------------------------
# Helper methods
def is_associated(self):
return self.get(WPAN_STATE) == STATE_ASSOCIATED
def join_node(self, node, node_type=JOIN_TYPE_ROUTER, should_set_key=True):
"""Join a network specified by another node, `node` should be a Node"""
if not node.is_associated():
return "{} is not associated".format(node)
return self.join(
node.get(WPAN_NAME)[1:-1],
channel=node.get(WPAN_CHANNEL),
node_type=node_type,
panid=node.get(WPAN_PANID),
xpanid=node.get(WPAN_XPANID),
key=node.get(WPAN_KEY)[1:-1] if should_set_key else None)
def whitelist_node(self, node):
"""Adds a given node (of type `Node`) to the whitelist of `self` and enables whitelisting on `self`"""
self.add(WPAN_MAC_WHITELIST_ENTRIES, node.get(WPAN_EXT_ADDRESS)[1:-1])
self.set(WPAN_MAC_WHITELIST_ENABLED, '1')
def un_whitelist_node(self, node):
"""Removes a given node (of node `Node) from the whitelist"""
self.remove(WPAN_MAC_WHITELIST_ENTRIES,
node.get(WPAN_EXT_ADDRESS)[1:-1])
def is_in_scan_result(self, scan_result):
"""Checks if node is in the scan results
        `scan_result` must be an array of `ScanResult` objects (see `parse_scan_result`).
"""
joinable = (self.get(WPAN_NETWORK_ALLOW_JOIN) == 'true')
panid = self.get(WPAN_PANID)
xpanid = self.get(WPAN_XPANID)[2:]
name = self.get(WPAN_NAME)[1:-1]
channel = self.get(WPAN_CHANNEL)
ext_address = self.get(WPAN_EXT_ADDRESS)[1:-1]
for item in scan_result:
if all([
item.network_name == name, item.panid == panid,
item.xpanid == xpanid, item.channel == channel,
item.ext_address == ext_address,
(item.type == ScanResult.TYPE_DISCOVERY_SCAN) or
(item.joinable == joinable)
]):
return True
return False
def find_ip6_address_with_prefix(self, prefix):
"""Find an IPv6 address on node matching a given prefix.
        `prefix` should be a string containing the prefix.
Returns a string containing the IPv6 address matching the prefix or empty string if no address found.
"""
if len(prefix) > 2 and prefix[-1] == ':' and prefix[-2] == ':':
prefix = prefix[:-1]
all_addrs = parse_list(self.get(WPAN_IP6_ALL_ADDRESSES))
matched_addr = [addr for addr in all_addrs if addr.startswith(prefix)]
return matched_addr[0] if len(matched_addr) >= 1 else ''
def add_ip6_address_on_interface(self, address, prefix_len=64):
"""Adds an IPv6 interface on the network interface.
`address` should be string containing the IPv6 address.
`prefix_len` is an `int` specifying the prefix length.
NOTE: this method uses linux `ip` command.
"""
cmd = 'ip -6 addr add ' + address + \
'/{} dev '.format(prefix_len) + self.interface_name
if self._verbose:
_log('$ Node{} \'{}\')'.format(self._index, cmd))
result = subprocess.check_output(cmd,
shell=True,
stderr=subprocess.STDOUT)
return result
def remove_ip6_address_on_interface(self, address, prefix_len=64):
"""Removes an IPv6 interface on the network interface.
`address` should be string containing the IPv6 address.
`prefix_len` is an `int` specifying the prefix length.
NOTE: this method uses linux `ip` command.
"""
cmd = 'ip -6 addr del ' + address + \
'/{} dev '.format(prefix_len) + self.interface_name
if self._verbose:
_log('$ Node{} \'{}\')'.format(self._index, cmd))
result = subprocess.check_output(cmd,
shell=True,
stderr=subprocess.STDOUT)
return result
# ------------------------------------------------------------------------------------------------------------------
# class methods
@classmethod
def init_all_nodes(cls, disable_logs=not _VERBOSE, wait_time=15):
"""Issues a `wpanctl.leave` on all `Node` objects and waits for them to be ready"""
random.seed(123456)
time.sleep(0.5)
for node in Node._all_nodes:
start_time = time.time()
while True:
try:
node._wpantund_process.poll()
if node._wpantund_process.returncode is not None:
print(
'Node {} wpantund instance has terminated unexpectedly'
.format(node))
if disable_logs:
node.set(WPAN_OT_LOG_LEVEL, '0')
node.leave()
except subprocess.CalledProcessError as e:
if (node._verbose):
_log(' -> \'{}\' exit code: {}'.format(
e.output, e.returncode))
interval = time.time() - start_time
if interval > wait_time:
print(
'Took too long to init node {} ({}>{} sec)'.format(
node, interval, wait_time))
raise
except BaseException:
raise
else:
break
time.sleep(0.4)
@classmethod
def finalize_all_nodes(cls):
"""Finalizes all previously created `Node` instances (stops the wpantund process)"""
for node in Node._all_nodes:
node._wpantund_process.terminate()
node._wpantund_process.wait()
@classmethod
def set_time_speedup_factor(cls, factor):
"""Sets up the time speed up factor - should be set before creating any `Node` objects"""
if len(Node._all_nodes) != 0:
raise Node._NodeError(
'set_time_speedup_factor() cannot be called after creating a `Node`'
)
Node._SPEED_UP_FACTOR = factor
# ------------------------------------------------------------------------------------------------------------------
# IPv6 message Sender and Receiver class
class _NodeError(Exception):
pass
def prepare_tx(self, src, dst, data=40, count=1, mcast_hops=None):
"""Prepares an IPv6 msg transmission.
        - `src` and `dst` can be either a string containing an IPv6 address, or a tuple (IPv6 address as string, port),
if no port is given, a random port number is used.
- `data` can be either a string containing the message to be sent, or an int indicating size of the message (a
random message with the given length will be used).
- `count` gives number of times the message will be sent (default is 1).
- `mcast_hops` specifies multicast hop limit (only applicable for multicast tx).
Returns an `AsyncSender` object.
"""
if isinstance(src, tuple):
src_addr = src[0]
src_port = src[1]
else:
src_addr = src
src_port = random.randint(49152, 65535)
if isinstance(dst, tuple):
dst_addr = dst[0]
dst_port = dst[1]
else:
dst_addr = dst
dst_port = random.randint(49152, 65535)
if isinstance(data, int):
# create a random message with the given length.
all_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,><?;:[]=-+)(*&^%$#@'
msg = ''.join(random.choice(all_chars) for _ in range(data))
else:
msg = data
return AsyncSender(self, src_addr, src_port, dst_addr, dst_port, msg,
count, mcast_hops)
def _get_receiver(self, local_port):
# Gets or creates a receiver (an `AsyncReceiver`) tied to given port
# number
if local_port in self._recvers:
receiver = self._recvers[local_port]
else:
receiver = AsyncReceiver(self, local_port)
self._recvers[local_port] = receiver
return receiver
def _remove_recver(self, recvr):
# Removes a receiver from weak dictionary - called when the receiver is
# done and its socket is closed
local_port = recvr.local_port
if local_port in self._recvers:
del self._recvers[local_port]
def prepare_rx(self, sender):
"""Prepare to receive messages from a sender (an `AsyncSender`)"""
receiver = self._get_receiver(sender.dst_port)
receiver._add_sender(sender.src_addr, sender.src_port, sender.msg,
sender.count)
return receiver
def prepare_listener(self, local_port, timeout=1):
"""Prepares a listener (an `AsyncReceiver`) listening on the given `local_port` for given `timeout` (sec)"""
receiver = self._get_receiver(local_port)
receiver._set_listen_timeout(timeout)
return receiver
@staticmethod
def perform_async_tx_rx(timeout=20):
"""Called to perform all previously prepared async rx/listen and tx operations"""
try:
start_time = time.time()
while asyncore.socket_map:
elapsed_time = time.time() - start_time
if elapsed_time > timeout:
                    print('Performing async tx/rx took too long ({}>{} sec)'.
format(elapsed_time, timeout))
raise Node._NodeError(
'perform_tx_rx timed out ({}>{} sec)'.format(
elapsed_time, timeout))
# perform a single asyncore loop
asyncore.loop(timeout=0.5, count=1)
except BaseException:
print('Failed to perform async rx/tx')
raise
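    # Typical flow, shown as an illustrative sketch only (node1/node2, src_addr,
    # dst_addr and port are hypothetical names, not defined in this module):
    #   sender = node1.prepare_tx((src_addr, port), (dst_addr, port), 20, count=5)
    #   recver = node2.prepare_rx(sender)
    #   Node.perform_async_tx_rx()
    #   verify(sender.was_successful and recver.was_successful)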
# -----------------------------------------------------------------------------------------------------------------------
# `AsyncSender` and `AsyncReceiver classes
_SO_BINDTODEVICE = 25
def _is_ipv6_addr_link_local(ip_addr):
"""Indicates if a given IPv6 address is link-local"""
return ip_addr.lower().startswith('fe80::')
def _create_socket_address(ip_address, port):
"""Convert a given IPv6 address (string) and port number into a socket address"""
# `socket.getaddrinfo()` returns a list of `(family, socktype, proto, canonname, sockaddr)` where `sockaddr`
# (at index 4) can be used as input in socket methods (like `sendto()`, `bind()`, etc.).
return socket.getaddrinfo(ip_address, port)[0][4]
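# Minimal usage sketch (hypothetical values): the returned sockaddr can be passed
# straight to socket calls such as `sendto()` or `bind()`:
#   dst_sock_addr = _create_socket_address('ff03::1', 12345)
#   sock.sendto(b'hello', dst_sock_addr)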
class AsyncSender(asyncore.dispatcher):
""" An IPv6 async message sender - use `Node.prepare_tx()` to create one"""
def __init__(self,
node,
src_addr,
src_port,
dst_addr,
dst_port,
msg,
count,
mcast_hops=None):
self._node = node
self._src_addr = src_addr
self._src_port = src_port
self._dst_addr = dst_addr
self._dst_port = dst_port
self._msg = msg
self._count = count
self._dst_sock_addr = _create_socket_address(dst_addr, dst_port)
self._tx_buffer = self._msg
self._tx_counter = 0
# Create a socket, bind it to the node's interface
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, _SO_BINDTODEVICE,
node.interface_name + '\0')
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# Set the IPV6_MULTICAST_HOPS
if mcast_hops is not None:
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS,
mcast_hops)
# Bind the socket to the given src address
if _is_ipv6_addr_link_local(src_addr):
# If src is a link local address it requires the interface name to
# be specified.
src_sock_addr = _create_socket_address(
src_addr + '%' + node.interface_name, src_port)
else:
src_sock_addr = _create_socket_address(src_addr, src_port)
sock.bind(src_sock_addr)
asyncore.dispatcher.__init__(self, sock)
# Property getters
@property
def node(self):
return self._node
@property
def src_addr(self):
return self._src_addr
@property
def src_port(self):
return self._src_port
@property
def dst_addr(self):
return self._dst_addr
@property
def dst_port(self):
return self._dst_port
@property
def msg(self):
return self._msg
@property
def count(self):
return self._count
@property
def was_successful(self):
"""Indicates if the transmission of IPv6 messages finished successfully"""
return self._tx_counter == self._count
# asyncore.dispatcher callbacks
def readable(self):
return False
def writable(self):
return True
def handle_write(self):
sent_len = self.sendto(self._tx_buffer, self._dst_sock_addr)
if self._node._verbose:
if sent_len < 30:
info_text = '{} bytes ("{}")'.format(sent_len,
self._tx_buffer[:sent_len])
else:
info_text = '{} bytes'.format(sent_len)
_log('- Node{} sent {} to [{}]:{} from [{}]:{}'.format(
self._node._index, info_text, self._dst_addr, self._dst_port,
self._src_addr, self._src_port))
self._tx_buffer = self._tx_buffer[sent_len:]
if len(self._tx_buffer) == 0:
self._tx_counter += 1
if self._tx_counter < self._count:
self._tx_buffer = self._msg
else:
self.handle_close()
def handle_close(self):
self.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class AsyncReceiver(asyncore.dispatcher):
""" An IPv6 async message receiver - use `prepare_rx()` to create one"""
_MAX_RECV_SIZE = 2048
class _SenderInfo(object):
def __init__(self, sender_addr, sender_port, msg, count):
self._sender_addr = sender_addr
self._sender_port = sender_port
self._msg = msg
self._count = count
self._rx_counter = 0
def _check_received(self, msg, sender_addr, sender_port):
if self._msg == msg and self._sender_addr == sender_addr and self._sender_port == sender_port:
self._rx_counter += 1
return self._did_recv_all()
def _did_recv_all(self):
return self._rx_counter >= self._count
def __init__(self, node, local_port):
self._node = node
self._local_port = local_port
self._senders = [] # list of `_SenderInfo` objects
# contains all received messages as a list of (pkt, (src_addr,
# src_port))
self._all_rx = []
self._timeout = 0 # listen timeout (zero means forever)
self._started = False
self._start_time = 0
# Create a socket, bind it to the node's interface
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, _SO_BINDTODEVICE,
node.interface_name + '\0')
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# Bind the socket to any IPv6 address with the given local port
local_sock_addr = _create_socket_address('::', local_port)
sock.bind(local_sock_addr)
asyncore.dispatcher.__init__(self, sock)
def _add_sender(self, sender_addr, sender_port, msg, count):
self._senders.append(
AsyncReceiver._SenderInfo(sender_addr, sender_port, msg, count))
def _set_listen_timeout(self, timeout):
self._timeout = timeout
# Property getters
@property
def node(self):
return self._node
@property
def local_port(self):
return self._local_port
@property
def all_rx_msg(self):
"""returns all received messages as a list of (msg, (src_addr, src_port))"""
return self._all_rx
@property
def was_successful(self):
"""Indicates if all expected IPv6 messages were received successfully"""
return len(self._senders) == 0 or all(
[sender._did_recv_all() for sender in self._senders])
# asyncore.dispatcher callbacks
def readable(self):
if not self._started:
self._start_time = time.time()
self._started = True
if self._timeout != 0 and time.time(
) - self._start_time >= self._timeout:
self.handle_close()
if self._node._verbose:
_log(
'- Node{} finished listening on port {} for {} sec, received {} msg(s)'
.format(self._node._index, self._local_port, self._timeout,
len(self._all_rx)))
return False
return True
def writable(self):
return False
def handle_read(self):
(msg, src_sock_addr) = self.recvfrom(AsyncReceiver._MAX_RECV_SIZE)
src_addr = src_sock_addr[0]
src_port = src_sock_addr[1]
if (_is_ipv6_addr_link_local(src_addr)):
if '%' in src_addr:
# remove the interface name from address
src_addr = src_addr.split('%')[0]
if self._node._verbose:
if len(msg) < 30:
info_text = '{} bytes ("{}")'.format(len(msg), msg)
else:
info_text = '{} bytes'.format(len(msg))
_log('- Node{} received {} on port {} from [{}]:{}'.format(
self._node._index, info_text, self._local_port, src_addr,
src_port))
self._all_rx.append((msg, (src_addr, src_port)))
if all([
sender._check_received(msg, src_addr, src_port)
for sender in self._senders
]):
self.handle_close()
def handle_close(self):
self.close()
# remove the receiver from the node once the socket is closed
self._node._remove_recver(self)
# -----------------------------------------------------------------------------------------------------------------------
class VerifyError(Exception):
pass
_is_in_verify_within = False
def verify(condition):
"""Verifies that a `condition` is true, otherwise raises a VerifyError"""
global _is_in_verify_within
if not condition:
calling_frame = inspect.currentframe().f_back
error_message = 'verify() failed at line {} in "{}"'.format(
calling_frame.f_lineno, calling_frame.f_code.co_filename)
if not _is_in_verify_within:
print(error_message)
raise VerifyError(error_message)
def verify_within(condition_checker_func, wait_time, delay_time=0.1):
"""Verifies that a given function `condition_checker_func` passes successfully within a given wait timeout.
`wait_time` is maximum time waiting for condition_checker to pass (in seconds).
`delay_time` specifies a delay interval added between failed attempts (in seconds).
"""
global _is_in_verify_within
start_time = time.time()
old_is_in_verify_within = _is_in_verify_within
_is_in_verify_within = True
while True:
try:
condition_checker_func()
except VerifyError as e:
if time.time() - start_time > wait_time:
print('Took too long to pass the condition ({}>{} sec)'.format(
time.time() - start_time, wait_time))
print(e.message)
raise e
except BaseException:
raise
else:
break
if delay_time != 0:
time.sleep(delay_time)
_is_in_verify_within = old_is_in_verify_within
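# A small usage sketch combining `verify` and `verify_within` (the node and state
# names are hypothetical here): keep re-checking the condition for up to 10 sec.
#   def check_associated():
#       verify(node.get(WPAN_STATE) == STATE_ASSOCIATED)
#   verify_within(check_associated, wait_time=10, delay_time=0.1)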
# -----------------------------------------------------------------------------------------------------------------------
# Parsing `wpanctl` output
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ScanResult(object):
""" This object encapsulates a scan result (active/discover/energy scan)"""
TYPE_ACTIVE_SCAN = 'active-scan'
TYPE_DISCOVERY_SCAN = 'discover-scan'
TYPE_ENERGY_SCAN = 'energy-scan'
def __init__(self, result_text):
items = [item.strip() for item in result_text.split('|')]
if len(items) == 8:
self._type = ScanResult.TYPE_ACTIVE_SCAN
self._index = items[0]
self._joinable = (items[1] == 'YES')
self._network_name = items[2][1:-1]
self._panid = items[3]
self._channel = items[4]
self._xpanid = items[5]
self._ext_address = items[6]
self._rssi = items[7]
elif len(items) == 7:
self._type = ScanResult.TYPE_DISCOVERY_SCAN
self._index = items[0]
self._network_name = items[1][1:-1]
self._panid = items[2]
self._channel = items[3]
self._xpanid = items[4]
self._ext_address = items[5]
self._rssi = items[6]
elif len(items) == 2:
self._type = ScanResult.TYPE_ENERGY_SCAN
self._channel = items[0]
self._rssi = items[1]
else:
            raise ValueError(
                '"{}" does not seem to be a valid scan result string'.format(
                    result_text))
@property
def type(self):
return self._type
@property
def joinable(self):
return self._joinable
@property
def network_name(self):
return self._network_name
@property
def panid(self):
return self._panid
@property
def channel(self):
return self._channel
@property
def xpanid(self):
return self._xpanid
@property
def ext_address(self):
return self._ext_address
@property
def rssi(self):
return self._rssi
def __repr__(self):
return 'ScanResult({})'.format(self.__dict__)
def parse_scan_result(scan_result):
""" Parses scan result string and returns an array of `ScanResult` objects"""
return [ScanResult(item) for item in scan_result.split('\n')[2:]
] # skip first two lines which are table headers
def parse_list(list_string):
"""
Parses IPv6/prefix/route list string (output of wpanctl get for properties WPAN_IP6_ALL_ADDRESSES,
IP6_MULTICAST_ADDRESSES, WPAN_THREAD_ON_MESH_PREFIXES, ...)
Returns an array of strings each containing an IPv6/prefix/route entry.
"""
# List string example (get(WPAN_IP6_ALL_ADDRESSES) output):
#
# '[\n
# \t"fdf4:5632:4940:0:8798:8701:85d4:e2be prefix_len:64 origin:ncp valid:forever preferred:forever"\n
# \t"fe80::2092:9358:97ea:71c6 prefix_len:64 origin:ncp valid:forever preferred:forever"\n
# ]'
#
# We split the lines ('\n' as separator) and skip the first and last lines which are '[' and ']'.
# For each line, skip the first two characters (which are '\t"') and last character ('"'), then split the string
# using whitespace as separator. The first entry is the IPv6 address.
#
return [line[2:-1].split()[0] for line in list_string.split('\n')[1:-1]]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class OnMeshPrefix(object):
""" This object encapsulates an on-mesh prefix"""
def __init__(self, text):
# Example of expected text:
#
# '\t"fd00:abba:cafe:: prefix_len:64 origin:user stable:yes flags:0x31'
# ' [on-mesh:1 def-route:0 config:0 dhcp:0 slaac:1 pref:1 prio:med] rloc:0x0000"'
m = re.match(
r'\t"([0-9a-fA-F:]+)\s*prefix_len:(\d+)\s+origin:(\w*)\s+stable:(\w*).* \['
+
r'on-mesh:(\d)\s+def-route:(\d)\s+config:(\d)\s+dhcp:(\d)\s+slaac:(\d)\s+pref:(\d)\s+prio:(\w*)\]'
+ r'\s+rloc:(0x[0-9a-fA-F]+)', text)
verify(m is not None)
data = m.groups()
self._prefix = data[0]
self._prefix_len = data[1]
self._origin = data[2]
self._stable = (data[3] == 'yes')
self._on_mesh = (data[4] == '1')
self._def_route = (data[5] == '1')
self._config = (data[6] == '1')
self._dhcp = (data[7] == '1')
self._slaac = (data[8] == '1')
self._preferred = (data[9] == '1')
self._priority = (data[10])
self._rloc16 = (data[11])
@property
def prefix(self):
return self._prefix
@property
def prefix_len(self):
return self._prefix_len
@property
def origin(self):
return self._origin
@property
def priority(self):
return self._priority
def is_stable(self):
return self._stable
def is_on_mesh(self):
return self._on_mesh
def is_def_route(self):
return self._def_route
def is_config(self):
return self._config
def is_dhcp(self):
return self._dhcp
def is_slaac(self):
return self._slaac
def is_preferred(self):
return self._preferred
def rloc16(self):
return self._rloc16
def __repr__(self):
return 'OnMeshPrefix({})'.format(self.__dict__)
def parse_on_mesh_prefix_result(on_mesh_prefix_list):
""" Parses on-mesh prefix list string and returns an array of `OnMeshPrefix` objects"""
return [
OnMeshPrefix(item) for item in on_mesh_prefix_list.split('\n')[1:-1]
]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ChildEntry(object):
""" This object encapsulates a child entry"""
def __init__(self, text):
# Example of expected text:
#
# `\t"E24C5F67F4B8CBB9, RLOC16:d402, NetDataVer:175, LQIn:3, AveRssi:-20, LastRssi:-20, Timeout:120, Age:0, `
# `RxOnIdle:no, FTD:no, SecDataReq:yes, FullNetData:yes"`
#
# We get rid of the first two chars `\t"' and last char '"', split the rest using whitespace as separator.
# Then remove any ',' at end of items in the list.
items = [
item[:-1] if item[-1] == ',' else item
for item in text[2:-1].split()
]
# First item in the extended address
self._ext_address = items[0]
# Convert the rest into a dictionary by splitting using ':' as
# separator
dict = {item.split(':')[0]: item.split(':')[1] for item in items[1:]}
self._rloc16 = dict['RLOC16']
self._timeout = dict['Timeout']
self._rx_on_idle = (dict['RxOnIdle'] == 'yes')
self._ftd = (dict['FTD'] == 'yes')
self._sec_data_req = (dict['SecDataReq'] == 'yes')
self._full_net_data = (dict['FullNetData'] == 'yes')
@property
def ext_address(self):
return self._ext_address
@property
def rloc16(self):
return self._rloc16
@property
def timeout(self):
return self._timeout
def is_rx_on_when_idle(self):
return self._rx_on_idle
def is_ftd(self):
return self._ftd
def is_sec_data_req(self):
return self._sec_data_req
def is_full_net_data(self):
return self._full_net_data
def __repr__(self):
return 'ChildEntry({})'.format(self.__dict__)
def parse_child_table_result(child_table_list):
""" Parses child table list string and returns an array of `ChildEntry` objects"""
return [ChildEntry(item) for item in child_table_list.split('\n')[1:-1]]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class NeighborEntry(object):
""" This object encapsulates a neighbor entry"""
def __init__(self, text):
# Example of expected text:
#
# `\t"5AC95ED4646D6565, RLOC16:9403, LQIn:3, AveRssi:-20, LastRssi:-20, Age:0, LinkFC:8, MleFC:0, IsChild:yes,'
# 'RxOnIdle:no, FTD:no, SecDataReq:yes, FullNetData:yes"'
#
# We get rid of the first two chars `\t"' and last char '"', split the rest using whitespace as separator.
# Then remove any ',' at end of items in the list.
items = [
item[:-1] if item[-1] == ',' else item
for item in text[2:-1].split()
]
# First item in the extended address
self._ext_address = items[0]
# Convert the rest into a dictionary by splitting the text using ':' as
# separator
dict = {item.split(':')[0]: item.split(':')[1] for item in items[1:]}
self._rloc16 = dict['RLOC16']
self._is_child = (dict['IsChild'] == 'yes')
self._rx_on_idle = (dict['RxOnIdle'] == 'yes')
self._ftd = (dict['FTD'] == 'yes')
@property
def ext_address(self):
return self._ext_address
@property
def rloc16(self):
return self._rloc16
def is_rx_on_when_idle(self):
return self._rx_on_idle
def is_ftd(self):
return self._ftd
def is_child(self):
return self._is_child
def __repr__(self):
return 'NeighborEntry({})'.format(self.__dict__)
def parse_neighbor_table_result(neighbor_table_list):
""" Parses neighbor table list string and returns an array of `NeighborEntry` objects"""
return [
NeighborEntry(item) for item in neighbor_table_list.split('\n')[1:-1]
]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class RouterTableEntry(object):
""" This object encapsulates a router table entry"""
def __init__(self, text):
# Example of expected text:
#
# `\t"8A970B3251810826, RLOC16:4000, RouterId:16, NextHop:43, PathCost:1, LQIn:3, LQOut:3, Age:3, LinkEst:yes"`
#
# We get rid of the first two chars `\t"' and last char '"', split the rest using whitespace as separator.
# Then remove any ',' at end of items in the list.
items = [
item[:-1] if item[-1] == ',' else item
for item in text[2:-1].split()
]
# First item in the extended address
self._ext_address = items[0]
# Convert the rest into a dictionary by splitting the text using ':' as
# separator
dict = {item.split(':')[0]: item.split(':')[1] for item in items[1:]}
self._rloc16 = int(dict['RLOC16'], 16)
self._router_id = int(dict['RouterId'], 0)
self._next_hop = int(dict['NextHop'], 0)
self._path_cost = int(dict['PathCost'], 0)
self._age = int(dict['Age'], 0)
self._le = (dict['LinkEst'] == 'yes')
@property
def ext_address(self):
return self._ext_address
@property
def rloc16(self):
return self._rloc16
@property
def router_id(self):
return self._router_id
@property
def next_hop(self):
return self._next_hop
@property
def path_cost(self):
return self._path_cost
def is_link_established(self):
return self._le
def __repr__(self):
return 'RouterTableEntry({})'.format(self.__dict__)
def parse_router_table_result(router_table_list):
""" Parses router table list string and returns an array of `RouterTableEntry` objects"""
return [
RouterTableEntry(item) for item in router_table_list.split('\n')[1:-1]
]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class AddressCacheEntry(object):
""" This object encapsulates an address cache entry"""
def __init__(self, text):
# Example of expected text:
#
# '\t"fd00:1234::100:8 -> 0xfffe, Age:1, State:query, CanEvict:no, Timeout:3, RetryDelay:15"`
# '\t"fd00:1234::3:2 -> 0x2000, Age:0, State:cached, LastTrans:0, ML-EID:fd40:ea58:a88c:0:b7ab:4919:aa7b:11a3"`
# We get rid of the first two chars `\t"' and last char '"', split the rest using whitespace as separator.
# Then remove any ',' at end of items in the list.
items = [
item[:-1] if item[-1] == ',' else item
for item in text[2:-1].split()
]
# First item in the extended address
self._address = items[0]
self._rloc16 = int(items[2], 16)
# Convert the rest into a dictionary by splitting the text using ':' as
# separator
dict = {item.split(':')[0]: item.split(':')[1] for item in items[3:]}
self._age = int(dict['Age'], 0)
self._state = dict['State']
if self._state == ADDRESS_CACHE_ENTRY_STATE_CACHED:
self._last_trans = int(dict.get("LastTrans", "-1"), 0)
else:
self._can_evict = (dict['CanEvict'] == 'yes')
self._timeout = int(dict['Timeout'])
self._retry_delay = int(dict['RetryDelay'])
@property
def address(self):
return self._address
@property
def rloc16(self):
return self._rloc16
@property
def age(self):
return self._age
@property
def state(self):
return self._state
def can_evict(self):
return self._can_evict
@property
def timeout(self):
return self._timeout
@property
def retry_delay(self):
return self._retry_delay
@property
def last_trans(self):
return self._last_trans
def __repr__(self):
return 'AddressCacheEntry({})'.format(self.__dict__)
def parse_address_cache_table_result(addr_cache_table_list):
""" Parses address cache table list string and returns an array of `AddressCacheEntry` objects"""
return [
AddressCacheEntry(item)
for item in addr_cache_table_list.split('\n')[1:-1]
]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class InterfaceRoute(object):
""" This object encapsulates an interface route entry"""
def __init__(self, text):
# Example of expected text:
#
# '\t"fd00:abba::/64 metric:256 "'
#
# We get rid of the first two chars `\t"' and last char '"', split the rest using whitespace as separator.
# Then remove any ',' at end of items in the list.
items = [
item[:-1] if item[-1] == ',' else item
for item in text[2:-1].split()
]
# First item in the extended address
self._route_prefix = items[0].split('/')[0]
self._prefix_len = int(items[0].split('/')[1], 0)
self._metric = int(items[1].split(':')[1], 0)
@property
def route_prefix(self):
return self._route_prefix
@property
def prefix_len(self):
return self._prefix_len
@property
def metric(self):
return self._metric
def __repr__(self):
return 'InterfaceRoute({})'.format(self.__dict__)
def parse_interface_routes_result(interface_routes_list):
""" Parses interface routes list string and returns an array of `InterfaceRoute` objects"""
return [
InterfaceRoute(item) for item in interface_routes_list.split('\n')[1:-1]
]
| bsd-3-clause | -7,788,658,360,895,456,000 | 36.195298 | 121 | 0.568218 | false |
filipr/hermes | doc/conf.py | 2 | 11201 | # -*- coding: utf-8 -*-
#
# Hermes documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath(os.path.join("..", "python")))
sys.path.append(os.path.abspath('exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'math_dollar', 'youtube', 'popup', 'sourcecode', 'latexcode']
latex_preamble = r"""
\usepackage{dsfont}
\usepackage{braket}
\usepackage{slashed}
\usepackage{etoolbox}
\pagestyle{fancy}
\usepackage{color}
\usepackage{float}
\usepackage{bm}
\let\origfigure=\figure
\renewenvironment{figure}[6]{
\origfigure[H]}
{\endlist}
\def\degrees{^\circ}
\def\d{{\rm d}}
\pagenumbering{arabic}
\def\L{{\mathcal L}}
\def\H{{\mathcal H}}
\def\M{{\mathcal M}}
\def\matrix{}
\def\fslash#1{#1 \!\!\!/}
\def\F{{\bf F}}
\def\R{{\bf R}}
\def\J{{\bf J}}
\def\x{{\bf x}}
\def\y{{\bf y}}
\def\h{{\rm h}}
\def\a{{\rm a}}
\newcommand{\bfx}{\mbox{\boldmath $x$}}
\newcommand{\bfy}{\mbox{\boldmath $y$}}
\newcommand{\bfz}{\mbox{\boldmath $z$}}
\newcommand{\bfv}{\mbox{\boldmath $v$}}
\newcommand{\bfu}{\mbox{\boldmath $u$}}
\newcommand{\bfF}{\mbox{\boldmath $F$}}
\newcommand{\bfJ}{\mbox{\boldmath $J$}}
\newcommand{\bfU}{\mbox{\boldmath $U$}}
\newcommand{\bfY}{\mbox{\boldmath $Y$}}
\newcommand{\bfR}{\mbox{\boldmath $R$}}
\newcommand{\bfg}{\mbox{\boldmath $g$}}
\newcommand{\bfc}{\mbox{\boldmath $c$}}
\newcommand{\bfxi}{\mbox{\boldmath $\xi$}}
\newcommand{\bfw}{\mbox{\boldmath $w$}}
\newcommand{\bfE}{\mbox{\boldmath $E$}}
\newcommand{\bfS}{\mbox{\boldmath $S$}}
\newcommand{\bfb}{\mbox{\boldmath $b$}}
\newcommand{\bfH}{\mbox{\boldmath $H$}}
\def\Hcurl{{\bfH({\rm curl})}}
\def\Hdiv{{\bfH({\rm div})}}
\newcommand{\dd}[2]{\frac{\partial #1}{\partial #2}}
\newcommand{\dx}{\;\mbox{d}\bfx}
%\def\back{\!\!\!\!\!\!\!\!\!\!}
\def\PY{}
\def\PYZcb{}
\def\PYZob{}
\def\PYZus{}
\def\PYZbs{}
\def\PYZpc{}
\def\PYZti{}
\def\PYZsh{}
\def\PYZhy{-}
\def\back{}
\def\col#1#2{\left(\matrix{#1#2}\right)}
\def\row#1#2{\left(\matrix{#1#2}\right)}
\def\mat#1{\begin{pmatrix}#1\end{pmatrix}}
\def\matd#1#2{\left(\matrix{#1\back0\cr0\back#2}\right)}
\def\p#1#2{{\partial#1\over\partial#2}}
\def\cg#1#2#3#4#5#6{({#1},\,{#2},\,{#3},\,{#4}\,|\,{#5},\,{#6})}
\def\half{{\textstyle{1\over2}}}
\def\jsym#1#2#3#4#5#6{\left\{\matrix{
{#1}{#2}{#3}
{#4}{#5}{#6}
}\right\}}
\def\diag{\hbox{diag}}
\font\dsrom=dsrom10
\def\one{\hbox{\dsrom 1}}
\def\res{\mathop{\mathrm{Res}}}
\def\mathnot#1{\text{"$#1$"}}
%See Character Table for cmmib10:
%http://www.math.union.edu/~dpvc/jsmath/download/extra-fonts/cmmib10/cmmib10.html
\font\mib=cmmib10
\def\balpha{\hbox{\mib\char"0B}}
\def\bbeta{\hbox{\mib\char"0C}}
\def\bgamma{\hbox{\mib\char"0D}}
\def\bdelta{\hbox{\mib\char"0E}}
\def\bepsilon{\hbox{\mib\char"0F}}
\def\bzeta{\hbox{\mib\char"10}}
\def\boldeta{\hbox{\mib\char"11}}
\def\btheta{\hbox{\mib\char"12}}
\def\biota{\hbox{\mib\char"13}}
\def\bkappa{\hbox{\mib\char"14}}
\def\blambda{\hbox{\mib\char"15}}
\def\bmu{\hbox{\mib\char"16}}
\def\bnu{\hbox{\mib\char"17}}
\def\bxi{\hbox{\mib\char"18}}
\def\bpi{\hbox{\mib\char"19}}
\def\brho{\hbox{\mib\char"1A}}
\def\bsigma{\hbox{\mib\char"1B}}
\def\btau{\hbox{\mib\char"1C}}
\def\bupsilon{\hbox{\mib\char"1D}}
\def\bphi{\hbox{\mib\char"1E}}
\def\bchi{\hbox{\mib\char"1F}}
\def\bpsi{\hbox{\mib\char"20}}
\def\bomega{\hbox{\mib\char"21}}
\def\bvarepsilon{\hbox{\mib\char"22}}
\def\bvartheta{\hbox{\mib\char"23}}
\def\bvarpi{\hbox{\mib\char"24}}
\def\bvarrho{\hbox{\mib\char"25}}
\def\bvarphi{\hbox{\mib\char"27}}
%how to use:
%$$\alpha\balpha$$
%$$\beta\bbeta$$
%$$\gamma\bgamma$$
%$$\delta\bdelta$$
%$$\epsilon\bepsilon$$
%$$\zeta\bzeta$$
%$$\eta\boldeta$$
%$$\theta\btheta$$
%$$\iota\biota$$
%$$\kappa\bkappa$$
%$$\lambda\blambda$$
%$$\mu\bmu$$
%$$\nu\bnu$$
%$$\xi\bxi$$
%$$\pi\bpi$$
%$$\rho\brho$$
%$$\sigma\bsigma$$
%$$\tau\btau$$
%$$\upsilon\bupsilon$$
%$$\phi\bphi$$
%$$\chi\bchi$$
%$$\psi\bpsi$$
%$$\omega\bomega$$
%
%$$\varepsilon\bvarepsilon$$
%$$\vartheta\bvartheta$$
%$$\varpi\bvarpi$$
%$$\varrho\bvarrho$$
%$$\varphi\bvarphi$$
%small font
\font\mibsmall=cmmib7
\def\bsigmasmall{\hbox{\mibsmall\char"1B}}
\def\Tr{\hbox{Tr}\,}
\def\Arg{\hbox{Arg}}
\def\atan{\hbox{atan}}
"""
pngmath_latex_preamble = latex_preamble
latex_elements = {"preamble": latex_preamble}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Hermes'
copyright = u'2009-2013, hp-FEM group'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.1'
# The full version, including alpha/beta/rc tags.
release = '3.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
highlight_language = 'c++'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'bodyfont': 'verdana, helvetica, arial, sans-serif',
'bgcolor': '#FFFFFF',
'textcolor': '#000000',
'linkcolor': '#3D5C7A',
'rightsidebar': False,
'sidebarbgcolor': '#F8F8F8',
'sidebartextcolor': '#000000',
'sidebarlinkcolor': '#3D5C7A',
'headfont': '"trebuchet ms", verdana, helvetica, arial, sans-serif',
'headbgcolor': '#FFFFFF',
'headtextcolor': '#7590AE',
'headlinkcolor': '#3D5C7A',
'codebgcolor': '#F5F5F5',
'codetextcolor': '#000000',
'relbarbgcolor': '#1553ef',
'relbartextcolor': '#000000',
'relbarlinkcolor': '#FFFFFF',
'footerbgcolor': '#FFFFFF',
'footertextcolor': '#000000'
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Hermes Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Content'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'HermesDoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Hermes.tex', u'Hermes Documentation',
u'hp-FEM group', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True | gpl-3.0 | -8,940,958,333,252,457,000 | 28.171875 | 119 | 0.67985 | false |
mjpatter88/mjpython | test/unit/test_virtual_machine.py | 1 | 10066 | from virtual_machine import VirtualMachine, BIN_OPS, VirtualMachineError
from frame import Frame
from block import Block
from unittest.mock import MagicMock, patch
import pytest
class TestVirtualMachine:
def setup_method(self):
self.vm = VirtualMachine()
self.frame = MagicMock()
self.frame.blocks = []
def test_init__frames_empty_list(self):
assert self.vm.frames == []
def test_init__no_current_frame(self):
assert self.vm.current_frame == None
def test_init__no_return_value(self):
assert self.vm.return_value == None
def test_push_frame__adds_frame_to_frame_stack(self):
self.vm.push_frame(self.frame)
assert self.vm.frames[0] == self.frame
def test_push_frame__sets_current_frame(self):
self.vm.push_frame(self.frame)
assert self.vm.frames[0] == self.vm.current_frame
def test_run_code__creates_a_new_frame(self):
code = compile("None", "<string", 'eval')
self.vm.run_code(code)
assert len(self.vm.frames) == 1
def test_run_code__sets_current_frame_to_the_new_frame(self):
code = compile("None", "<string", 'eval')
self.vm.run_code(code)
assert self.vm.frames[0] == self.vm.current_frame
def test_run_code__assigns_given_code_to_the_new_frame(self):
code = compile("None", "<string>", "eval")
self.vm.run_code(code)
assert self.vm.frames[0].code == code
def test_run_code__assigns_main_as_new_frames_name(self):
code = compile("None", "<string>", "eval")
self.vm.run_code(code)
assert self.vm.frames[0].locals["__name__"] == "__main__"
@patch('virtual_machine.Frame')
def test_run_code__returns_the_result_of_execution(self, frame):
code = MagicMock()
f = MagicMock()
f.get_next_instr.return_value = ("RETURN_VALUE", 0)
f.stack = [10]
frame.return_value = f
assert self.vm.run_code(code) == 10
def test_run_frame__stops_execution_at_return(self):
self.frame.get_next_instr.side_effect = [("LOAD_CONST", 10), ("RETURN_VALUE", 0),
("LOAD_CONST", 15), ("RETURN_VALUE", 0)]
self.frame.stack = []
self.vm.push_frame(self.frame)
self.vm.run_frame(self.frame)
assert self.vm.return_value == 10
def test_run_frame__raises_unsupported_instr_ex_when_instr_not_recognized(self):
self.frame.get_next_instr.return_value = ("FAKE_INSTR", 0)
self.frame.stack = []
self.vm.push_frame(self.frame)
with pytest.raises(VirtualMachineError):
self.vm.run_frame(self.frame)
def test_get_func__returns_instr_function(self):
instr = "LOAD_CONST"
arg = 0
assert self.vm.get_func_and_arg(instr, arg) == (self.vm.instr_LOAD_CONST, arg)
def test_get_func__returns_binary_function_with_op_arg(self):
instr = "BINARY_ADD"
arg = 0
assert self.vm.get_func_and_arg(instr, arg) == (self.vm.binary_operation, BIN_OPS["ADD"])
def test_get_func__returns_binary_function_with_op_arg_when_inplace(self):
instr = "INPLACE_ADD"
arg = 0
assert self.vm.get_func_and_arg(instr, arg) == (self.vm.binary_operation, BIN_OPS["ADD"])
def test_get_func__returns_binary_function_with_op_arg_when_inplace_two_words(self):
instr = "INPLACE_FLOOR_DIVIDE"
arg = 0
assert self.vm.get_func_and_arg(instr, arg) == (self.vm.binary_operation, BIN_OPS["FLOOR_DIVIDE"])
def test_instr_LOAD_CONST__adds_arg_to_current_frames_stack(self):
arg = 5
self.frame.stack = []
self.vm.push_frame(self.frame)
self.vm.instr_LOAD_CONST(arg)
assert self.frame.stack[0] == arg
def test_instr_LOAD_GLOBAL__loads_from_builtins_to_current_frames_stack(self):
arg = 'foo'
self.frame.stack = []
self.frame.built_ins = {arg: 12}
self.vm.push_frame(self.frame)
self.vm.instr_LOAD_GLOBAL(arg)
assert self.frame.stack == [12]
def test_instr_LOAD_GLOBAL__raises_exception_if_name_not_found(self):
arg = 'foo'
self.frame.stack = []
self.frame.built_ins = {}
self.vm.push_frame(self.frame)
with pytest.raises(VirtualMachineError):
self.vm.instr_LOAD_GLOBAL(arg)
def test_instr_LOAD_NAME__loads_from_builtins_to_current_frames_stack(self):
arg = 'foo'
self.frame.stack = []
self.frame.built_ins = {arg: 12}
self.vm.push_frame(self.frame)
self.vm.instr_LOAD_NAME(arg)
assert self.frame.stack == [12]
def test_instr_LOAD_NAME__loads_from_locals_to_current_frames_stack(self):
arg = 'foo'
self.frame.stack = []
self.frame.locals = {arg: 12}
self.vm.push_frame(self.frame)
self.vm.instr_LOAD_NAME(arg)
assert self.frame.stack == [12]
def test_instr_LOAD_NAME__raises_exception_if_name_not_found(self):
arg = 'foo'
self.frame.stack = []
self.frame.built_ins = {}
self.vm.push_frame(self.frame)
with pytest.raises(VirtualMachineError):
self.vm.instr_LOAD_NAME(arg)
def test_instr_LOAD_ATTR__sets_TOS_to_attr_from_TOS(self):
arg = 'foo'
val = 10
tos = MagicMock()
setattr(tos, arg, val)
self.frame.stack = [tos]
self.vm.push_frame(self.frame)
self.vm.instr_LOAD_ATTR(arg)
assert self.frame.stack == [val]
def test_instr_STORE_FAST__removes_top_off_current_frames_stack(self):
self.frame.stack = [7]
self.vm.push_frame(self.frame)
self.vm.instr_STORE_FAST(5)
assert len(self.frame.stack) == 0
def test_instr_STORE_FAST__adds_arg_and_top_of_current_frames_stack_to_current_frames_locals(self):
arg = "foo"
self.frame.stack = [7]
self.frame.locals = {}
self.vm.push_frame(self.frame)
self.vm.instr_STORE_FAST(arg)
assert self.frame.locals == {arg: 7}
def test_instr_STORE_NAME__removes_top_off_current_frames_stack(self):
self.frame.stack = [7]
self.vm.push_frame(self.frame)
self.vm.instr_STORE_NAME(5)
assert len(self.frame.stack) == 0
def test_instr_STORE_NAME__adds_arg_and_top_of_current_frames_stack_to_current_frames_locals(self):
arg = "foo"
self.frame.stack = [7]
self.frame.locals = {}
self.vm.push_frame(self.frame)
self.vm.instr_STORE_NAME(arg)
assert self.frame.locals == {arg: 7}
def test_instr_LOAD_FAST__loads_current_frames_local_val_to_current_frames_stack(self):
arg = "foo"
self.frame.stack = []
self.frame.locals = {arg: 7}
self.vm.push_frame(self.frame)
self.vm.instr_LOAD_FAST(arg)
assert self.frame.stack == [7]
def test_instr_POP_JUMP_IF_FALSE__sets_current_instruction_to_arg_when_false(self):
arg = 1000
self.frame.stack =[False]
self.vm.push_frame(self.frame)
self.vm.instr_POP_JUMP_IF_FALSE(arg)
assert self.frame.instr_pointer == 1000
def test_instr_POP_JUMP_IF_FALSE__does_not_set_current_instruction_to_arg_when_true(self):
arg = 1000
self.frame.instr_pointer = 0
self.frame.stack =[True]
self.vm.push_frame(self.frame)
self.vm.instr_POP_JUMP_IF_FALSE(arg)
assert self.frame.instr_pointer == 0
def test_instr_JUMP_ABSOLUTE__sets_current_instruction_to_arg(self):
arg = 1000
self.vm.push_frame(self.frame)
self.vm.instr_JUMP_ABSOLUTE(arg)
assert self.frame.instr_pointer == arg
def test_instr_RETURN_VALUE__sets_return_to_top_of_current_frames_stack(self):
ret = 12
self.frame.stack = [ret]
self.vm.push_frame(self.frame)
self.vm.instr_RETURN_VALUE(0)
assert self.vm.return_value == ret
def test_instr_SETUP_LOOP__appends_new_block_to_current_frame(self):
arg = 1000
current_instr_pointer = 8
self.vm.push_frame(self.frame)
self.frame.instr_pointer = current_instr_pointer
self.vm.instr_SETUP_LOOP(arg)
assert len(self.vm.current_frame.blocks) == 1
def test_instr_SETUP_LOOP__sets_new_block_start_to_current_instr(self):
arg = 1000
current_instr_pointer = 8
self.vm.push_frame(self.frame)
self.frame.instr_pointer = current_instr_pointer
self.vm.instr_SETUP_LOOP(arg)
assert self.frame.blocks[0].start == 8
def test_instr_SETUP_LOOP__sets_new_block_end_to_arg_plus_current_instr(self):
arg = 1000
current_instr_pointer = 8
self.vm.push_frame(self.frame)
self.frame.instr_pointer = current_instr_pointer
self.vm.instr_SETUP_LOOP(arg)
assert self.frame.blocks[0].end == 1008
def test_instr_POP_BLOCK__pops_block_off_of_current_frame(self):
self.frame.blocks.append(Block(1,2))
self.vm.push_frame(self.frame)
self.vm.instr_POP_BLOCK(0)
assert len(self.frame.blocks) == 0
def test_instr_BREAK_LOOP__sets_current_instruction_to_end_of_block(self):
end = 1000
self.frame.blocks.append(Block(1,end))
self.vm.push_frame(self.frame)
self.vm.instr_BREAK_LOOP(0)
assert self.frame.instr_pointer == end
def test_instr_RETURN_VALUE__returns_return_control_code(self):
ret = 12
self.frame.stack = [ret]
self.vm.push_frame(self.frame)
assert self.vm.instr_RETURN_VALUE(0) == "RETURN"
def test_instr_POP_TOP__removes_the_current_frames_top_of_stack(self):
self.frame.stack = ["foo"]
self.vm.push_frame(self.frame)
self.vm.instr_POP_TOP(0)
assert self.frame.stack == []
def test_instr_LOAD_BUILD_CLASS(self):
self.frame.stack = []
self.vm.push_frame(self.frame)
self.vm.instr_LOAD_BUILD_CLASS(0)
assert self.frame.stack == [__builtins__['__build_class__']]
| mit | -6,974,837,825,826,239,000 | 36.281481 | 106 | 0.614147 | false |
realspencerdupre/PoS_Sourcecoin | contrib/linearize/linearize-hashes.py | 1 | 2763 | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblock(self, hash, verbose=True):
return self.rpc('getblock', [hash, verbose])
def getblockhash(self, index):
return self.rpc('getblockhash', [index])
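# Illustrative direct use of the RPC wrapper (credentials and port are
# placeholder values, not defaults defined by this script):
#   rpc = BitcoinRPC('127.0.0.1', 8332, 'someuser', 'somepassword')
#   genesis_hash = rpc.getblockhash(0)
#   block = rpc.getblock(genesis_hash)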
def get_block_hashes(settings):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
for height in xrange(settings['min_height'], settings['max_height']+1):
hash = rpc.getblockhash(height)
print(hash)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize-hashes.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = CM_RPC
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 319000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
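# Example CONFIG-FILE contents (illustrative values only; use your own node's
# RPC credentials, port and block range):
#   host=127.0.0.1
#   port=8332
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=319000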
| mit | 8,287,842,049,013,357,000 | 25.066038 | 78 | 0.6616 | false |
xsteadfastx/bib-api | app/mod_api/views.py | 1 | 7896 | import arrow
from flask import current_app, jsonify, request, Response, g
from itsdangerous import URLSafeSerializer
from app.mod_api import mod_api, schemes
from app.mod_api.decorators import valid_facility, valid_token
from app.mod_api.errors import InvalidUsage
from app.mod_api.ical import build_ical
@mod_api.route('/facilities', methods=['GET'])
def facility_list():
"""List all available facilities.
Request::
http GET "localhost:5000/api/facilities"
Response::
{
"facilities": {
"wolfsburg": {
"city": "Wolfsburg",
"name": "Stadtbibliothek Wolfsburg",
"url": "http://webopac.stadt.wolfsburg.de"
}
}
}
"""
facilities = {}
for i, j in current_app.facilities.items():
facilities[i] = j['metadata']
return jsonify(facilities=facilities)
@mod_api.route('/<facility>/search', methods=['POST'])
@valid_facility
def search(facility):
"""Search library for items.
    It takes a JSON object in a POST request. You can also add a request
    argument named "page" to the URL for pagination.
    Most of the time the library search form returns more items than fit on a
    single page, so some kind of pagination is needed. If the page argument
    is given, the search browses to that page of the results before parsing
    them. If it is not given, page number "1" is used.
    Here is an example:
Request::
http POST "localhost:5000/api/wolfsburg/search?page=4" term="batman"
Response::
{
"next_page": 5,
"results": [
{
"annotation": "Der Schurke Two-Face wurde durch...",
"author": "geschrieben von Matthew K. Manning.",
"copies": [
{
"available": false,
"branch": "01:Kinderbibl. Zentr",
"due_date": "2016-02-05",
"id": "M1400963",
"position": "4.1 Mann",
"type": "Kinder- und Jugendliteratur"
}
],
"cover": "http://foo.bar/images/P/3555829.03.MZZZZZZZ.jpg",
"isbn": "978-3-596-85582-7",
"title": "Batman - Ein finsterer Plan",
"year": "2013-01-01"
}
]
}
:param facility: The facility to search in.
:type facility: str
"""
# get term and validate it
json_data, errors = schemes.SearchRequest().load(request.get_json())
if errors:
raise InvalidUsage(errors)
# parse request args for page
if request.args.get('page'):
if not request.args.get('page').isdigit():
raise InvalidUsage('page type not integer')
page = int(request.args.get('page'))
else:
page = 1
# perform search and marshmallow it
results = current_app.facilities[facility]['search'](json_data['term'],
page)
data = schemes.SearchResponse().dump(results)
return jsonify(data.data)
@mod_api.route('/<facility>/token', methods=['POST'])
@valid_facility
def get_token(facility):
"""Creates a authentication token.
This endpoint returns a authentication token for a specific facility.
Request::
http POST localhost:5000/api/wolfsburg/token username=foo password=bar
Response::
{
"token": "eyJwYXNzd29yZCI6IjoiZm9vIn0.DmRMyew4ukCAZHsnIrs4PaY8"
}
:param facility: The facility to get a token for.
:type facility: str
"""
post_data = request.get_json()
# if there is no data raise an error
if not post_data:
raise InvalidUsage('no data')
# get authentication data and validate it
json_data, errors = schemes.TokenRequest().load(post_data)
if errors:
raise InvalidUsage(errors)
# create serializer
s = URLSafeSerializer(current_app.config['SECRET_KEY'], salt=facility)
# create token
token = s.dumps(json_data)
# scheme it
data = schemes.TokenResponse().dump({'token': token})
return jsonify(data.data)
@mod_api.route('/<facility>/lent', methods=['GET'])
@valid_facility
@valid_token
def lent_list(facility):
"""Returns a list of lent items and the saldo of the account.
This view returns all lent items in a list with the title and author
plus the date until the item needs to get returned. It also tries to get
the saldo of the account.
Request::
http GET localhost:5000/api/wolfsburg/lent?token=pIUBfh1BSvoROF8wgHse
Response::
{
'saldo': '-36,00',
'items': [
{
'due_date': '2016-04-15', 'author': 'Dürer, Albrecht',
'title': 'Albrecht Dürer'
}, {
'due_date': '2016-04-15', 'author': 'Hopkins, John',
'title': 'Modezeichnen'
}, {
'due_date': '2016-04-15', 'author': 'Hopper, Edward',
'title': 'Edward Hopper'
}
]
}
:param facility: The facility to get a lent list from.
:type facility: str
"""
s = URLSafeSerializer(current_app.config['SECRET_KEY'], salt=facility)
token = request.args['token']
userdata = s.loads(token)
lent_list = current_app.facilities[facility]['lent_list'](
userdata['username'], userdata['password'])
data = schemes.LentListResponse().dump(lent_list)
return jsonify(data.data)
@mod_api.route('/<facility>/ical/lent.ics', methods=['GET'])
@valid_facility
@valid_token
def lent_ical(facility):
"""Returns a calendar for all lent items in the ical format.
The calendar file includes all return dates for all lent items. It can be
    used for importing them into other calendar software like
    Google Calendar or Thunderbird Lightning.
Request::
http GET localhost:5000/api/wolfsburg/ical/lent.ics?token=pIUBfh1se
Response::
BEGIN:VCALENDAR
PRODID:ics.py - http://git.io/lLljaA
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20160609T101434Z
DTSTART:20160415T000000Z
SUMMARY:Bibliotheksrueckgaben: 2
DESCRIPTION:Dürer\, Albrecht: Albrecht Dürer\\nHopper\, Edward: Edward
UID:[email protected]
END:VEVENT
BEGIN:VEVENT
DTSTAMP:20160609T101434Z
DTSTART:20160420T000000Z
SUMMARY:Bibliotheksrueckgaben: 1
DESCRIPTION:Hopkins\, John: Modezeichnen
UID:[email protected]
END:VEVENT
END:VCALENDAR
:param facility: The facility to get a lent list from.
:type facility: str
"""
s = URLSafeSerializer(current_app.config['SECRET_KEY'], salt=facility)
token = request.args['token']
# check if token already in redis
redis_entry = g.redis.hgetall(token)
if redis_entry:
two_hours_ago = arrow.utcnow().replace(hours=-2)
updated = arrow.get(redis_entry[b'updated'].decode('utf-8'))
if updated > two_hours_ago:
ical = redis_entry[b'ical'].decode('utf-8')
return Response(ical, mimetype='text/calendar')
userdata = s.loads(token)
lent_list = current_app.facilities[facility]['lent_list'](
userdata['username'], userdata['password'])
data = schemes.LentListResponse().dump(lent_list)
ical = build_ical(data.data)
# store new ical in redis
g.redis.hmset(token, dict(ical=ical, updated=arrow.utcnow()))
return Response(ical, mimetype='text/calendar')
| mit | -5,869,231,872,556,535,000 | 28.014706 | 79 | 0.589965 | false |
navtejsingh/pychimera | chimera/centroid.py | 1 | 2287 | from __future__ import division
import numpy as np
from photutils.morphology import centroid_com, centroid_1dg, centroid_2dg
def recenter(image, pos, window_size = 15, method = "2dg"):
"""
Recenter each star in each frame of the image cube before performing
aperture photometry to take care of slight misalignments between frames
because of atmospheric turbulence and tracking/pointing errors.
Parameters
----------
image : numpy array
2D image
pos : list
List of (x,y) tuples for star positions
window_size : int
        Size of the window (in pixels) used to fit the star profile when
        calculating the new center
    method : string
        Method used to find the center of the star. Options are "1dg"
        (1D Gaussian fit), "2dg" (2D Gaussian fit) or "com" (center of mass)
    Returns
    -------
    star_pos : numpy array
        Array of shape (nstars, 2) with the refined x and y center of each
        star
"""
pos = np.asarray(pos)
ny, nx = image.shape
window_size = int(window_size)
nstars = pos.shape[0]
star_pos = np.zeros([nstars,2], dtype = np.float32)
for i in range(nstars):
x, y = pos[i][0], pos[i][1]
xmin, xmax = int(x) - int(window_size/2), int(x) + int(window_size/2) + 1
ymin, ymax = int(y) - int(window_size/2), int(y) + int(window_size/2) + 1
if xmin < 0:
xmin = 0
if ymin < 0:
ymin = 0
if xmax > nx:
xmax = nx
if ymax > ny:
ymax = ny
if method == "1dg":
xcen, ycen = centroid_1dg(image[ymin:ymax,xmin:xmax])
elif method == "2dg":
xcen, ycen = centroid_2dg(image[ymin:ymax,xmin:xmax])
elif method == "com":
xcen, ycen = centroid_com(image[ymin:ymax,xmin:xmax])
if (np.abs(xmin + xcen - x)) > 3. or (np.abs(ymin + ycen - y)) > 3.:
star_pos[i,0] = x
star_pos[i,1] = y
else:
star_pos[i,0] = xmin + xcen
star_pos[i,1] = ymin + ycen
return star_pos | bsd-3-clause | 6,130,274,085,771,288,000 | 29.918919 | 94 | 0.493223 | false |
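# --- Hypothetical usage sketch for the recenter() helper above.  The image
# and rough star positions are made up; each row of the returned array is the
# refined (x, y) center, falling back to the input position whenever the fit
# drifts by more than 3 pixels.
def _example_recenter():
    image = np.random.poisson(10.0, size=(256, 256)).astype(np.float32)
    rough_positions = [(50.0, 60.0), (120.5, 200.2)]
    return recenter(image, rough_positions, window_size=15, method="2dg")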
beetbox/beets | beetsplug/web/__init__.py | 1 | 15947 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A Web interface to beets."""
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets import ui
from beets import util
import beets.library
import flask
from flask import g, jsonify
from werkzeug.routing import BaseConverter, PathConverter
import os
from unidecode import unidecode
import json
import base64
# Utilities.
def _rep(obj, expand=False):
"""Get a flat -- i.e., JSON-ish -- representation of a beets Item or
Album object. For Albums, `expand` dictates whether tracks are
included.
"""
out = dict(obj)
if isinstance(obj, beets.library.Item):
if app.config.get('INCLUDE_PATHS', False):
out['path'] = util.displayable_path(out['path'])
else:
del out['path']
# Filter all bytes attributes and convert them to strings.
for key, value in out.items():
if isinstance(out[key], bytes):
out[key] = base64.b64encode(value).decode('ascii')
# Get the size (in bytes) of the backing file. This is useful
# for the Tomahawk resolver API.
try:
out['size'] = os.path.getsize(util.syspath(obj.path))
except OSError:
out['size'] = 0
return out
elif isinstance(obj, beets.library.Album):
if app.config.get('INCLUDE_PATHS', False):
out['artpath'] = util.displayable_path(out['artpath'])
else:
del out['artpath']
if expand:
out['items'] = [_rep(item) for item in obj.items()]
return out
def json_generator(items, root, expand=False):
"""Generator that dumps list of beets Items or Albums as JSON
:param root: root key for JSON
:param items: list of :class:`Item` or :class:`Album` to dump
:param expand: If true every :class:`Album` contains its items in the json
representation
:returns: generator that yields strings
"""
yield '{"%s":[' % root
first = True
for item in items:
if first:
first = False
else:
yield ','
yield json.dumps(_rep(item, expand=expand))
yield ']}'
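# A minimal sketch of how json_generator() is intended to be consumed: wrap
# it in a streaming response so a large library is serialized lazily rather
# than being built up in memory first.  The `items` argument here stands in
# for any iterable of beets Items or Albums.
def _example_stream(items):
    return app.response_class(
        json_generator(items, root='items', expand=False),
        mimetype='application/json'
    )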
def is_expand():
"""Returns whether the current request is for an expanded response."""
return flask.request.args.get('expand') is not None
def is_delete():
"""Returns whether the current delete request should remove the selected
files.
"""
return flask.request.args.get('delete') is not None
def get_method():
"""Returns the HTTP method of the current request."""
return flask.request.method
def resource(name, patchable=False):
"""Decorates a function to handle RESTful HTTP requests for a resource.
"""
def make_responder(retriever):
def responder(ids):
entities = [retriever(id) for id in ids]
entities = [entity for entity in entities if entity]
if get_method() == "DELETE":
if app.config.get('READONLY', True):
return flask.abort(405)
for entity in entities:
entity.remove(delete=is_delete())
return flask.make_response(jsonify({'deleted': True}), 200)
elif get_method() == "PATCH" and patchable:
if app.config.get('READONLY', True):
return flask.abort(405)
for entity in entities:
entity.update(flask.request.get_json())
entity.try_sync(True, False) # write, don't move
if len(entities) == 1:
return flask.jsonify(_rep(entities[0], expand=is_expand()))
elif entities:
return app.response_class(
json_generator(entities, root=name),
mimetype='application/json'
)
elif get_method() == "GET":
if len(entities) == 1:
return flask.jsonify(_rep(entities[0], expand=is_expand()))
elif entities:
return app.response_class(
json_generator(entities, root=name),
mimetype='application/json'
)
else:
return flask.abort(404)
else:
return flask.abort(405)
responder.__name__ = 'get_{0}'.format(name)
return responder
return make_responder
def resource_query(name, patchable=False):
"""Decorates a function to handle RESTful HTTP queries for resources.
"""
def make_responder(query_func):
def responder(queries):
entities = query_func(queries)
if get_method() == "DELETE":
if app.config.get('READONLY', True):
return flask.abort(405)
for entity in entities:
entity.remove(delete=is_delete())
return flask.make_response(jsonify({'deleted': True}), 200)
elif get_method() == "PATCH" and patchable:
if app.config.get('READONLY', True):
return flask.abort(405)
for entity in entities:
entity.update(flask.request.get_json())
entity.try_sync(True, False) # write, don't move
return app.response_class(
json_generator(entities, root=name),
mimetype='application/json'
)
elif get_method() == "GET":
return app.response_class(
json_generator(
entities,
root='results', expand=is_expand()
),
mimetype='application/json'
)
else:
return flask.abort(405)
responder.__name__ = 'query_{0}'.format(name)
return responder
return make_responder
def resource_list(name):
"""Decorates a function to handle RESTful HTTP request for a list of
resources.
"""
def make_responder(list_all):
def responder():
return app.response_class(
json_generator(list_all(), root=name, expand=is_expand()),
mimetype='application/json'
)
responder.__name__ = 'all_{0}'.format(name)
return responder
return make_responder
def _get_unique_table_field_values(model, field, sort_field):
""" retrieve all unique values belonging to a key from a model """
if field not in model.all_keys() or sort_field not in model.all_keys():
raise KeyError
with g.lib.transaction() as tx:
rows = tx.query('SELECT DISTINCT "{0}" FROM "{1}" ORDER BY "{2}"'
.format(field, model._table, sort_field))
return [row[0] for row in rows]
class IdListConverter(BaseConverter):
"""Converts comma separated lists of ids in urls to integer lists.
"""
def to_python(self, value):
ids = []
for id in value.split(','):
try:
ids.append(int(id))
except ValueError:
pass
return ids
def to_url(self, value):
return ','.join(str(v) for v in value)
class QueryConverter(PathConverter):
"""Converts slash separated lists of queries in the url to string list.
"""
def to_python(self, value):
queries = value.split('/')
"""Do not do path substitution on regex value tests"""
return [query if '::' in query else query.replace('\\', os.sep)
for query in queries]
def to_url(self, value):
return ','.join([v.replace(os.sep, '\\') for v in value])
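# Small illustration (not used by the plugin) of what QueryConverter does to
# an incoming URL fragment: queries are split on '/', and backslashes are
# mapped back to os.sep except in '::' regex queries, which pass through
# untouched.  The example query string is made up.
def _example_query_conversion():
    converter = QueryConverter(None)  # the Werkzeug URL map is unused here
    # yields ['albumartist:Beck', 'path:music<os.sep>beck'] on this platform
    return converter.to_python('albumartist:Beck/path:music\\beck')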
class EverythingConverter(PathConverter):
regex = '.*?'
# Flask setup.
app = flask.Flask(__name__)
app.url_map.converters['idlist'] = IdListConverter
app.url_map.converters['query'] = QueryConverter
app.url_map.converters['everything'] = EverythingConverter
@app.before_request
def before_request():
g.lib = app.config['lib']
# Items.
@app.route('/item/<idlist:ids>', methods=["GET", "DELETE", "PATCH"])
@resource('items', patchable=True)
def get_item(id):
return g.lib.get_item(id)
@app.route('/item/')
@app.route('/item/query/')
@resource_list('items')
def all_items():
return g.lib.items()
@app.route('/item/<int:item_id>/file')
def item_file(item_id):
item = g.lib.get_item(item_id)
# On Windows under Python 2, Flask wants a Unicode path. On Python 3, it
# *always* wants a Unicode path.
if os.name == 'nt':
item_path = util.syspath(item.path)
else:
item_path = util.py3_path(item.path)
try:
unicode_item_path = util.text_string(item.path)
except (UnicodeDecodeError, UnicodeEncodeError):
unicode_item_path = util.displayable_path(item.path)
base_filename = os.path.basename(unicode_item_path)
try:
# Imitate http.server behaviour
base_filename.encode("latin-1", "strict")
except UnicodeEncodeError:
safe_filename = unidecode(base_filename)
else:
safe_filename = base_filename
response = flask.send_file(
item_path,
as_attachment=True,
attachment_filename=safe_filename
)
response.headers['Content-Length'] = os.path.getsize(item_path)
return response
@app.route('/item/query/<query:queries>', methods=["GET", "DELETE", "PATCH"])
@resource_query('items', patchable=True)
def item_query(queries):
return g.lib.items(queries)
@app.route('/item/path/<everything:path>')
def item_at_path(path):
query = beets.library.PathQuery('path', path.encode('utf-8'))
item = g.lib.items(query).get()
if item:
return flask.jsonify(_rep(item))
else:
return flask.abort(404)
@app.route('/item/values/<string:key>')
def item_unique_field_values(key):
sort_key = flask.request.args.get('sort_key', key)
try:
values = _get_unique_table_field_values(beets.library.Item, key,
sort_key)
except KeyError:
return flask.abort(404)
return flask.jsonify(values=values)
# Albums.
@app.route('/album/<idlist:ids>', methods=["GET", "DELETE"])
@resource('albums')
def get_album(id):
return g.lib.get_album(id)
@app.route('/album/')
@app.route('/album/query/')
@resource_list('albums')
def all_albums():
return g.lib.albums()
@app.route('/album/query/<query:queries>', methods=["GET", "DELETE"])
@resource_query('albums')
def album_query(queries):
return g.lib.albums(queries)
@app.route('/album/<int:album_id>/art')
def album_art(album_id):
album = g.lib.get_album(album_id)
if album and album.artpath:
return flask.send_file(album.artpath.decode())
else:
return flask.abort(404)
@app.route('/album/values/<string:key>')
def album_unique_field_values(key):
sort_key = flask.request.args.get('sort_key', key)
try:
values = _get_unique_table_field_values(beets.library.Album, key,
sort_key)
except KeyError:
return flask.abort(404)
return flask.jsonify(values=values)
# Artists.
@app.route('/artist/')
def all_artists():
with g.lib.transaction() as tx:
rows = tx.query("SELECT DISTINCT albumartist FROM albums")
all_artists = [row[0] for row in rows]
return flask.jsonify(artist_names=all_artists)
# Library information.
@app.route('/stats')
def stats():
with g.lib.transaction() as tx:
item_rows = tx.query("SELECT COUNT(*) FROM items")
album_rows = tx.query("SELECT COUNT(*) FROM albums")
return flask.jsonify({
'items': item_rows[0][0],
'albums': album_rows[0][0],
})
# UI.
@app.route('/')
def home():
return flask.render_template('index.html')
# Plugin hook.
class WebPlugin(BeetsPlugin):
def __init__(self):
super(WebPlugin, self).__init__()
self.config.add({
'host': u'127.0.0.1',
'port': 8337,
'cors': '',
'cors_supports_credentials': False,
'reverse_proxy': False,
'include_paths': False,
'readonly': True,
})
def commands(self):
cmd = ui.Subcommand('web', help=u'start a Web interface')
cmd.parser.add_option(u'-d', u'--debug', action='store_true',
default=False, help=u'debug mode')
def func(lib, opts, args):
args = ui.decargs(args)
if args:
self.config['host'] = args.pop(0)
if args:
self.config['port'] = int(args.pop(0))
app.config['lib'] = lib
# Normalizes json output
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
app.config['INCLUDE_PATHS'] = self.config['include_paths']
app.config['READONLY'] = self.config['readonly']
# Enable CORS if required.
if self.config['cors']:
self._log.info(u'Enabling CORS with origin: {0}',
self.config['cors'])
from flask_cors import CORS
app.config['CORS_ALLOW_HEADERS'] = "Content-Type"
app.config['CORS_RESOURCES'] = {
r"/*": {"origins": self.config['cors'].get(str)}
}
CORS(
app,
supports_credentials=self.config[
'cors_supports_credentials'
].get(bool)
)
# Allow serving behind a reverse proxy
if self.config['reverse_proxy']:
app.wsgi_app = ReverseProxied(app.wsgi_app)
# Start the web application.
app.run(host=self.config['host'].as_str(),
port=self.config['port'].get(int),
debug=opts.debug, threaded=True)
cmd.func = func
return [cmd]
class ReverseProxied(object):
'''Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
From: http://flask.pocoo.org/snippets/35/
:param app: the WSGI application
'''
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
| mit | 7,547,048,632,634,396,000 | 29.202652 | 79 | 0.577475 | false |
googleapis/python-debugger-client | docs/conf.py | 1 | 12472 | # -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-debugger-client documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "google-cloud-debugger-client"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-debugger-client",
"github_user": "googleapis",
"github_repo": "python-debugger-client",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-debugger-client-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-debugger-client.tex",
"google-cloud-debugger-client Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-debugger-client",
"google-cloud-debugger-client Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-debugger-client",
"google-cloud-debugger-client Documentation",
author,
"google-cloud-debugger-client",
"google-cloud-debugger-client Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| apache-2.0 | -833,755,084,706,354,800 | 31.821053 | 88 | 0.694917 | false |
lewisodriscoll/sasview | src/sas/sasgui/guiframe/local_perspectives/plotting/graphAppearance.py | 3 | 10122 | #!/usr/bin/python
"""
Dialog for general graph appearance
This software was developed by Institut Laue-Langevin as part of
Distributed Data Analysis of Neutron Scattering Experiments (DANSE).
Copyright 2012 Institut Laue-Langevin
"""
import wx
from sas.sasgui.plottools.SimpleFont import SimpleFont
COLOR = ['black', 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow']
class graphAppearance(wx.Frame):
def __init__(self, parent, title, legend=True):
super(graphAppearance, self).__init__(parent, title=title, size=(520, 435))
self.legend = legend
self.InitUI()
self.Centre()
self.Show()
self.xfont = None
self.yfont = None
self.is_xtick = False
self.is_ytick = False
def InitUI(self):
panel = wx.Panel(self)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
xhbox1 = wx.BoxSizer(wx.HORIZONTAL)
xhbox2 = wx.BoxSizer(wx.HORIZONTAL)
yhbox1 = wx.BoxSizer(wx.HORIZONTAL)
yhbox2 = wx.BoxSizer(wx.HORIZONTAL)
if self.legend:
legendLocText = wx.StaticText(panel, label='Legend location: ')
self.legend_loc_combo = wx.ComboBox(panel, style=wx.CB_READONLY, size=(180, -1))
self.fillLegendLocs()
else:
self.legend_loc_combo = None
if self.legend:
self.toggle_legend = wx.CheckBox(panel, label='Toggle legend on/off')
else:
self.toggle_legend = None
self.toggle_grid = wx.CheckBox(panel, label='Toggle grid on/off')
xstatic_box = wx.StaticBox(panel, -1, 'x-axis label')
xstatic_box_sizer = wx.StaticBoxSizer(xstatic_box, wx.VERTICAL)
ystatic_box = wx.StaticBox(panel, -1, 'y-axis label')
ystatic_box_sizer = wx.StaticBoxSizer(ystatic_box, wx.VERTICAL)
xaxis_label = wx.StaticText(panel, label='X-axis: ')
yaxis_label = wx.StaticText(panel, label='Y-axis: ')
unitlabel_1 = wx.StaticText(panel, label='Units: ')
unitlabel_2 = wx.StaticText(panel, label='Units: ')
self.xaxis_text = wx.TextCtrl(panel, -1, "", size=(220, -1))
self.yaxis_text = wx.TextCtrl(panel, -1, "", size=(220, -1))
self.xaxis_unit_text = wx.TextCtrl(panel, -1, "", size=(100, -1))
self.yaxis_unit_text = wx.TextCtrl(panel, -1, "", size=(100, -1))
xcolorLabel = wx.StaticText(panel, label='Font color: ')
self.xfont_color = wx.ComboBox(panel, size=(100, -1), style=wx.CB_READONLY)
self.xfill_colors()
self.xfont_color.SetSelection(0)
xfont_button = wx.Button(panel, label='Font')
xfont_button.Bind(wx.EVT_BUTTON, self.on_x_font)
ycolorLabel = wx.StaticText(panel, label='Font color: ')
self.yfont_color = wx.ComboBox(panel, size=(100, -1), style=wx.CB_READONLY)
self.yfill_colors()
self.yfont_color.SetSelection(0)
yfont_button = wx.Button(panel, label='Font')
yfont_button.Bind(wx.EVT_BUTTON, self.on_y_font)
self.cancel_button = wx.Button(panel, label='Cancel')
self.ok_button = wx.Button(panel, label='OK')
self.cancel_button.Bind(wx.EVT_BUTTON, self.on_cancel)
self.ok_button.Bind(wx.EVT_BUTTON, self.on_ok)
xhbox1.Add(xaxis_label, flag=wx.ALL | wx.EXPAND | wx.ALIGN_LEFT, border=10)
xhbox1.Add(self.xaxis_text, flag=wx.ALL | wx.EXPAND | wx.ALIGN_LEFT, border=10)
xhbox1.Add(unitlabel_1, flag=wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, border=10)
xhbox1.Add(self.xaxis_unit_text, flag=wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, border=10)
yhbox1.Add(yaxis_label, flag=wx.ALL | wx.EXPAND | wx.ALIGN_LEFT, border=10)
yhbox1.Add(self.yaxis_text, flag=wx.ALL | wx.EXPAND | wx.ALIGN_LEFT, border=10)
yhbox1.Add(unitlabel_2, flag=wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, border=10)
yhbox1.Add(self.yaxis_unit_text, flag=wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, border=10)
xhbox2.Add(xcolorLabel, flag=wx.ALL | wx.ALIGN_RIGHT, border=10)
xhbox2.Add(self.xfont_color, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
xhbox2.Add(xfont_button, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
yhbox2.Add(ycolorLabel, flag=wx.ALL | wx.ALIGN_RIGHT, border=10)
yhbox2.Add(self.yfont_color, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
yhbox2.Add(yfont_button, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
if self.legend:
hbox1.Add(legendLocText, flag=wx.ALL | wx.EXPAND | wx.ALIGN_LEFT, border=5)
hbox1.Add(self.legend_loc_combo, flag=wx.ALL | wx.EXPAND | wx.ALIGN_LEFT, border=5)
if self.legend:
hbox1.Add((5, -1))
hbox1.Add(self.toggle_legend, flag=wx.ALL | wx.EXPAND | wx.ALIGN_LEFT, border=5)
hbox2.Add(self.ok_button, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
hbox2.Add(self.cancel_button, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
hbox2.Add((15, -1))
xstatic_box_sizer.Add(xhbox1, flag=wx.EXPAND, border=5)
xstatic_box_sizer.Add(xhbox2, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
ystatic_box_sizer.Add(yhbox1, flag=wx.EXPAND, border=5)
ystatic_box_sizer.Add(yhbox2, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
vbox.Add((-1, 20))
vbox.Add(hbox1, flag=wx.EXPAND | wx.ALL, border=5)
vbox.Add(xstatic_box_sizer, flag=wx.ALL | wx.EXPAND, border=10)
vbox.Add(ystatic_box_sizer, flag=wx.ALL | wx.EXPAND, border=10)
vbox.Add(self.toggle_grid, flag=wx.ALIGN_RIGHT | wx.RIGHT, border=20)
vbox.Add(hbox2, flag=wx.ALIGN_RIGHT | wx.ALL, border=5)
panel.SetSizer(vbox)
def xfill_colors(self):
c_list = COLOR
for idx in range(len(c_list)):
self.xfont_color.Append(c_list[idx], idx)
def yfill_colors(self):
c_list = COLOR
for idx in range(len(c_list)):
self.yfont_color.Append(c_list[idx], idx)
def on_x_font(self, e):
title = 'Modify x axis font'
fonty = SimpleFont(self, wx.NewId(), title)
fonty.set_default_font(self.xfont)
fonty.set_ticklabel_check(self.is_xtick)
if fonty.ShowModal() == wx.ID_OK:
self.xfont = fonty.get_font()
self.is_xtick = fonty.get_ticklabel_check()
def on_y_font(self, e):
title = 'Modify y axis font'
fonty = SimpleFont(self, wx.NewId(), title)
fonty.set_default_font(self.yfont)
fonty.set_ticklabel_check(self.is_ytick)
if fonty.ShowModal() == wx.ID_OK:
self.yfont = fonty.get_font()
self.is_ytick = fonty.get_ticklabel_check()
def on_ok(self, e):
self.Close()
def on_cancel(self, e):
self.Destroy()
def get_loc_label(self):
"""
        Map each legend location name to its matplotlib location code
"""
_labels = {}
i = 0
_labels['best'] = i
i += 1
_labels['upper right'] = i
i += 1
_labels['upper left'] = i
i += 1
_labels['lower left'] = i
i += 1
_labels['lower right'] = i
i += 1
_labels['right'] = i
i += 1
_labels['center left'] = i
i += 1
_labels['center right'] = i
i += 1
_labels['lower center'] = i
i += 1
_labels['upper center'] = i
i += 1
_labels['center'] = i
return _labels
def fillLegendLocs(self):
# labels = []
# for label in self.get_loc_label():
# labels.append(str(label))
# for label in reversed(labels):
# self.legend_loc_combo.Append(label)
for label in self.get_loc_label():
self.legend_loc_combo.Append(label)
def setDefaults(self, grid, legend, xlab, ylab, xunit, yunit,
xaxis_font, yaxis_font, legend_loc,
xcolor, ycolor, is_xtick, is_ytick):
self.toggle_grid.SetValue(grid)
if self.legend:
self.toggle_legend.SetValue(legend)
self.xaxis_text.SetValue(xlab)
self.yaxis_text.SetValue(ylab)
self.xaxis_unit_text.SetValue(xunit)
self.yaxis_unit_text.SetValue(yunit)
self.xfont = xaxis_font
self.yfont = yaxis_font
self.is_xtick = is_xtick
self.is_ytick = is_ytick
if not xcolor:
self.xfont_color.SetSelection(0)
else:
self.xfont_color.SetStringSelection(xcolor)
if not ycolor:
self.yfont_color.SetSelection(0)
else:
self.yfont_color.SetStringSelection(ycolor)
if self.legend:
self.legend_loc_combo.SetStringSelection(legend_loc)
# get whether grid is toggled on/off
def get_togglegrid(self):
return self.toggle_grid.GetValue()
# get whether legend is toggled on/off
def get_togglelegend(self):
return self.toggle_legend.GetValue()
# get x label
def get_xlab(self):
return self.xaxis_text.GetValue()
# get y label
def get_ylab(self):
return self.yaxis_text.GetValue()
# get x unit
def get_xunit(self):
return self.xaxis_unit_text.GetValue()
# get y unit
def get_yunit(self):
return self.yaxis_unit_text.GetValue()
# get legend location
def get_legend_loc(self):
return self.get_loc_label()[self.legend_loc_combo.GetStringSelection()]
# get x axis label color
def get_xcolor(self):
return self.xfont_color.GetValue()
# get y axis label color
def get_ycolor(self):
return self.yfont_color.GetValue()
# get x axis font (type is FontProperties)
def get_xfont(self):
return self.xfont
# get y axis font
def get_yfont(self):
return self.yfont
def get_xtick_check(self):
return self.is_xtick
def get_ytick_check(self):
return self.is_ytick
if __name__ == '__main__':
app = wx.App()
graphD = graphAppearance(None, title='Modify graph appearance')
app.MainLoop()
| bsd-3-clause | 4,084,120,535,552,454,700 | 31.757282 | 95 | 0.598202 | false |
daviewales/pimotion | pimotion/backend.py | 1 | 4082 | #!/usr/bin/env python3
import picamera
import numpy
import io
import time
def get_png_image(resolution=(640, 480)):
width, height = resolution
image_stream = io.BytesIO()
with picamera.PiCamera() as camera:
camera.resolution = resolution
camera.start_preview()
camera.capture(image_stream, format='png')
image_stream.seek(0)
return image_stream.read()
def get_image(resolution=(640, 480)):
'''
    Capture frames continuously at the specified resolution and yield each
    one as a 2D numpy array of Y (luma) pixel values.
'''
width, height = resolution
pixels = width * height
image_stream = io.BytesIO()
with picamera.PiCamera() as camera:
camera.resolution = resolution
camera.start_preview()
time.sleep(2) # Let the camera 'warm up'.
while True:
camera.capture(image_stream, format='yuv', use_video_port=True)
image_stream.seek(0)
image_bytes = image_stream.read(pixels)
image = numpy.fromstring(image_bytes, count=pixels,
dtype=numpy.int8)
image = image.reshape((height, width))[:height, :width]
yield image
image_stream.seek(0)
camera.stop_preview()
def difference_image(image1, image2, threshold):
height, width = image1.shape
return abs(image1 - image2).astype(numpy.uint8) > threshold
def motion_coordinates(difference_image, tile_width, tile_height, tile_motion):
"""
Get the coordinates of motion from a difference_image.
Split the image into tiles with dimensions
tile_width * tile_height.
Return the coordinates of the centre of each tile where the sum of
motion pixels within the tile is >= tile_motion * tile_area.
"""
height, width = difference_image.shape
tile_area = tile_height * tile_width
# tile_motion * tile_area gives the total number of
# changed pixels within a given tile required for
# motion to be registered.
changed_pixel_threshold = tile_motion * tile_area
centre_offset_x, centre_offset_y = tile_width//2, tile_height//2
coordinates = [
[x + centre_offset_x, y + centre_offset_y]
for x in range(0, width, tile_width)
for y in range(0, height, tile_height)
if difference_image[y:y+tile_height, x:x+tile_width].sum()
>= changed_pixel_threshold]
return coordinates
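# A small worked example (illustrative only) of the tiling rule above: with
# 4x4 tiles and tile_motion=0.5, a tile must contain at least 8 changed
# pixels before its centre coordinates are reported.
def _example_motion_coordinates():
    changed = numpy.zeros((8, 8), dtype=bool)
    changed[0:4, 0:4] = True   # the whole top-left tile changed (16 pixels)
    changed[0:2, 4:6] = True   # only 4 pixels of the top-right tile changed
    # only the centre of the fully changed tile, [2, 2], is returned
    return motion_coordinates(changed, 4, 4, 0.5)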
def get_motion_data(resolution=(640, 480), threshold=16,
tile_dimensions=(20, 20), tile_motion=0.8):
'''
Return list of lists of coordinates of motion.
resolution is a tuple containing the dimensions of the image:
resolution = (width, height).
threshold is a number specifying the minimum change in pixel intensity
required for motion to be registered.
tile_dimensions is a tuple containing the dimensions of the tiles
which the image will be divided into to check for motion.
tile_dimensions = (width, height).
tile_motion is the fraction of a given tile which must contain motion
for motion to be registered. For instance, if we are using 20x20 tiles,
then the total number of pixels contained in a given tile is 400 pixels.
    If tile_motion == 1, then a tile will not be registered as containing
    motion if fewer than 400 pixels within the tile contain motion.
However, if tile_motion == 0.5, then only half the tile must contain motion
in order for the tile to be registered as motion.
'''
width, height = resolution
tile_width, tile_height = tile_dimensions
threshold = threshold * numpy.ones((height, width), dtype=numpy.uint8)
image_generator = get_image(resolution=resolution)
image1 = next(image_generator)
while True:
image2 = next(image_generator)
difference = difference_image(image1, image2, threshold)
motion = motion_coordinates(difference, tile_width,
tile_height, tile_motion)
yield motion
image1 = image2
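# Hedged usage sketch: get_motion_data() is an infinite generator (and needs
# an attached Pi camera), so a consumer simply pulls coordinate lists from
# it.  The parameter values below just restate the documented defaults.
def _example_consume_motion(frames=10):
    motion_stream = get_motion_data(resolution=(640, 480), threshold=16,
                                    tile_dimensions=(20, 20), tile_motion=0.8)
    for _ in range(frames):
        coordinates = next(motion_stream)
        if coordinates:
            print('motion detected at', coordinates)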
if __name__ == '__main__':
print("You aren't supposed to run this directly!")
| bsd-2-clause | -5,974,357,752,693,007,000 | 32.735537 | 79 | 0.656786 | false |
yukisakurai/hhntup | higgstautau/datasets.py | 1 | 53181 | """
This module generates a database of all MC and data datasets
"""
from rootpy.io import root_open, DoesNotExist
#from multiprocessing import Pool, cpu_count
import sys
from operator import itemgetter
import logging
import re
import glob
import os
import cPickle as pickle
import atexit
import fnmatch
from collections import namedtuple
import yaml
from . import log; log = log[__name__]
from .decorators import cached_property
from .yaml_utils import Serializable
from . import xsec
USE_PYAMI = True
try:
from pyAMI.client import AMIClient
from pyAMI.query import get_dataset_xsec_effic, \
get_dataset_info, \
get_datasets, \
get_provenance, \
get_periods, \
get_runs
from pyAMI import query
from pyAMI.auth import AMI_CONFIG, create_auth_config
except ImportError:
USE_PYAMI = False
log.warning("pyAMI is not installed. "
"Cross section retrieval will be disabled.")
# data types
DATA, MC, EMBED, MCEMBED = range(4)
TYPES = {
'DATA': DATA,
'MC': MC,
'EMBED': EMBED,
'MCEMBED': MCEMBED,
}
Namedset = namedtuple('Namedset',
'name tags meta properties')
Dataset = namedtuple('Dataset',
Namedset._fields + ('datatype',))
class Fileset(namedtuple('Fileset', Dataset._fields + ('files', 'treename'))):
def split(self, partitions):
files = self.files[:]
fileset_files = [[] for _ in xrange(partitions)]
while len(files) > 0:
for fileset in fileset_files:
if len(files) > 0:
fileset.append(files.pop(0))
else:
break
mydict = self._asdict()
filesets = []
for fileset in fileset_files:
mydict['files'] = fileset
filesets.append(Fileset(**mydict))
return filesets
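# Illustrative sketch (placeholder field values) of how Fileset.split() deals
# files out round-robin: three partitions of five files receive 2, 2 and 1
# files respectively.
def _example_fileset_split():
    fileset = Fileset(
        name='example', tags=None, meta=None, properties=None, datatype=MC,
        files=['a.root', 'b.root', 'c.root', 'd.root', 'e.root'],
        treename='tau')
    # -> [['a.root', 'd.root'], ['b.root', 'e.root'], ['c.root']]
    return [part.files for part in fileset.split(3)]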
class Treeset(namedtuple('Treeset', Dataset._fields + ('trees',))):
def GetEntries(self, *args, **kwargs):
return sum([tree.GetEntries(*args, **kwargs) for tree in self.trees])
def Scale(self, value):
for tree in self.trees:
tree.Scale(value)
def __iter__(self):
for tree in self.trees:
yield tree
def Draw(self, *args, **kwargs):
for tree in self.trees:
tree.Draw(*args, **kwargs)
ATLASFileset = namedtuple('ATLASFileset', Fileset._fields + ('year', 'grl',))
DS_PATTERN = re.compile(
'^(?P<prefix>\S+\.)?'
'(?P<type>(data|mc))(?P<year>\d+)_(?P<energy>\d+)TeV'
'\.(?P<id>(\d+|period[A-Z]))'
'\.(?P<name>\w+)'
'(\.PhysCont)?'
'(\.(?P<ntup>merge\.NTUP_TAU(MEDIUM)?))?'
'\.(?P<tag>\w+)'
'(\.small)?'
'(\.v(?P<version>\d+))?(_s)?'
'\.(?P<suffix>\S+)$')
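# Quick illustration of the named groups DS_PATTERN extracts; the dataset
# name below is made up but follows the mc12 naming scheme that scan()
# relies on ('type', 'year', 'id', 'name' and 'tag' in particular).
def _example_ds_pattern():
    name = ('mc12_8TeV.105200.PythiaExampleProcess'
            '.merge.NTUP_TAU.e1217_s1469_s1470_r3542_r3549_p1344.mytag')
    match = re.match(DS_PATTERN, name)
    return match.groupdict() if match else None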
MC_TAG_PATTERN1 = re.compile(
'^e(?P<evnt>\d+)_'
's(?P<digi>\d+)_'
's(?P<digimerge>\d+)_'
'r(?P<reco>\d+)_'
'r(?P<recomerge>\d+)_'
'p(?P<ntup>\d+)$')
# not all valid samples have a recomerge tag:
MC_TAG_PATTERN2 = re.compile(
'^e(?P<evnt>\d+)_'
'[sa](?P<digi>\d+)_'
'[sa](?P<digimerge>\d+)_'
'r(?P<reco>\d+)_'
'p(?P<ntup>\d+)$')
# Embedded sample pattern
EMBED_PATTERN11 = re.compile(
'^(?P<prefix>\S+)?'
'period(?P<period>[A-Z])'
'\.DESD_SGLMU'
'\.pro(?P<prod>\d+)'
'\.embedding-(?P<embedtag>\S+)?'
'\.Ztautau_'
'(?P<channel>(lh)|(hh))_'
'(?P<isol>[a-z]+)_'
'(?P<mfs>[a-z]+)_'
'rereco_'
'p(?P<tag>\d+)_'
'EXT0'
'(\.(?P<suffix>\S+))?$')
EMBED_PATTERN12 = re.compile(
'^(?P<prefix>\S+)?'
'period(?P<period>[A-Z])'
'\.DESD_ZMUMU'
'\.pro(?P<prod>\d+)'
'\.embedding-(?P<embedtag>\S+)?'
'\.Ztautau_'
'(?P<channel>(lh)|(hh))_'
'(((high)|(low))pt_)?'
'(?P<mfs>[a-z]+)_'
'filter_'
'taureco_'
'p(?P<tag>\d+)_'
'EXT0'
'(\.(?P<suffix>\S+))?$')
EMBED_PATTERN12_NEW = re.compile(
'^(?P<prefix>\S+)?'
'data12_8TeV\.'
'period(?P<period>[A-Z])\.'
'physics_Muons\.PhysCont\.'
'NTUP_EMB(?P<channel>(LH)|(HH))'
'(?P<sys>(DN)|(IM)|(UP))\.'
'(?P<suffix>\S+)')
MC_EMBED_PATTERN = re.compile(
'^(?P<prefix>\S+)?'
'Pyth8.DESD_SGLMU.pro14.embedding-01-01-10.'
'Ztautau_MCEmbedding[\d]*_hh(?P<sys>(dn)|(up))?_p1344_EXT0'
'(\.(?P<suffix>\S+))?$')
## Common lephad ntuple pattern
CN_MC_PATTERN12 = re.compile(
'^(?P<prefix>\S+\.)?'
'(?P<id>\d+)'
'\.(?P<name>\w+)'
'\.(?P<tag>\w+)'
'_lhCN'
'(v(?P<version1>\d+))?'
'(-(?P<version2>\d+))?'
'(-(?P<version3>\d+))?'
'\.(?P<suffix>\S+)$')
CN_DATA_PATTERN12 = re.compile(
'^(?P<prefix>\S+\.)?'
'data12_8TeV\.'
'(?P<id>\S+)'
'\.(?P<name>\w+)'
'((\.TunaCont.2013-March-29.v03)?)'
'\.(?P<tag>\w+)'
'_lhCN'
'(v(?P<version1>\d+))?'
'(-(?P<version2>\d+))?'
'\.(?P<suffix>\S+)$')
CN_EMBED_PATTERN12 = re.compile(
'^(?P<prefix>\S+\.)?'
'data12_8TeV\.'
'(?P<id>\S+)'
'\.(?P<name>\w+)'
'\.PhysCont'
'((\.NTUP_EMB)?)'
'(?P<channel>(LH)|(HH))'
'(?P<mfs>(IM)|(UP)|(DN))'
'\.grp14_v02'
'\_(?P<tag>\w+)'
'_lhCN'
'(v(?P<version1>\d+))?'
'(-(?P<version2>\d+))?'
'(-(?P<version3>\d+))?'
'\.(?P<suffix>\S+)$')
# MC[11|12][a|b|c|...] categories are defined here
# Each MC dataset is automatically classified
# according to these categories by matching the reco
# and merge tags of the dataset name.
# Order by decreasing preference:
MC_CATEGORIES = {
'mc11a': {'reco': (2730, 2731),
'merge': (2780, 2700)},
'mc11b': {'reco': (2920, 2923),
'merge': (3063, 2993, 2900)},
'mc11c': {'reco': (3043, 3060, 3108),
'merge': (3109, 3063, 2993)},
'mc12a': {'reco': (3753, 3752, 3658, 3605, 3553, 3542, 3549),
'merge': (3549,)},
'mc12b': {'reco': (4485, 5470,),
'merge': (4540,)}}
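# Sketch of how a reconstruction tag is mapped onto one of the MC_CATEGORIES
# above; this mirrors the classification loop in Database.scan() further
# down.  The tag string is hypothetical.
def _example_mc_category(tag='e1217_s1469_s1470_r3542_p1344'):
    tag_match = re.match(MC_TAG_PATTERN2, tag)
    if not tag_match:
        return None
    reco = int(tag_match.group('reco'))
    for cat_name, cat_params in MC_CATEGORIES.items():
        if reco in cat_params['reco']:
            return cat_name   # 'mc12a' for the default tag above
    return None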
HERE = os.path.dirname(os.path.abspath(__file__))
# Any datasets which don't have the provenance stored properly in AMI
# should be hardcoded here (it happens)
DS_NOPROV = {}
# Cross-sections are cached so that we don't need to keep asking AMI
# for them over and over
XSEC_CACHE_FILE = os.path.join(HERE, 'xsec', 'cache.pickle')
XSEC_CACHE_MODIFIED = False
XSEC_CACHE = {}
if USE_PYAMI:
amiclient = AMIClient()
if not os.path.exists(AMI_CONFIG):
create_auth_config()
amiclient.read_config(AMI_CONFIG)
class NoMatchingDatasetsFound(Exception):
pass
GLOBAL_BASE = '/global/'
def find_global(path):
if not path.startswith('/global/'):
raise ValueError("path must be absolute and rooted at /global")
path = re.sub('^/global/', '/cluster/data%02d/export/', path)
for node in range(1, 13):
if os.path.exists(path % node):
return path % node
raise IOError('path %s does not exist' % path)
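# Hedged illustration of the path rewriting done by find_global() above: the
# /global/ prefix is expanded to the per-node export template and nodes 1-12
# are probed in turn.  The path below is a made-up placeholder.
def _example_global_candidates(path='/global/higgstautau/ntuples'):
    template = re.sub('^/global/', '/cluster/data%02d/export/', path)
    # find_global() returns the first of these that exists on disk
    return [template % node for node in range(1, 13)]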
class Database(dict):
@classmethod
def match_to_ds(cls, match):
"""
Construct the original NTUP dataset name from a skim match object
"""
if match.group('year') == '11':
ntup = 'merge.NTUP_TAUMEDIUM'
else:
ntup = 'merge.NTUP_TAU'
return '%s%s_%sTeV.%s.%s.%s.%s' % (
match.group('type'),
match.group('year'),
match.group('energy'),
match.group('id'),
match.group('name'),
ntup,
match.group('tag'))
def __init__(self, name='datasets', verbose=False, stream=None):
super(Database, self).__init__()
self.name = name
self.verbose = verbose
self.filepath = os.path.join(HERE, '%s.yml' % self.name)
if os.path.isfile(self.filepath):
with open(self.filepath) as db:
log.info("Loading database '%s' ..." % self.name)
d = yaml.load(db)
if d:
self.update(d)
self.modified = False
if stream is None:
self.stream = sys.stdout
else:
self.stream = stream
def write(self):
if self.modified:
with open(self.filepath, 'w') as db:
log.info("Saving database '%s' ..." % self.name)
yaml.dump(dict(self), db)
def reset(self):
return self.clear()
def clear(self):
# erase all datasets in database
log.info("Resetting database '%s' ..." % self.name)
super(Database, self).clear()
self.modified = True
def validate(self,
pattern=None,
datatype=None,
year=None):
ds = {}
for name, info in self.items():
if year is not None and info.year != year:
continue
if datatype is not None and info.datatype != datatype:
continue
if info.datatype == DATA and info.id < 0:
# only validate data run datasets
continue
if pattern is None or fnmatch.fnmatch(name, pattern):
ds[name] = info
incomplete = []
for name, info in sorted(ds.items(), key=lambda item: item[0]):
log.info("Validating %s ..." % name)
complete = validate_single((name, info), child=False)
log.info("Complete: %s" % complete)
log.info('-' * 50)
if not complete:
incomplete.append(info.ds)
#pool = Pool(processes=cpu_count())
#for result, complete in pool.map(
# validate_single, sorted(ds.items(), key=itemgetter(0))):
# print result
# print "Complete: %s" % complete
# print '-'*50
# if not complete:
# all_complete = False
if not incomplete:
log.info("ALL DATASETS ARE COMPLETE")
else:
log.warning("SOME DATASETS ARE NOT COMPLETE:")
for ds in incomplete:
print ds
def scan(self, year,
mc_path=None,
mc_prefix=None,
mc_pattern=None,
mc_treename=None,
mc_sampletype=None,
data_path=None,
data_prefix=None,
data_pattern=None,
data_treename=None,
data_sampletype=None,
data_grl=None,
data_period_containers=False,
embed_path=None,
embed_prefix=None,
embed_pattern=None,
embed_treename=None,
embed_sampletype=None,
versioned=False,
deep=False):
"""
Update the dataset database
"""
log.info("Updating database '%s' ..." % self.name)
self.modified = True
###############################
# MC
###############################
if mc_path is not None:
if deep:
mc_dirs = get_all_dirs_under(mc_path, prefix=mc_prefix)
else:
if mc_prefix:
mc_dirs = glob.glob(os.path.join(mc_path, mc_prefix) + '*')
else:
mc_dirs = glob.glob(os.path.join(mc_path, '*'))
for dir in mc_dirs:
dirname, basename = os.path.split(dir)
if mc_sampletype == 'standard':
match = re.match(DS_PATTERN, basename)
if match:
if int(match.group('year')) != (year % 1E3):
continue
if match.group('type') != 'mc':
continue
ds_name = Database.match_to_ds(match)
name = match.group('name')
tag = match.group('tag')
try:
version = int(match.group('version'))
except IndexError:
version = 0
except:
log.warning(basename)
raise
tag_match = re.match(MC_TAG_PATTERN1, tag)
tag_match2 = re.match(MC_TAG_PATTERN2, tag)
MC_TAG_PATTERN = MC_TAG_PATTERN1
if (tag_match2 and not tag_match) :
tag_match = tag_match2
MC_TAG_PATTERN = MC_TAG_PATTERN2
if not tag_match:
log.warning("not tag-matched: %s" % basename)
continue
cat = None
for cat_name, cat_params in MC_CATEGORIES.items():
if int(tag_match.group('reco')) in cat_params['reco']:
cat = cat_name
break
if cat is None:
log.warning(
"does not match a category: %s" % basename)
continue
name += '.' + cat
dataset = self.get(name, None)
if dataset is not None and version == dataset.version:
if tag != dataset.tag:
this_reco = int(tag_match.group('reco'))
other_reco = int(
re.match(dataset.tag_pattern,
dataset.tag).group('reco'))
use_mergetag = True
try:
this_merge = int(tag_match.group('recomerge'))
other_merge = int(
re.match(dataset.tag_pattern,
dataset.tag).group('recomerge'))
except IndexError:
use_mergetag = False
cat_params = MC_CATEGORIES[cat]
reco_tags = list(cat_params['reco'])
merge_tags = list(cat_params['merge'])
assert(this_reco in reco_tags and other_reco in reco_tags)
take_this = False
if reco_tags.index(this_reco) < reco_tags.index(other_reco):
take_this = True
elif (use_mergetag and this_reco == other_reco and
(merge_tags.index(this_merge) <
merge_tags.index(other_merge))):
take_this = True
if take_this:
log.warning("taking %s over %s" % (
basename, dataset.ds))
self[name] = Dataset(
name=name,
datatype=MC,
treename=mc_treename,
ds=ds_name,
id=int(match.group('id')),
category=cat,
version=version,
tag_pattern=MC_TAG_PATTERN.pattern,
tag=tag,
dirs=[dir],
file_pattern=mc_pattern,
year=year)
elif dir not in dataset.dirs:
dataset.dirs.append(dir)
elif dataset is None or (
dataset is not None and version > dataset.version):
self[name] = Dataset(
name=name,
datatype=MC,
treename=mc_treename,
ds=ds_name,
id=int(match.group('id')),
category=cat,
version=version,
tag_pattern=MC_TAG_PATTERN.pattern,
tag=tag,
dirs=[dir],
file_pattern=mc_pattern,
year=year)
elif self.verbose:
log.warning("not a valid mc dataset name: %s" % basename)
elif mc_sampletype == 'lhCN':
match = re.match(CN_MC_PATTERN12, basename)
if match:
name = match.group('name')
cat = 'mc12a'
tag = match.group('tag')
year = 2012
## Calculate a version int
version_1 = match.group('version1')
version_2 = match.group('version2')
version = int(version_1)*1000 + int(version_2)*10
dataset = self.get(name, None)
if dataset is not None and version == dataset.version:
if dir not in dataset.dirs:
dataset.dirs.append(dir)
else:
log.info('\'%s\',' % name)
self[name] = Dataset(
name=name,
datatype=MC,
treename=mc_treename,
ds=name,
id=int(match.group('id')),
category=cat,
version=version,
tag_pattern=None,
tag=tag,
dirs=[dir],
file_pattern=mc_pattern,
year=year)
#####################################
# EMBEDDING
#####################################
if embed_path is not None:
if deep:
embed_dirs = get_all_dirs_under(embed_path, prefix=embed_prefix)
else:
if embed_prefix:
embed_dirs = glob.glob(
os.path.join(embed_path, embed_prefix) + '*')
else:
embed_dirs = glob.glob(
os.path.join(embed_path, '*'))
if embed_sampletype == 'new':
EMBED_PATTERN = EMBED_PATTERN12_NEW
# determine what channels are available
channels = {}
for dir in embed_dirs:
if os.path.isdir(dir):
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
channel = match.group('channel')
if channel not in channels:
channels[channel] = []
channels[channel].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
elif self.verbose:
log.warning("skipping file: %s" % dir)
for channel, channel_dirs in channels.items():
syst = {}
for dir in channel_dirs:
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
isol = match.group('sys')
if isol not in syst:
syst[isol] = []
syst[isol].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
for syst_type, dirs in syst.items():
name = 'embed%d-%s-%s' % (
year % 1000, channel, syst_type)
self[name] = Dataset(
name,
datatype=EMBED,
treename=embed_treename,
ds=name,
id=1,
grl=data_grl,
dirs=dirs,
file_pattern=embed_pattern,
year=year)
elif embed_sampletype == 'standard':
if year == 2011:
EMBED_PATTERN = EMBED_PATTERN11
else:
EMBED_PATTERN = EMBED_PATTERN12
# determine what channels are available
channels = {}
for dir in embed_dirs:
if os.path.isdir(dir):
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
channel = match.group('channel')
if channel not in channels:
channels[channel] = []
channels[channel].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
elif self.verbose:
log.warning("skipping file: %s" % dir)
for channel, channel_dirs in channels.items():
if year == 2011:
# group dirs by isolation
isols = {}
for dir in channel_dirs:
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
isol = match.group('isol')
if isol not in isols:
isols[isol] = []
isols[isol].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
for isol, isol_dirs in isols.items():
# group dirs by mfs
mfss = {}
for dir in isol_dirs:
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
mfs = match.group('mfs')
if mfs not in mfss:
mfss[mfs] = []
mfss[mfs].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
for mfs, mfs_dirs in mfss.items():
name = 'embed%d-%s-%s-%s' % (
year % 1000, channel, isol, mfs)
self[name] = Dataset(
name,
datatype=EMBED,
treename=embed_treename,
ds=name,
id=1,
grl=data_grl,
dirs=mfs_dirs,
file_pattern=embed_pattern,
year=year)
periods = {}
for dir in mfs_dirs:
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
period = match.group('period')
tag = match.group('tag')
if period not in periods:
periods[period] = {'tag': tag, 'dirs': [dir]}
else:
periods[period]['dirs'].append(dir)
if tag != periods[period]['tag']:
log.warning(
'multiple copies of run with '
'different tags: %s' %
periods[period]['dirs'])
elif self.verbose:
log.warning(
"not a valid embeding dataset name: %s"
% basename)
for period, info in periods.items():
period_name = '%s-%s' % (name, period)
self[period_name] = Dataset(
name=period_name,
datatype=EMBED,
treename=embed_treename,
ds=period_name,
id=1,
grl=data_grl,
dirs=info['dirs'],
file_pattern=embed_pattern,
year=year)
else:
# group dirs by mfs
mfss = {}
for dir in channel_dirs:
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
mfs = match.group('mfs')
if mfs not in mfss:
mfss[mfs] = []
mfss[mfs].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
for mfs, mfs_dirs in mfss.items():
name = 'embed%d-%s-%s' % (
year % 1000, channel, mfs)
self[name] = Dataset(
name,
datatype=EMBED,
treename=embed_treename,
ds=name,
id=1,
grl=data_grl,
dirs=mfs_dirs,
file_pattern=embed_pattern,
year=year)
periods = {}
for dir in mfs_dirs:
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
period = match.group('period')
tag = match.group('tag')
if period not in periods:
periods[period] = {'tag': tag, 'dirs': [dir]}
else:
periods[period]['dirs'].append(dir)
if tag != periods[period]['tag']:
log.warning(
'multiple copies of run with '
'different tags: %s' %
periods[period]['dirs'])
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
for period, info in periods.items():
period_name = '%s-%s' % (name, period)
self[period_name] = Dataset(
name=period_name,
datatype=EMBED,
treename=embed_treename,
ds=period_name,
id=1,
grl=data_grl,
dirs=info['dirs'],
file_pattern=embed_pattern,
year=year)
elif embed_sampletype == 'lhCN':
year = 2012
channels = {}
for dir in embed_dirs:
if os.path.isdir(dir):
dirname, basename = os.path.split(dir)
match = re.match(CN_EMBED_PATTERN12, basename)
if match:
channel = match.group('channel')
if channel not in channels:
channels[channel] = []
channels[channel].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
elif self.verbose:
log.warning("skipping file: %s" % dir)
for channel, channel_dirs in channels.items():
# group dirs by mfs
mfss = {}
for dir in channel_dirs:
dirname, basename = os.path.split(dir)
match = re.match(CN_EMBED_PATTERN12, basename)
if match:
mfs = match.group('mfs')
if mfs not in mfss:
mfss[mfs] = []
mfss[mfs].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
for mfs, mfs_dirs in mfss.items():
name = 'embed%d-%s-%s' % (
year % 1000, channel, mfs)
self[name] = Dataset(
name,
datatype=EMBED,
treename=embed_treename,
ds=name,
id=1,
grl=data_grl,
dirs=mfs_dirs,
file_pattern=embed_pattern,
year=year)
# MC EMBEDDING
variations = {}
for dir in embed_dirs:
dirname, basename = os.path.split(dir)
match = re.match(MC_EMBED_PATTERN, basename)
if not match:
continue
syst = match.group('sys') or ''
variations.setdefault(syst, []).append(dir)
for variation, dirs in variations.items():
name = 'mcembed12-hh%s' % variation
self[name] = Dataset(
name,
datatype=MCEMBED,
treename=embed_treename,
ds=name,
id=1,
dirs=dirs,
file_pattern=embed_pattern,
year=2012)
##############################
# DATA
##############################
if data_path is not None:
if deep:
data_dirs = get_all_dirs_under(data_path, prefix=data_prefix)
else:
if data_prefix:
data_dirs = glob.glob(
os.path.join(data_path, data_prefix) + '*')
else:
data_dirs = glob.glob(
os.path.join(data_path, '*'))
if data_sampletype == 'standard':
# classify dir by stream
streams = {}
for dir in data_dirs:
dirname, basename = os.path.split(dir)
match = re.match(DS_PATTERN, basename)
if match:
# pass embed
if re.match(EMBED_PATTERN12_NEW, basename) or \
re.match(EMBED_PATTERN12, basename) or \
re.match(EMBED_PATTERN11, basename) :
continue
if int(match.group('year')) != (year % 1E3):
continue
if match.group('type') != 'data':
continue
stream = match.group('name').split('_')[-1]
if stream not in streams:
streams[stream] = []
streams[stream].append(dir)
elif self.verbose:
log.warning(
"not a valid data dataset name: %s" % basename)
for stream, dirs in streams.items():
name = 'data%d-%s' % (year % 1000, stream)
self[name] = Dataset(
name=name,
datatype=DATA,
treename=data_treename,
ds=name,
id=-1,
# The GRL is the same for both lephad and hadhad analyses
grl=data_grl,
dirs=dirs,
stream=stream,
file_pattern=data_pattern,
year=year)
if data_period_containers:
# in each stream create a separate dataset for each run
periods = {}
for dir in dirs:
dirname, basename = os.path.split(dir)
match = re.match(DS_PATTERN, basename)
if match:
period = match.group('id')
if not period.startswith('period'):
continue
tag = match.group('tag')
if period not in periods:
periods[period] = {
'tag': tag,
'dirs': [dir],
'ds': Database.match_to_ds(match)}
else:
periods[period]['dirs'].append(dir)
if tag != periods[period]['tag']:
log.warning(
'multiple copies of period with different '
'tags: \n%s' %
('\n'.join(periods[period]['dirs'])))
elif self.verbose:
log.warning(
"not a valid data dataset name: %s" % basename)
# need to use the actual ds name for ds for validation
for period, info in periods.items():
name = 'data%d-%s-%s' % (year % 1000, stream, period[-1])
self[name] = Dataset(
name=name,
datatype=DATA,
treename=data_treename,
ds=name,
id=period,
grl=data_grl,
dirs=info['dirs'],
stream=stream,
file_pattern=data_pattern,
year=year)
else:
# in each stream create a separate dataset for each run
runs = {}
for dir in dirs:
dirname, basename = os.path.split(dir)
match = re.match(DS_PATTERN, basename)
if match:
run = int(match.group('id'))
tag = match.group('tag')
if run not in runs:
runs[run] = {
'tag': tag,
'dirs': [dir],
'ds': Database.match_to_ds(match)}
else:
runs[run]['dirs'].append(dir)
if tag != runs[run]['tag']:
log.warning(
'multiple copies of run with different '
'tags: %s' % runs[run]['dirs'])
elif self.verbose:
log.warning(
"not a valid data dataset name: %s" % basename)
# need to use the actual ds name for ds for validation
for run, info in runs.items():
name = 'data%d-%s-%d' % (year % 1000, stream, run)
self[name] = Dataset(
name=name,
datatype=DATA,
treename=data_treename,
ds=name,
id=run,
grl=data_grl,
dirs=info['dirs'],
stream=stream,
file_pattern=data_pattern,
year=year)
if USE_PYAMI:
# in each stream create a separate dataset for each period
run_periods = get_periods(amiclient, year=year, level=2)
# ignore subset periods like Ba in 2012
run_periods = [
p.name for p in run_periods if len(p.name) == 1]
period_runs = {}
for period in run_periods:
if period == 'VdM':
continue
_runs = get_runs(amiclient, periods=period, year=year)
for run in _runs:
period_runs[run] = period
periods = {}
for run, info in runs.items():
if run in period_runs:
_period = period_runs[run]
else:
# ignore spurious runs
continue
if _period in periods:
periods[_period] += info['dirs']
else:
periods[_period] = info['dirs'][:]
for period, dirs in periods.items():
name = 'data%d-%s-%s' % (year % 1000, stream, period)
self[name] = Dataset(
name=name,
datatype=DATA,
treename=data_treename,
ds=name,
id=-1,
grl=data_grl,
dirs=dirs,
stream=stream,
file_pattern=data_pattern,
year=year)
elif data_sampletype == 'lhCN':
year = 2012
streams = {}
for dir in data_dirs:
match = re.match(CN_DATA_PATTERN12, dir)
if match:
stream = match.group('name')
if stream not in streams:
streams[stream] = []
streams[stream].append(dir)
elif self.verbose:
log.warning("not a valid data dataset name: %s" % dir)
for stream, dirs in streams.items():
name = 'data%d-%s' % (year % 1000, stream)
log.info('\'%s\',' % name)
self[name] = Dataset(
name=name,
datatype=DATA,
treename=data_treename,
ds=name,
id=-1,
# The GRL is the same for both lephad and hadhad analyses
grl=data_grl,
dirs=dirs,
stream=stream,
file_pattern=data_pattern,
year=year)
# in each stream create a separate dataset for each period
periods = {}
for dir in dirs:
match = re.match(CN_DATA_PATTERN12, dir)
if match:
period = match.group('id')
tag = match.group('tag')
if period not in periods:
periods[period] = {
'tag': tag,
'dirs': [dir],
'ds': -1}
else:
periods[period]['dirs'].append(dir)
if tag != periods[period]['tag']:
log.warning(
'multiple copies of period with different '
'tags: %s' % periods[period]['dirs'])
elif self.verbose:
log.warning(
"not a valid data dataset name: %s" % dir)
# need to use the actual ds name for ds for validation
for period, info in periods.items():
name = 'data%d-%s-%s' % (year % 1000, stream, period)
log.info('\'%s\',' % name)
self[name] = Dataset(
name=name,
datatype=DATA,
treename=data_treename,
ds=info['ds'],
id=period,
grl=data_grl,
dirs=info['dirs'],
stream=stream,
file_pattern=data_pattern,
year=year)
def __setitem__(self, name, ds):
if self.verbose:
print >> self.stream, str(ds)
super(Database, self).__setitem__(name, ds)
def search(self, pattern):
data = []
patterns = pattern
if not isinstance(pattern, (list, tuple)):
patterns = [pattern]
for name, ds in self.items():
for pattern in patterns:
if fnmatch.fnmatch(name, pattern):
data.append(ds)
continue
if not pattern.startswith('^'):
pattern = '^' + pattern
if not pattern.endswith('$'):
pattern = pattern + '$'
if re.match(pattern, name):
data.append(ds)
continue
return data
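# Illustrative usage sketch (not part of the original module): Database.search
# accepts a single pattern or a list of patterns; each dataset name is matched
# first with an fnmatch-style glob and then with an anchored regular expression.
# "db" is assumed to be an already-populated Database instance.
def _example_search_datasets(db):
    embedding = db.search('embed12-*')            # glob match
    data_periods = db.search('data12-.*-[A-Z]')   # regex match (anchors added)
    return embedding, data_periods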
class Dataset(Serializable):
yaml_tag = u'!Dataset'
def __init__(self, name, datatype, treename, ds, dirs,
file_pattern='*.root*',
id=None,
category=None,
version=None,
tag_pattern=None,
tag=None,
grl=None,
year=None,
stream=None):
self.name = name
self.datatype = datatype
self.treename = treename
self.id = id
self.ds = ds
self.category = category
self.version = version
self.tag_pattern = tag_pattern
self.tag = tag
self.dirs = dirs
self.file_pattern = file_pattern
self.grl = grl
self.year = year
self.stream = stream
def __repr__(self):
return ("%s(name=%r, datatype=%r, treename=%r, "
"id=%r, ds=%r, category=%r, version=%r, "
"tag_pattern=%r, tag=%r, dirs=%r, "
"file_pattern=%r, grl=%r, year=%r, stream=%r)") % (
self.__class__.__name__,
self.name, self.datatype, self.treename,
self.id, self.ds, self.category, self.version,
self.tag_pattern, self.tag, self.dirs,
self.file_pattern, self.grl, self.year, self.stream)
@cached_property
def xsec_kfact_effic(self):
global XSEC_CACHE_MODIFIED
year = self.year % 1E3
if self.datatype == DATA:
return 1., 1., 1.
if year in XSEC_CACHE and self.name in XSEC_CACHE[year]:
log.warning("using cached cross section for dataset %s" % self.ds)
return XSEC_CACHE[year][self.name]
try:
return xsec.xsec_kfact_effic(self.year, self.id)
except KeyError:
log.warning("cross section of dataset %s not available locally."
"Looking it up in AMI instead. AMI cross sections can be very"
"wrong! You have been warned!"
% self.ds)
if USE_PYAMI:
if self.ds in DS_NOPROV:
xs, effic = get_dataset_xsec_effic(amiclient, DS_NOPROV[self.ds])
else:
xs, effic = get_dataset_xsec_effic(amiclient, self.ds)
if year not in XSEC_CACHE:
XSEC_CACHE[year] = {}
XSEC_CACHE[year][self.name] = (xs, 1., effic)
XSEC_CACHE_MODIFIED = True
return xs, 1., effic
raise Exception("cross section of dataset %s is not known!" % self.ds)
@cached_property
def files(self):
if not self.dirs:
log.warning(
"files requested from dataset %s "
"with an empty list of directories" % self.name)
_files = []
for dir in self.dirs:
if not os.path.exists(dir):
raise IOError("%s is not readable" % dir)
for path, dirs, files in os.walk(dir):
_files += [os.path.join(path, f) for f in
fnmatch.filter(files, self.file_pattern)]
return _files
def __str__(self):
return "%s (%d files):\n\t%s" % (
self.name,
len(self.files),
self.ds)
def dataset_constructor(loader, node):
kwargs = loader.construct_mapping(node)
try:
return Dataset(**kwargs)
except:
fields = '\n'.join('%s = %s' % item for item in kwargs.items())
log.error("unable to load dataset %s with these fields:\n\n%s\n" %
(kwargs['name'], fields))
raise
yaml.add_constructor(u'!Dataset', dataset_constructor)
if os.path.isfile(XSEC_CACHE_FILE):
with open(XSEC_CACHE_FILE) as cache:
log.info("Loading cross section cache in %s ..." % XSEC_CACHE_FILE)
XSEC_CACHE = pickle.load(cache)
@atexit.register
def write_cache():
if XSEC_CACHE_MODIFIED:
with open(XSEC_CACHE_FILE, 'w') as cache:
log.info("Saving cross-section cache to disk...")
pickle.dump(XSEC_CACHE, cache)
def validate_single(args, child=True):
if child:
from cStringIO import StringIO
sys.stdout = out = StringIO()
sys.stderr = out
name = args[0]
info = args[1]
complete = True
try:
dirs = info.dirs
root_files = []
for dir in dirs:
root_files += glob.glob(os.path.join(dir, info.file_pattern))
events = 0
for fname in root_files:
try:
with root_open(fname) as rfile:
try: # skimmed dataset
events += int(rfile.cutflow_event[0])
except DoesNotExist: # unskimmed dataset
tree = rfile.tau
events += tree.GetEntries()
except IOError:
log.warning("Currupt file: %s" % fname)
pass
# determine events in original ntuples
# use first dir
ds_name = info.ds
log.info('NTUP: ' + ds_name)
ds_info = get_dataset_info(amiclient, ds_name)
ntuple_events = int(ds_info.info['totalEvents'])
try:
# determine events in AODs
prov = get_provenance(amiclient, ds_name, type='AOD')
AOD_ds = prov.values()[0][0].replace('recon', 'merge')
log.info('AOD: ' + AOD_ds)
AOD_events = int(get_datasets(amiclient, AOD_ds, fields='events',
flatten=True)[0][0])
except IndexError:
log.info('AOD: UNKNOWN')
AOD_events = ntuple_events
log.info(name)
log.info("\tevts\tNTUP\tAOD")
log.info("\t%i\t%i\t%i" % (events, ntuple_events, AOD_events))
if events != ntuple_events:
log.warning("NTUP MISMATCH")
if events != AOD_events:
log.warning("AOD MISMATCH")
if events != ntuple_events and (events != AOD_events or AOD_events == 0):
log.warning("MISSING EVENTS")
complete = False
if child:
return out.getvalue(), complete
return complete
except Exception, e:
import traceback
log.warning("dataset %s exception" % name)
traceback.print_exception(*sys.exc_info())
if child:
return out.getvalue(), False
return False
def get_all_dirs_under(path, prefix=None):
"""
Get list of all directories under path
"""
dirs = []
for dirpath, dirnames, filenames in os.walk(path):
_dirnames = []
for dirname in dirnames:
fullpath = os.path.join(dirpath, dirname)
# check if this dir contains other dirs
subdirs_exist = False
subdirs = os.listdir(fullpath)
for subdir in subdirs:
if os.path.isdir(os.path.join(fullpath, subdir)):
subdirs_exist = True
break
if subdirs_exist:
_dirnames.append(dirname)
else:
# this must be a dataset, don't walk into this dir
if prefix is not None:
if not dirname.startswith(prefix):
continue
dirs.append(fullpath)
# only recurse on directories containing subdirectories
dirnames = _dirnames
return dirs
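# Hypothetical example (not in the original source): get_all_dirs_under only
# returns "leaf" directories, i.e. directories that contain no subdirectories,
# optionally restricted to names starting with a prefix. This is how dataset
# directories are discovered when deep=True; the path below is an assumption.
def _example_find_leaf_dataset_dirs():
    return get_all_dirs_under('/data/ntuples', prefix='user.')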
| gpl-3.0 | -2,071,067,147,111,899,000 | 39.565217 | 92 | 0.391756 | false |
sahat/bokeh | bokeh/plot_object.py | 1 | 14108 | from __future__ import absolute_import, print_function
import os.path
from uuid import uuid4
from functools import wraps
import warnings
import logging
logger = logging.getLogger(__file__)
from six import add_metaclass, iteritems
from six.moves.urllib.parse import urlsplit
from .embed import autoload_static, autoload_server
from .properties import HasProps, MetaHasProps, Instance
from .protocol import serialize_json
from .utils import get_ref, convert_references, dump
class Viewable(MetaHasProps):
""" Any plot object (Data Model) which has its own View Model in the
persistence layer.
Adds handling of a __view_model__ attribute to the class (which is
provided by default) which tells the View layer what View class to
create.
One thing to keep in mind is that a Viewable should have a single
unique representation in the persistence layer, but it might have
multiple concurrent client-side Views looking at it. Those may
be from different machines altogether.
"""
# Stores a mapping from subclass __view_model__ names to classes
model_class_reverse_map = {}
# Mmmm.. metaclass inheritance. On the one hand, it seems a little
# overkill. On the other hand, this is exactly the sort of thing
# it's meant for.
def __new__(cls, class_name, bases, class_dict):
if "__view_model__" not in class_dict:
class_dict["__view_model__"] = class_name
class_dict["get_class"] = Viewable.get_class
# Create the new class
newcls = super(Viewable,cls).__new__(cls, class_name, bases, class_dict)
entry = class_dict["__view_model__"]
# Add it to the reverse map, but check for duplicates first
if entry in Viewable.model_class_reverse_map:
raise Warning("Duplicate __view_model__ declaration of '%s' for " \
"class %s. Previous definition: %s" % \
(entry, class_name,
Viewable.model_class_reverse_map[entry]))
Viewable.model_class_reverse_map[entry] = newcls
return newcls
@classmethod
def _preload_models(cls):
from . import objects, widgetobjects
@classmethod
def get_class(cls, view_model_name):
""" Given a __view_model__ name, returns the corresponding class
object
"""
cls._preload_models()
d = Viewable.model_class_reverse_map
if view_model_name in d:
return d[view_model_name]
else:
raise KeyError("View model name '%s' not found" % view_model_name)
def usesession(meth):
""" Checks for 'session' in kwargs and in **self**, and guarantees
that **kw** always has a valid 'session' parameter. Wrapped methods
should define 'session' as an optional argument, and in the body of
    the method, should expect a valid 'session' to always be present in **kw**.
"""
@wraps(meth)
def wrapper(self, *args, **kw):
session = kw.get("session", None)
if session is None:
session = getattr(self, "session")
if session is None:
raise RuntimeError("Call to %s needs a session" % meth.__name__)
kw["session"] = session
return meth(self, *args, **kw)
return wrapper
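# Hedged example (hypothetical class, not from bokeh): usesession guarantees the
# wrapped method always receives a usable 'session' keyword argument, falling
# back to self.session when the caller does not pass one explicitly.
class _ExampleSessionUser(object):
    def __init__(self, session=None):
        self.session = session
    @usesession
    def push(self, session=None):
        # 'session' is guaranteed to be set here (or a RuntimeError was raised)
        return session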
def is_ref(frag):
return isinstance(frag, dict) and \
frag.get('type') and \
frag.get('id')
def json_apply(fragment, check_func, func):
"""recursively searches through a nested dict/lists
if check_func(fragment) is True, then we return
func(fragment)
"""
if check_func(fragment):
return func(fragment)
elif isinstance(fragment, list):
output = []
for val in fragment:
output.append(json_apply(val, check_func, func))
return output
elif isinstance(fragment, dict):
output = {}
for k, val in fragment.items():
output[k] = json_apply(val, check_func, func)
return output
else:
return fragment
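# Illustrative sketch: json_apply walks nested lists/dicts and rewrites only the
# fragments selected by check_func; resolve_json (below) uses exactly this to
# swap {'type': ..., 'id': ...} references for model instances.
def _example_uppercase_strings(fragment):
    return json_apply(fragment,
                      lambda frag: isinstance(frag, str),
                      lambda frag: frag.upper())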
def resolve_json(fragment, models):
check_func = is_ref
def func(fragment):
if fragment['id'] in models:
return models[fragment['id']]
else:
logging.error("model not found for %s", fragment)
return None
return json_apply(fragment, check_func, func)
@add_metaclass(Viewable)
class PlotObject(HasProps):
""" Base class for all plot-related objects """
session = Instance(".session.Session")
def __init__(self, **kwargs):
# Eventually should use our own memo instead of storing
# an attribute on the class
if "id" in kwargs:
self._id = kwargs.pop("id")
else:
self._id = str(uuid4())
self._dirty = True
self._callbacks_dirty = False
self._callbacks = {}
self._callback_queue = []
self._block_callbacks = False
block_events = kwargs.pop('_block_events', False)
if not block_events:
super(PlotObject, self).__init__(**kwargs)
self.setup_events()
else:
self._block_callbacks = True
super(PlotObject, self).__init__(**kwargs)
def get_ref(self):
return {
'type': self.__view_model__,
'id': self._id,
}
def setup_events(self):
pass
@classmethod
def load_json(cls, attrs, instance=None):
"""Loads all json into a instance of cls, EXCEPT any references
which are handled in finalize
"""
if 'id' not in attrs:
raise RuntimeError("Unable to find 'id' attribute in JSON: %r" % attrs)
_id = attrs.pop('id')
if not instance:
instance = cls(id=_id, _block_events=True)
_doc = attrs.pop("doc", None)
ref_props = {}
for p in instance.properties_with_refs():
if p in attrs:
ref_props[p] = attrs.pop(p)
special_props = {}
for p in dict(attrs):
if p not in instance.properties():
special_props[p] = attrs.pop(p)
instance._ref_props = ref_props
instance._special_props = special_props
instance.update(**attrs)
return instance
def finalize(self, models):
"""Convert any references into instances
models is a dict of id->model mappings
"""
if hasattr(self, "_ref_props"):
return resolve_json(self._ref_props, models)
else:
return {}
@classmethod
def collect_plot_objects(cls, *input_objs):
""" Iterate over ``input_objs`` and descend through their structure
collecting all nested ``PlotObjects`` on the go. The resulting list
is duplicate-free based on objects' identifiers.
"""
ids = set([])
objs = []
def descend_props(obj):
for attr in obj.properties_with_refs():
descend(getattr(obj, attr))
def descend(obj):
if isinstance(obj, PlotObject):
if obj._id not in ids:
ids.add(obj._id)
descend_props(obj)
objs.append(obj)
elif isinstance(obj, HasProps):
descend_props(obj)
elif isinstance(obj, (list, tuple)):
for item in obj:
descend(item)
elif isinstance(obj, dict):
for key, value in iteritems(obj):
descend(key); descend(value)
descend(input_objs)
return objs
def references(self):
"""Returns all ``PlotObjects`` that this object has references to. """
return set(self.collect_plot_objects(self))
#---------------------------------------------------------------------
# View Model connection methods
#
# Whereas a rich client rendering framework can maintain view state
# alongside model state, we need an explicit send/receive protocol for
# communicating with a set of view models that reside on the front end.
# Many of the calls one would expect in a rich client map instead to
# batched updates on the M-VM-V approach.
#---------------------------------------------------------------------
def vm_props(self):
""" Returns the ViewModel-related properties of this object. """
props = self.changed_properties_with_values()
props.pop("session", None)
return props
def vm_serialize(self):
""" Returns a dictionary of the attributes of this object, in
a layout corresponding to what BokehJS expects at unmarshalling time.
"""
attrs = self.vm_props()
attrs['id'] = self._id
return attrs
def dump(self, docid=None):
"""convert all references to json
"""
models = self.references()
return dump(models, docid=docid)
def update(self, **kwargs):
for k,v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return "%s, ViewModel:%s, ref _id: %s" % (self.__class__.__name__,
self.__view_model__, getattr(self, "_id", None))
def on_change(self, attrname, obj, callbackname=None):
"""when attrname of self changes, call callbackname
on obj
"""
callbacks = self._callbacks.setdefault(attrname, [])
callback = dict(obj=obj, callbackname=callbackname)
if callback not in callbacks:
callbacks.append(callback)
self._callbacks_dirty = True
def _trigger(self, attrname, old, new):
"""attrname of self changed. So call all callbacks
"""
callbacks = self._callbacks.get(attrname)
if callbacks:
for callback in callbacks:
obj = callback.get('obj')
callbackname = callback.get('callbackname')
fn = obj if callbackname is None else getattr(obj, callbackname)
fn(self, attrname, old, new)
    # TODO: deprecation warning about args change (static_path)
def create_html_snippet(
self, server=False, embed_base_url="", embed_save_loc=".",
static_path="http://localhost:5006/bokehjs/static"):
"""create_html_snippet is used to embed a plot in an html page.
create_html_snippet returns the embed string to be put in html.
This will be a <script> tag.
To embed a plot dependent on the Bokeh Plot Server, set server=True,
otherwise a file with the data for the plot will be built.
embed_base_url is used for non-server embedding. This is used
as the root of the url where the embed.js file will be saved.
embed_save_loc controls where the embed.js will be actually written to.
static_path controls where the embed snippet looks to find
bokeh.js and the other resources it needs for bokeh.
"""
if server:
from .session import Session
if embed_base_url:
                session = Session(root_url=embed_base_url)
else:
session = Session()
return autoload_server(self, session)
from .templates import AUTOLOAD, AUTOLOAD_STATIC
import uuid
js_filename = "%s.embed.js" % self._id
script_path = embed_base_url + js_filename
elementid = str(uuid.uuid4())
js = AUTOLOAD.render(
all_models = serialize_json(self.dump()),
js_url = static_path + "js/bokeh.min.js",
css_files = [static_path + "css/bokeh.min.css"],
elementid = elementid,
)
tag = AUTOLOAD_STATIC.render(
src_path = script_path,
elementid = elementid,
modelid = self._id,
modeltype = self.__view_model__,
)
save_path = os.path.join(embed_save_loc, js_filename)
with open(save_path,"w") as f:
f.write(js)
return tag
def inject_snippet(
self, server=False, embed_base_url="", embed_save_loc=".",
static_path="http://localhost:5006/bokeh/static/"):
warnings.warn("inject_snippet is deprecated, please use create_html_snippet")
return self.create_html_snippet(
server, embed_base_url, embed_save_loc, static_path)
def _build_server_snippet(self, base_url=False):
sess = self._session
modelid = self._id
typename = self.__view_model__
if not base_url:
base_url = sess.root_url
split = urlsplit(base_url)
if split.scheme == 'http':
ws_conn_string = "ws://%s/bokeh/sub" % split.netloc
else:
ws_conn_string = "wss://%s/bokeh/sub" % split.netloc
f_dict = dict(
docid = sess.docid,
ws_conn_string = ws_conn_string,
docapikey = sess.apikey,
root_url = base_url,
modelid = modelid,
modeltype = typename,
script_url = base_url + "bokeh/embed.js")
e_str = '''<script src="%(script_url)s" bokeh_plottype="serverconn"
bokeh_docid="%(docid)s" bokeh_ws_conn_string="%(ws_conn_string)s"
bokeh_docapikey="%(docapikey)s" bokeh_root_url="%(root_url)s"
bokeh_modelid="%(modelid)s" bokeh_modeltype="%(modeltype)s" async="true"></script>
'''
return "", e_str % f_dict
def _build_static_embed_snippet(self, static_path, embed_base_url):
embed_filename = "%s.embed.js" % self._id
full_embed_path = embed_base_url + embed_filename
js_str = self._session.embed_js(self._id, static_path)
sess = self._session
modelid = self._id
typename = self.__view_model__
embed_filename = full_embed_path
f_dict = dict(modelid = modelid, modeltype = typename,
embed_filename=embed_filename)
e_str = '''<script src="%(embed_filename)s" bokeh_plottype="embeddata"
bokeh_modelid="%(modelid)s" bokeh_modeltype="%(modeltype)s" async="true"></script>
'''
return js_str, e_str % f_dict
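# Hedged usage sketch (not part of this module): a typical static-embed flow
# collects every referenced PlotObject and writes an embed.js file next to the
# page. 'plot' is assumed to be any PlotObject subclass instance, and the paths
# below are illustrative only.
def _example_embed_plot(plot):
    referenced = plot.references()      # every nested PlotObject, duplicate-free
    tag = plot.create_html_snippet(
        embed_base_url="/static/embeds/",
        embed_save_loc="static/embeds",
        static_path="http://localhost:5006/bokehjs/static/")
    return referenced, tag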
| bsd-3-clause | 4,577,947,309,496,214,000 | 34.007444 | 90 | 0.581372 | false |
apierleoni/MyBioDb | modules/search_engine.py | 1 | 20410 |
__author__ = 'pierleonia'
DEBUG=True
import os, traceback
from multiprocessing import Pool
class BioentrySearchEngineBackend(object):
def rebuild(self, bioentry_ids=[], **kwargs):
raise NotImplementedError()
def indexes(self, **kwargs):
raise NotImplementedError()
def after_insert(self, **kwargs):
raise NotImplementedError()
def after_update(self, **kwargs):
raise NotImplementedError()
def get_ids(self, **kwargs):
raise NotImplementedError()
def after_delete(self, **kwargs):
raise NotImplementedError()
def search(self, query, **kwargs):
raise NotImplementedError()
def quick_search(self, query):
raise NotImplementedError()
def create_loading_Pool(self):
self.pool = Pool(processes=getCPUs())
def add_bioentry_id_to_index(self, bioentry_id, counter = 1):
raise NotImplementedError()
def map_to_index(self,handler, bioentry_id):
def add_element(element, container):
if isinstance(element,str):
container.append(unicode(element))
elif isinstance(element,list):
for i in element:
if isinstance(i, list):
container.append(unicode(i[0]))
elif isinstance(i, str):
container.append(unicode(i))
return container
seqrecord = handler._retrieve_seqrecord(bioentry_id)
annotation_types, annotation_values = [],[]
feature_types, feature_values = [],[]
comments = []
accessions = []
keywords = []
pubids, pubauths, pubtitles, pubjournals= [],[],[],[]
taxonomy = [] #TODO: add taxonomy
for k,v in seqrecord.annotations.items():
if k == 'accessions':
accessions = add_element(v,accessions)
elif k.startswith('comment'):
comments = add_element(v,comments)
elif k == 'keywords':
keywords = add_element(v,keywords)
elif k=='references':
if isinstance(v,list):
for ref in v:
pubids.append(ref.pubmed_id)
pubtitles.append(ref.title)
pubauths.append(ref.authors.strip())
pubjournals.append(ref.journal)
else:
annotation_values = add_element(v,annotation_values)
annotation_types = add_element(k,annotation_types)
for feature in seqrecord.features:
feature_types.append(feature.type)
for k,v in feature.qualifiers.items():
feature_values = add_element(v,feature_values)
kwargs = dict(id = unicode(bioentry_id),
db = unicode(handler.adaptor.biodatabase[handler.adaptor.bioentry[bioentry_id].biodatabase_id].name),
name=unicode(seqrecord.name),
accession=accessions,
identifier=unicode(seqrecord.id),
description=unicode(seqrecord.description),
keyword=keywords,
annotation=annotation_values,
annotationtype=annotation_types,
comment=comments,
feature=feature_values,
featuretype=feature_types,
lenght=unicode(len(seqrecord)),
dbxref=seqrecord.dbxrefs,
pubid=pubids,
pubtitle=pubtitles,
pubauth=pubauths,
pubjournal=pubjournals)
return kwargs
class SearchEngineResult(object):
def __init__(self, ids, handler, data = {}):
self.biodb = handler.adaptor
self.db_query = self.biodb.bioentry._id.belongs(ids) # to be used in DAL queries
self.selected_ids = ids
self.count = len(ids)
self.select_sql = self.biodb(self.biodb.bioentry.id.belongs(ids))._select() #raw sql to retrieve data from the bioentry table
self.data = data
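# Illustrative sketch: db_query is a ready-made DAL query over the matching
# bioentry ids, so hits can be joined back to the database without re-running
# the search; the selected fields below are an assumption about the caller.
def _example_fetch_hits(result):
    biodb = result.biodb
    return biodb(result.db_query).select(biodb.bioentry.name,
                                         biodb.bioentry.description)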
def getCPUs():
import multiprocessing
try:
return multiprocessing.cpu_count()
except:
return 1
def picklable_call(instance, name, args=(), kwargs=None):
"indirect caller for instance methods and multiprocessing"
if kwargs is None:
kwargs = {}
return getattr(instance, name)(*args, **kwargs)
class WhooshBackend(BioentrySearchEngineBackend):
def __init__(self, handler, indexdir):
self.handler = handler
self.biodb = handler.adaptor
self.indexdir = indexdir
if not os.path.exists(indexdir):
os.mkdir(indexdir)
def indexes(self):
try:
from whoosh.index import create_in,open_dir
except ImportError:
raise ImportError("Cannot find Whoosh")
self.indexname =".".join([self.biodb._uri_hash, 'whoosh'])
try:
self.ix = open_dir(self.indexdir, indexname=self.indexname)
except:
self.ix = create_in(self.indexdir, self._get_schema(), indexname=self.indexname)
def rebuild(self, **kwargs):
cpus = getCPUs()
writer = self.ix.writer(procs=cpus, multisegment=True)
        bioentries = kwargs.get('bioentry_ids', [])
if DEBUG: print "starting global index rebuilding with %i CPUs"%cpus
if not bioentries:
bioentries = [row.id for row in self.biodb(self.biodb.bioentry.id >0
).select(self.biodb.bioentry.id)]
if DEBUG: print "starting indexing of %i bioentries"%len(bioentries)
#iterate over all bioentries at 100 max a time
i, m = 0, 1000
while True:
start = i*m
end = (i+1)*m
if DEBUG:
print "searching for round ",start,end
#print "searching query: " + self.biodb(self.biodb.bioentry.id.belongs(bioentries[start:end]))._select()
rows = self.biodb(self.biodb.bioentry.id.belongs(bioentries[start:end])).select(self.biodb.bioentry.id)
#if DEBUG: print "round size found: ",len(rows)
for row in rows:
try:
#if DEBUG: print "Indexing bioentry %s - %i"%(row.name, i+1)
writer.update_document(**self.map_to_index(self.handler,row.id))
#if DEBUG:
# print "Indexed bioentry %s - %i"%(row.name, start)
except:
if DEBUG:
print "error building index for id: ",row.id
traceback.print_exc()
if len(rows)<m: break
i+=1
writer.commit()
def search(self, query, **kwargs):
from whoosh.qparser import QueryParser,MultifieldParser
fieldnames = kwargs.pop('fieldnames', self.ix.schema.names())
qp = MultifieldParser( fieldnames, schema=self.ix.schema)
q = qp.parse(query)
with self.ix.searcher() as s:
results = s.search(q, **kwargs)
if DEBUG: print "found %i hits in %.2fms"%(len(results.top_n),results.runtime*1000)
ids = list(set(long(result['id']) for result in results))
result = SearchEngineResult(ids, self.handler)
return result
#return ids
def _get_schema(self):
from whoosh.fields import Schema, TEXT, ID, KEYWORD, NUMERIC
from whoosh.analysis import StemmingAnalyzer
return Schema(id=ID(unique=True,stored=True),
db=ID(stored=True),
name=ID(stored=True),
accession=KEYWORD(scorable=True),
identifier=ID(stored=True),
description=TEXT(stored=True),
taxonomy=KEYWORD(lowercase=True,
commas=True,
scorable=True),
keyword=KEYWORD(lowercase=True,
commas=True,
scorable=True),
annotation=TEXT(analyzer=StemmingAnalyzer()),
annotationtype=KEYWORD(lowercase=True,
scorable=True),
comment=TEXT(analyzer=StemmingAnalyzer()),
feature=TEXT(analyzer=StemmingAnalyzer()),
featuretype=KEYWORD(lowercase=True,
commas=True,
scorable=True),
lenght=NUMERIC(),
dbxref=KEYWORD(scorable=True),
pubid=KEYWORD(scorable=True),
pubtitle=TEXT(analyzer=StemmingAnalyzer()),
pubauth=KEYWORD(lowercase=True,
commas=True,
scorable=True),
pubjournal=KEYWORD(lowercase=True,
commas=True,
scorable=True),
)
def map_to_index(self, handler, bioentry_id):
documents = super(WhooshBackend, self).map_to_index(handler, bioentry_id)
for k,v in documents.items():
if isinstance(v, list):
documents[k]=unicode(" ".join(v))
return documents
def quick_search(self, query, limit = 0):
if limit > 0:
return self.search(query, limit = limit,
scored=True,
fieldnames = ['accession',
'description',
'name'])
else:
return self.search(query,scored=True,
fieldnames = ['accession',
'description',
'name'])
class SolrBackend(BioentrySearchEngineBackend):
def __init__(self, handler, url="http://localhost:8983",schema=""):
self.handler = handler
self.biodb = handler.adaptor
self.url = url
if not schema:
schema = self._get_default_schema()
self.schemadoc = schema
# if DEBUG: print schema
def _get_default_schema(self):#TODO: update schema.xml to make strings not case sensitive
from gluon import request
return os.path.join(request.folder, 'databases', 'solr_schema.xml')
def indexes(self):
import sunburnt
# import pysolr
# if 1:
try:
self.interface = sunburnt.SolrInterface(self.url, self.schemadoc)
except:
raise RuntimeError("Cannot connect to Solr: %s" % self.url)
# self.interface = pysolr.Solr(self.url)
def rebuild(self, bioentry_ids=[], **kwargs):
        bioentries = bioentry_ids or kwargs.get('bioentry_ids', [])
if DEBUG: print "starting global index rebuilding"
if not bioentries:
bioentries = [row.id for row in self.biodb(self.biodb.bioentry.id >0
).select(self.biodb.bioentry.id)]
if DEBUG: print "starting indexing of %i bioentries"%len(bioentries)
#iterate over all bioentries at 100 max a time
i, m = 0, 100
while True:
start = i*m
end = (i+1)*m
if DEBUG:
print "searching for round ",start,end
rows = self.biodb(self.biodb.bioentry.id.belongs(bioentries[start:end])).select(self.biodb.bioentry.id)
documents = []
for row in rows:
try:
documents.append(self.map_to_index(self.handler,row.id))
except:
if DEBUG:
print "error building index for id: ",row.id
traceback.print_exc()
self.interface.add(documents)
# self.interface.add_many(documents)
if len(rows)<m: break
i+=1
self.interface.commit()
def search(self, query, **kwargs):
# results = self.interface.query(**fieldkeys).paginate(0,limit)
# ids = [r['id'] for r in results]
# return ids
fieldnames = kwargs.pop('fieldnames', self.interface.schema.fields.keys())
search_all_fields = kwargs.pop('search_all_fields', False)
if search_all_fields:
fieldnames = self.interface.schema.fields.keys()
qd = dict()
fields = self.interface.schema.fields
for fname in fieldnames:
field = fields[fname]
if getattr(field, "class") == 'solr.StrField' :
qd[fname] = query
elif getattr(field, "class") == 'solr.TriIntField' :
try:
qd[fname] = int(query)
except:
pass
results = self.interface.query(**qd).field_limit("id") .execute()#TODO: modify to get the OR by default
if DEBUG: print "found %i hits in %.2fms"%(len(results),results.QTime)
ids = list(set(long(result['id']) for result in results))
result = SearchEngineResult(ids, self.handler)#TODO: return the stored data to avoid querying the db again if possible. use a Storage object and try to get the required fields, otherwise fallback to db query.
return result
def quick_search(self, query, limit = 0):
if limit > 0:
return self.search(query,rows=limit,
fieldnames = ['accession',
'description',
'name'])
else:
return self.search(query,fieldnames = ['accession',
'description',
'name'])
def map_to_index(self, handler, bioentry_id):
document = super(SolrBackend, self).map_to_index(handler, bioentry_id)
try:
document['lenght'] = int(document['lenght'])
except:
pass
return document
class ElasticSearchBackend(BioentrySearchEngineBackend):
def __init__(self, handler, nodes = [], index_name = 'mybiodb'):
self.handler = handler
self.biodb = handler.adaptor
self.nodes = nodes
self.index_name = index_name
def rebuild(self, bioentry_ids=[], **kwargs):
# self.create_loading_Pool()
        bioentries = bioentry_ids or kwargs.get('bioentry_ids', [])
if DEBUG: print "starting global index rebuilding"
if not bioentries:
bioentries = [row.id for row in self.biodb(self.biodb.bioentry.id >0
).select(self.biodb.bioentry.id)]
if DEBUG: print "starting indexing of %i bioentries"%len(bioentries)
#iterate over all bioentries at 100 max a time
# self.pool.apply_async(picklable_call, args = (self, 'add_bioentry_id_to_index', zip(bioentries, range(len(bioentries)))))
# self.pool.close()
# self.pool.join()
for i,bioentry_id in enumerate(bioentries):
self.add_bioentry_id_to_index(bioentry_id, i)
def add_bioentry_id_to_index(self, bioentry_id, counter = 1):
if counter%100 ==0 and DEBUG:
print "\tadded %i bioentries to index"%counter
try:
self.interface.index(index=self.index_name,
doc_type="full_bioentry",
id=bioentry_id,
body=self.map_to_index(self.handler,bioentry_id)
)
except:
if DEBUG:
print "error building index for id: ",bioentry_id
traceback.print_exc()
def search(self, query, **kwargs):
if DEBUG:
from datetime import datetime
start_time = datetime.now()
fieldnames = kwargs.pop('fieldnames', "_all")
size = kwargs.pop('limit', 100)
from_ = kwargs.pop('from', 0)
# results = self.interface.search(index=self.index_name, body={"query": {"query_string": {
# "query": query},
# "term": {
# "fields": fieldnames},
#
# 'from': from_arg,
# 'size' : size
# }})
results = self.interface.search(index=self.index_name,
q= query,
from_ =from_,
size = size,
#fields = fieldnames,#TODO: should be the list of fields to return, check!
_source_include = ['id','name','accession','description'])
if DEBUG:
print "found %i hits in %ims"%(results['hits']['total'], results['took'])
ids = []
data = []
if results['hits']['total']:
for r in results['hits']['hits']:
ids = r['_id']
data.append( dict( id = r['_source']['id'],
name = r['_source']['name'],
accession = r['_source']['accession'][0],
description = r['_source']['description']))
return SearchEngineResult(ids, self.handler, data)
def quick_search(self, query, limit = 0):
if limit > 0:
return self.search(query,
size = limit,
fieldnames = ['accession',
'description',
'name'],
**{'from':0 })
else:
return self.search(query,fieldnames = ['accession',
'description',
'name'])
def map_to_index(self, handler, bioentry_id):
return super(ElasticSearchBackend, self).map_to_index(handler, bioentry_id)
def indexes(self, **kwargs):
import elasticsearch
if self.nodes:
try:
                self.interface = elasticsearch.Elasticsearch(self.nodes, **kwargs)
except:
raise RuntimeError("Cannot connect to ElasticSearch nodes: %s" % ", ".join(self.nodes))
else:
try:
self.interface = elasticsearch.Elasticsearch(**kwargs)
except:
raise RuntimeError("Cannot connect to ElasticSearch on localhost")
class BioentrySearchEngine(object):
def __init__(self,handler, backend=WhooshBackend,**kwargs):
self.handler = handler
self.backend = backend(handler, **kwargs)
def indexes(self):
'''init indexes '''
self.backend.indexes()
#self.table._after_insert.append(
# lambda fields,id: self.backend.after_insert(fields,id))
#self.table._after_update.append(
# lambda queryset,fields: self.backend.after_update(queryset,fields))
#self.table._after_delete.append(
# lambda queryset: self.backend.after_delete(queryset))
def rebuild(self, **kwargs):
self.backend.rebuild( **kwargs)
def search(self, query, **kwargs):
return self.backend.search(query, **kwargs)
def quick_search(self, query, limit = 0):
return self.backend.quick_search(query, limit)
def add_bioentry_id_to_index(self, bioentry_id):
return self.backend.add_bioentry_id_to_index(bioentry_id)
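# Hedged end-to-end sketch (a definition only, nothing runs on import): wire a
# handler to the Whoosh backend, build the index once, then run a quick search
# over accession, name and description. 'handler' is assumed to be the BioSQL
# adaptor wrapper used throughout this module; the index directory is
# illustrative.
def _example_quick_search(handler):
    engine = BioentrySearchEngine(handler, backend=WhooshBackend,
                                  indexdir="private/whoosh_index")
    engine.indexes()    # open or create the index
    engine.rebuild()    # index every bioentry (slow; do it once)
    return engine.quick_search("BRCA1", limit=20)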
| bsd-3-clause | -3,849,043,284,926,460,400 | 38.941292 | 216 | 0.507937 | false |