quasipedia/swaggery | swaggery/test/test_responses.py | agpl-3.0

'''Test suite for the responses module.'''

import json
import unittest
import unittest.mock as mock

from werkzeug.exceptions import InternalServerError, BadRequest

from ..responses import AsyncResponse, BadResponse, GoodResponse, HEADERS
from .. import responses


class TestAsync(unittest.TestCase):

    '''Test the Asynchronous response abstract class.'''

    def test_init(self):
        '''Payload is jsonified at instantiation, if it is not a generator.'''
        data = {'foo': 'bar'}
        r = AsyncResponse(data)
        self.assertEqual(data, json.loads(r.data.decode()))

    def test_stream_array(self):
        '''A generator payload's outcome is an array of jsonified objects.'''
        data = (letter for letter in 'SPAM')
        r = AsyncResponse(data)
        expected = b'["S",\n"P",\n"A",\n"M"]'
        start_response = mock.MagicMock
        self.assertEqual(expected, b''.join(r.async(None, start_response)))

    def test_stream_empty_array(self):
        '''An empty generator payload's outcome is an empty JSON array.'''
        data = (letter for letter in '')
        r = AsyncResponse(data)
        expected = b'[]'
        start_response = mock.MagicMock
        self.assertEqual(expected, b''.join(r.async(None, start_response)))

    def test_async_objects(self):
        '''A single-object response iterates once.'''
        data = {'foo': 'bar'}
        r = AsyncResponse(data)
        start_response = mock.MagicMock
        for n, bit in enumerate(r.async(None, start_response)):
            pass
        self.assertEqual(0, n)

    def test_async_generators(self):
        '''A generator response iterates several times.'''
        data = (letter for letter in 'SPAM')
        r = AsyncResponse(data)
        start_response = mock.MagicMock
        for n, bit in enumerate(r.async(None, start_response)):
            pass
        self.assertGreater(n, 1)
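
# Illustrative sketch (not part of the original test suite): the streaming
# contract exercised above can be made explicit with a tiny stand-alone
# helper: a generator payload becomes a JSON array whose elements are
# separated by ',\n'. The helper name `_stream_json_array` is hypothetical
# and exists only for illustration.
def _stream_json_array(items):
    '''Yield the byte chunks of a JSON array built from `items`.'''
    chunks = [json.dumps(item).encode() for item in items]
    yield b'[' + b',\n'.join(chunks) + b']'

assert b''.join(_stream_json_array('SPAM')) == b'["S",\n"P",\n"A",\n"M"]'
assert b''.join(_stream_json_array('')) == b'[]'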


class TestBad(unittest.TestCase):

    '''Test the BadResponse class.'''

    def test_500(self):
        '''HTTP 500 responses get special logging.'''
        exception = InternalServerError('SPAM')
        # When python 3.4 is out this should be changed to self.assertLogs
        with mock.patch.object(responses, 'log') as lg:
            # This silly try/raise/except is due to the fact that
            # log.exception requires an exception context that is created
            # only when the exception is actually raised (not simply
            # instantiated)
            try:
                raise exception
            except InternalServerError:
                BadResponse(mock.MagicMock(), exception)
        self.assertEqual(2, lg.error.call_count)
        self.assertEqual(1, lg.exception.call_count)

    def test_others(self):
        '''HTTP (non 500) bad responses get logged.'''
        exception = BadRequest('SPAM')
        # When python 3.4 is out this should be changed to self.assertLogs
        with mock.patch.object(responses, 'log') as lg:
            # This silly try/raise/except is due to the fact that
            # log.exception requires an exception context that is created
            # only when the exception is actually raised (not simply
            # instantiated)
            try:
                raise exception
            except BadRequest:
                BadResponse(mock.MagicMock(), exception)
        self.assertEqual(1, lg.info.call_count)

    def test_headers(self):
        '''A bad response has the headers properly set.'''
        request = mock.MagicMock()
        exception = BadRequest('SPAM')
        r = BadResponse(request, exception)
        for header in HEADERS:
            self.assertIn(header, list(r.headers))


class TestGood(unittest.TestCase):

    '''Test the GoodResponse class.'''

    def test_headers(self):
        '''A good response has the headers properly set.'''
        r = GoodResponse(None, mock.MagicMock(status=200, payload=None))
        for header in HEADERS:
            self.assertIn(header, list(r.headers))

cassinius/pyNLPGraphs | genSimWord2Vec/someModelCalcs.py | apache-2.0

import gensim
import logging
import os

W2V = gensim.models.Word2Vec
SHAKY_MODEL = '../Models_Shakespeare/'

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

categories = ['comedies', 'histories', 'poetry', 'tragedies']

for category in categories:
    model = W2V.load_word2vec_format(SHAKY_MODEL + category + ".w2v", binary=True)

    # print model.most_similar(positive=['woman', 'king'], negative=['man'], topn=1)
    print "Most similar to king: \n", model.most_similar(['king'])
    print "Most similar to queen: \n", model.most_similar(['queen'])
    print "Most similar to love: \n", model.most_similar(['love'])
    # print "Most similar to peace: \n", model.most_similar(['peace'])

    print "Not fitting (king, queen, woman, power, Juliet): \n", model.doesnt_match("king queen woman power Juliet".split())
    print "N similarity (king, power) (queen, love): \n", model.n_similarity(["king", "power"], ["queen", "love"])
    print "Similarity (king, queen): \n", model.similarity("king", "queen")
    print "Similarity (king, love): \n", model.similarity("king", "love"), "\n"
"""
TODO
-) load pre-trained models on meaningful datasets
-) formulate interesting questions
""" | apache-2.0 | -6,932,447,444,441,028,000 | 41.827586 | 123 | 0.666398 | false |
ebonyclock/vizdoom_cig2017 | f1/F1_track1/tensorpack/models/pool.py | 1 | 6208 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: pool.py
# Author: Yuxin Wu <[email protected]>

import tensorflow as tf
import numpy as np

from ._common import *
from ..tfutils.symbolic_functions import *

__all__ = ['MaxPooling', 'FixedUnPooling', 'AvgPooling', 'GlobalAvgPooling',
           'BilinearUpSample']


@layer_register()
def MaxPooling(x, shape, stride=None, padding='VALID'):
    """
    MaxPooling on images.

    :param input: NHWC tensor.
    :param shape: int or [h, w]
    :param stride: int or [h, w]. default to be shape.
    :param padding: 'valid' or 'same'. default to 'valid'
    :returns: NHWC tensor.
    """
    padding = padding.upper()
    shape = shape4d(shape)
    if stride is None:
        stride = shape
    else:
        stride = shape4d(stride)

    return tf.nn.max_pool(x, ksize=shape, strides=stride, padding=padding)


@layer_register()
def AvgPooling(x, shape, stride=None, padding='VALID'):
    """
    Average pooling on images.

    :param input: NHWC tensor.
    :param shape: int or [h, w]
    :param stride: int or [h, w]. default to be shape.
    :param padding: 'valid' or 'same'. default to 'valid'
    :returns: NHWC tensor.
    """
    padding = padding.upper()
    shape = shape4d(shape)
    if stride is None:
        stride = shape
    else:
        stride = shape4d(stride)

    return tf.nn.avg_pool(x, ksize=shape, strides=stride, padding=padding)


@layer_register()
def GlobalAvgPooling(x):
    """
    Global average pooling as in `Network In Network
    <http://arxiv.org/abs/1312.4400>`_.

    :param input: NHWC tensor.
    :returns: NC tensor.
    """
    assert x.get_shape().ndims == 4
    return tf.reduce_mean(x, [1, 2])


# https://github.com/tensorflow/tensorflow/issues/2169
def UnPooling2x2ZeroFilled(x):
    out = tf.concat(3, [x, tf.zeros_like(x)])
    out = tf.concat(2, [out, tf.zeros_like(out)])

    sh = x.get_shape().as_list()
    if None not in sh[1:]:
        out_size = [-1, sh[1] * 2, sh[2] * 2, sh[3]]
        return tf.reshape(out, out_size)
    else:
        shv = tf.shape(x)
        ret = tf.reshape(out, tf.pack([-1, shv[1] * 2, shv[2] * 2, sh[3]]))
        ret.set_shape([None, None, None, sh[3]])
        return ret
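
# Illustrative sketch (not part of the original module): the two concat
# calls above interleave zeros so every input pixel ends up in the top-left
# corner of its 2x2 output block. The same layout in plain numpy, for a
# hypothetical single-channel 2x2 map:
#
#     x = np.array([[1., 2.], [3., 4.]])
#     np.kron(x, np.array([[1., 0.], [0., 0.]]))
#     # -> [[1, 0, 2, 0],
#     #     [0, 0, 0, 0],
#     #     [3, 0, 4, 0],
#     #     [0, 0, 0, 0]]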


@layer_register()
def FixedUnPooling(x, shape, unpool_mat=None):
    """
    Unpool the input with a fixed mat to perform kronecker product with.

    :param input: NHWC tensor
    :param shape: int or [h, w]
    :param unpool_mat: a tf/np matrix with size=shape. If None, will use a mat
        with 1 at top-left corner.
    :returns: NHWC tensor
    """
    shape = shape2d(shape)

    # a faster implementation for this special case
    if shape[0] == 2 and shape[1] == 2 and unpool_mat is None:
        return UnPooling2x2ZeroFilled(x)

    input_shape = tf.shape(x)
    if unpool_mat is None:
        mat = np.zeros(shape, dtype='float32')
        mat[0][0] = 1
        unpool_mat = tf.constant(mat, name='unpool_mat')
    elif isinstance(unpool_mat, np.ndarray):
        unpool_mat = tf.constant(unpool_mat, name='unpool_mat')
    assert unpool_mat.get_shape().as_list() == list(shape)

    # perform a tensor-matrix kronecker product
    fx = flatten(tf.transpose(x, [0, 3, 1, 2]))
    fx = tf.expand_dims(fx, -1)     # (bchw)x1
    mat = tf.expand_dims(flatten(unpool_mat), 0)    # 1x(shxsw)
    prod = tf.matmul(fx, mat)    # (bchw) x(shxsw)
    prod = tf.reshape(prod, tf.pack(
        [-1, input_shape[3], input_shape[1], input_shape[2], shape[0], shape[1]]))
    prod = tf.transpose(prod, [0, 2, 4, 3, 5, 1])
    prod = tf.reshape(prod, tf.pack(
        [-1, input_shape[1] * shape[0], input_shape[2] * shape[1], input_shape[3]]))
    return prod
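
# Illustrative sketch (not part of the original module): per channel, the
# matmul/reshape/transpose dance above computes exactly a Kronecker product
# of the feature map with `unpool_mat`. A hypothetical 2D numpy reference:
#
#     fm = np.random.rand(3, 4).astype('float32')
#     mat = np.zeros((2, 2), dtype='float32')
#     mat[0][0] = 1
#     reference = np.kron(fm, mat)    # shape (6, 8); fm values sit on the
#                                     # top-left corner of each 2x2 block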


@layer_register()
def BilinearUpSample(x, shape):
    """
    Non-parametric bilinear upsample the input images.

    :param x: input NHWC tensor
    :param shape: an integer
    """
    def bilinear_conv_filler(s):
        """
        s: width, height of the conv filter
        See https://github.com/BVLC/caffe/blob/master/include%2Fcaffe%2Ffiller.hpp#L244
        """
        f = np.ceil(float(s) / 2)
        c = float(2 * f - 1 - f % 2) / (2 * f)
        ret = np.zeros((s, s), dtype='float32')
        for x in range(s):
            for y in range(s):
                ret[x, y] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
        return ret

    ch = x.get_shape().as_list()[3]
    shape = int(shape)

    unpool_mat = np.zeros((shape, shape), dtype='float32')
    unpool_mat[-1, -1] = 1
    x = FixedUnPooling('unpool', x, shape, unpool_mat)

    filter_shape = 2 * shape
    w = bilinear_conv_filler(filter_shape)
    w = np.repeat(w, ch * ch).reshape((filter_shape, filter_shape, ch, ch))
    weight_var = tf.constant(w,
                             tf.float32,
                             shape=(filter_shape, filter_shape, ch, ch))

    output = tf.nn.conv2d(x, weight_var, [1, 1, 1, 1], padding='SAME')
    return output
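
# Illustrative sketch (not part of the original module): for shape == 2 the
# nested filler above builds the classic 4x4 bilinear kernel (s = 4, so
# f = 2 and c = 0.75). Its 1D profile is (0.25, 0.75, 0.75, 0.25) and the
# 2D kernel is the outer product of that profile with itself:
#
#     profile = 1 - np.abs(np.arange(4) / 2.0 - 0.75)
#     kernel = np.outer(profile, profile)   # equals bilinear_conv_filler(4)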


from ._test import TestModel


class TestPool(TestModel):
    def test_fixed_unpooling(self):
        h, w = 3, 4
        mat = np.random.rand(h, w, 3).astype('float32')
        inp = self.make_variable(mat)
        inp = tf.reshape(inp, [1, h, w, 3])
        output = FixedUnPooling('unpool', inp, 2)
        res = self.run_variable(output)
        self.assertEqual(res.shape, (1, 2 * h, 2 * w, 3))

        # mat is on corner
        ele = res[0, ::2, ::2, 0]
        self.assertTrue((ele == mat[:, :, 0]).all())
        # the rest are zeros
        res[0, ::2, ::2, :] = 0
        self.assertTrue((res == 0).all())

    def test_upsample(self):
        h, w = 5, 5
        scale = 2
        mat = np.random.rand(h, w).astype('float32')
        inp = self.make_variable(mat)
        inp = tf.reshape(inp, [1, h, w, 1])

        output = BilinearUpSample('upsample', inp, scale)
        res = self.run_variable(output)

        from skimage.transform import rescale
        res2 = rescale(mat, scale)

        diff = np.abs(res2 - res[0, :, :, 0])

        # not equivalent to rescale on edge
        diff[0, :] = 0
        diff[:, 0] = 0
        if not diff.max() < 1e-4:
            import IPython
            IPython.embed(config=IPython.terminal.ipapp.load_default_config())
        self.assertTrue(diff.max() < 1e-4)

spandanb/horizon | horizon/tables/actions.py | apache-2.0

# Copyright 2012 Nebula, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from collections import defaultdict
import logging
import new

from django.conf import settings
from django.core import urlresolvers
from django import shortcuts
from django.template.loader import render_to_string  # noqa
from django.utils.datastructures import SortedDict
from django.utils.functional import Promise  # noqa
from django.utils.http import urlencode  # noqa
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _

import six

from horizon import exceptions
from horizon import messages
from horizon.utils import functions
from horizon.utils import html


LOG = logging.getLogger(__name__)

# For Bootstrap integration; can be overridden in settings.
ACTION_CSS_CLASSES = ("btn", "btn-small")
STRING_SEPARATOR = "__"


class BaseActionMetaClass(type):
    """Metaclass for adding all actions options from inheritance tree
    to action.

    This way actions can inherit from each other but still use
    the class attributes DSL. Meaning, all attributes of Actions are
    defined as class attributes, but in the background, it will be used as
    parameters for the initializer of the object. The object is then
    initialized clean way. Similar principle is used in DataTableMetaclass.
    """

    def __new__(mcs, name, bases, attrs):
        # Options of action are set as class attributes, loading them.
        options = {}
        if attrs:
            options = attrs

        # Iterate in reverse to preserve final order
        for base in bases[::-1]:
            # It actually throws all super classes away except immediate
            # superclass. But it's fine, immediate super-class base_options
            # includes everything because superclasses was created also by
            # this metaclass. Same principle is used in DataTableMetaclass.
            if hasattr(base, 'base_options') and base.base_options:
                base_options = {}
                # Updating options by superclasses.
                base_options.update(base.base_options)
                # Updating superclass options by actual class options.
                base_options.update(options)
                options = base_options
        # Saving all options to class attribute, this will be used for
        # instantiating of the specific Action.
        attrs['base_options'] = options

        return type.__new__(mcs, name, bases, attrs)

    def __call__(cls, *args, **kwargs):
        cls.base_options.update(kwargs)
        # Adding cls.base_options to each init call.
        klass = super(BaseActionMetaClass, cls).__call__(
            *args, **cls.base_options)
        return klass
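
# Illustrative sketch (not part of the original module): with this metaclass,
# options declared as class attributes merge down the inheritance tree and
# are replayed as keyword arguments on every instantiation. The hypothetical
# classes below only show the merge order (py2 spelling via __metaclass__):
#
#     class Base(object):
#         __metaclass__ = BaseActionMetaClass
#         name = "base"
#         preempt = False
#
#     class Child(Base):
#         preempt = True
#
#     Child.base_options["name"]      # "base"  (inherited from Base)
#     Child.base_options["preempt"]   # True    (overridden by Child)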


@six.add_metaclass(BaseActionMetaClass)
class BaseAction(html.HTMLElement):
    """Common base class for all ``Action`` classes."""

    def __init__(self, **kwargs):
        super(BaseAction, self).__init__()
        self.datum = kwargs.get('datum', None)
        self.table = kwargs.get('table', None)
        self.handles_multiple = kwargs.get('handles_multiple', False)
        self.requires_input = kwargs.get('requires_input', False)
        self.preempt = kwargs.get('preempt', False)
        self.policy_rules = kwargs.get('policy_rules', None)

    def data_type_matched(self, datum):
        """Method to see if the action is allowed for a certain type of data.

        Only affects mixed data type tables.
        """
        if datum:
            action_data_types = getattr(self, "allowed_data_types", [])
            # If the data types of this action is empty, we assume it accepts
            # all kinds of data and this method will return True.
            if action_data_types:
                datum_type = getattr(datum, self.table._meta.data_type_name,
                                     None)
                if datum_type and (datum_type not in action_data_types):
                    return False
        return True

    def get_policy_target(self, request, datum):
        """Provide the target for a policy request.

        This method is meant to be overridden to return target details when
        one of the policy checks requires them. E.g., {"user_id": datum.id}
        """
        return {}

    def allowed(self, request, datum):
        """Determine whether this action is allowed for the current request.

        This method is meant to be overridden with more specific checks.
        """
        return True

    def _allowed(self, request, datum):
        policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)

        if policy_check and self.policy_rules:
            target = self.get_policy_target(request, datum)
            return (policy_check(self.policy_rules, request, target) and
                    self.allowed(request, datum))
        return self.allowed(request, datum)

    def update(self, request, datum):
        """Allows per-action customization based on current conditions.

        This is particularly useful when you wish to create a "toggle"
        action that will be rendered differently based on the value of an
        attribute on the current row's data.

        By default this method is a no-op.
        """
        pass

    def get_default_classes(self):
        """Returns a list of the default classes for the action. Defaults to
        ``["btn", "btn-small"]``.
        """
        return getattr(settings, "ACTION_CSS_CLASSES", ACTION_CSS_CLASSES)

    def get_default_attrs(self):
        """Returns a list of the default HTML attributes for the action.

        Defaults to returning an ``id`` attribute with the value
        ``{{ table.name }}__action_{{ action.name }}__{{ creation counter }}``.
        """
        if self.datum is not None:
            bits = (self.table.name,
                    "row_%s" % self.table.get_object_id(self.datum),
                    "action_%s" % self.name)
        else:
            bits = (self.table.name, "action_%s" % self.name)
        return {"id": STRING_SEPARATOR.join(bits)}

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.name)

    def associate_with_table(self, table):
        self.table = table


class Action(BaseAction):
    """Represents an action which can be taken on this table's data.

    .. attribute:: name

        Required. The short name or "slug" representing this
        action. This name should not be changed at runtime.

    .. attribute:: verbose_name

        A descriptive name used for display purposes. Defaults to the
        value of ``name`` with the first letter of each word capitalized.

    .. attribute:: verbose_name_plural

        Used like ``verbose_name`` in cases where ``handles_multiple`` is
        ``True``. Defaults to ``verbose_name`` with the letter "s" appended.

    .. attribute:: method

        The HTTP method for this action. Defaults to ``POST``. Other methods
        may or may not succeed currently.

    .. attribute:: requires_input

        Boolean value indicating whether or not this action can be taken
        without any additional input (e.g. an object id). Defaults to
        ``True``.

    .. attribute:: preempt

        Boolean value indicating whether this action should be evaluated in
        the period after the table is instantiated but before the data has
        been loaded.

        This can allow actions which don't need access to the full table data
        to bypass any API calls and processing which would otherwise be
        required to load the table.

    .. attribute:: allowed_data_types

        A list that contains the allowed data types of the action. If the
        datum's type is in this list, the action will be shown on the row
        for the datum.

        Default to be an empty list (``[]``). When set to empty, the action
        will accept any kind of data.

    .. attribute:: policy_rules

        list of scope and rule tuples to do policy checks on, the
        composition of which is (scope, rule)

            scope: service type managing the policy for action
            rule: string representing the action to be checked

            for a policy that requires a single rule check:
                policy_rules should look like
                    "(("compute", "compute:create_instance"),)"
            for a policy that requires multiple rule checks:
                rules should look like
                    "(("identity", "identity:list_users"),
                      ("identity", "identity:list_roles"))"

    At least one of the following methods must be defined:

    .. method:: single(self, data_table, request, object_id)

        Handler for a single-object action.

    .. method:: multiple(self, data_table, request, object_ids)

        Handler for multi-object actions.

    .. method:: handle(self, data_table, request, object_ids)

        If a single function can work for both single-object and
        multi-object cases then simply providing a ``handle`` function
        will internally route both ``single`` and ``multiple`` requests
        to ``handle`` with the calls from ``single`` being transformed
        into a list containing only the single object id.
    """

    def __init__(self, single_func=None, multiple_func=None, handle_func=None,
                 attrs=None, **kwargs):
        super(Action, self).__init__(**kwargs)

        self.method = kwargs.get('method', "POST")
        self.requires_input = kwargs.get('requires_input', True)
        self.verbose_name = kwargs.get('verbose_name', self.name.title())
        self.verbose_name_plural = kwargs.get('verbose_name_plural',
                                              "%ss" % self.verbose_name)
        self.allowed_data_types = kwargs.get('allowed_data_types', [])

        if attrs:
            self.attrs.update(attrs)

        # Don't set these if they're None
        if single_func:
            self.single = single_func
        if multiple_func:
            self.multiple = multiple_func
        if handle_func:
            self.handle = handle_func

        # Ensure we have the appropriate methods
        has_handler = hasattr(self, 'handle') and callable(self.handle)
        has_single = hasattr(self, 'single') and callable(self.single)
        has_multiple = hasattr(self, 'multiple') and callable(self.multiple)

        if has_handler or has_multiple:
            self.handles_multiple = True

        if not has_handler and (not has_single or has_multiple):
            cls_name = self.__class__.__name__
            raise NotImplementedError('You must define either a "handle" '
                                      'method or a "single" or "multiple" '
                                      'method on %s.' % cls_name)

        if not has_single:
            def single(self, data_table, request, object_id):
                return self.handle(data_table, request, [object_id])
            self.single = new.instancemethod(single, self)

        if not has_multiple and self.handles_multiple:
            def multiple(self, data_table, request, object_ids):
                return self.handle(data_table, request, object_ids)
            self.multiple = new.instancemethod(multiple, self)

    def get_param_name(self):
        """Returns the full POST parameter name for this action.

        Defaults to
        ``{{ table.name }}__{{ action.name }}``.
        """
        return "__".join([self.table.name, self.name])


class LinkAction(BaseAction):
    """A table action which is simply a link rather than a form POST.

    .. attribute:: name

        Required. The short name or "slug" representing this
        action. This name should not be changed at runtime.

    .. attribute:: verbose_name

        A string which will be rendered as the link text. (Required)

    .. attribute:: url

        A string or a callable which resolves to a url to be used as the link
        target. You must either define the ``url`` attribute or override
        the ``get_link_url`` method on the class.

    .. attribute:: allowed_data_types

        A list that contains the allowed data types of the action. If the
        datum's type is in this list, the action will be shown on the row
        for the datum.

        Defaults to be an empty list (``[]``). When set to empty, the action
        will accept any kind of data.
    """
    # class attribute name is used for ordering of Actions in table
    name = "link"
    ajax = False

    def __init__(self, attrs=None, **kwargs):
        super(LinkAction, self).__init__(**kwargs)
        self.method = kwargs.get('method', "GET")
        self.bound_url = kwargs.get('bound_url', None)
        self.name = kwargs.get('name', self.name)
        self.verbose_name = kwargs.get('verbose_name', self.name.title())
        self.url = kwargs.get('url', None)
        self.allowed_data_types = kwargs.get('allowed_data_types', [])

        if not kwargs.get('verbose_name', None):
            raise NotImplementedError('A LinkAction object must have a '
                                      'verbose_name attribute.')
        if attrs:
            self.attrs.update(attrs)
        if self.ajax:
            self.classes = list(self.classes) + ['ajax-update']

    def get_ajax_update_url(self):
        table_url = self.table.get_absolute_url()
        params = urlencode(
            SortedDict([("action", self.name), ("table", self.table.name)])
        )
        return "%s?%s" % (table_url, params)

    def render(self):
        return render_to_string("horizon/common/_data_table_table_action.html",
                                {"action": self})

    def associate_with_table(self, table):
        super(LinkAction, self).associate_with_table(table)
        if self.ajax:
            self.attrs['data-update-url'] = self.get_ajax_update_url()

    def get_link_url(self, datum=None):
        """Returns the final URL based on the value of ``url``.

        If ``url`` is callable it will call the function.
        If not, it will then try to call ``reverse`` on ``url``.
        Failing that, it will simply return the value of ``url`` as-is.

        When called for a row action, the current row data object will be
        passed as the first parameter.
        """
        if not self.url:
            raise NotImplementedError('A LinkAction class must have a '
                                      'url attribute or define its own '
                                      'get_link_url method.')
        if callable(self.url):
            return self.url(datum, **self.kwargs)
        try:
            if datum:
                obj_id = self.table.get_object_id(datum)
                return urlresolvers.reverse(self.url, args=(obj_id,))
            else:
                return urlresolvers.reverse(self.url)
        except urlresolvers.NoReverseMatch as ex:
            LOG.info('No reverse found for "%s": %s' % (self.url, ex))
            return self.url
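
# Illustrative sketch (not part of the original module): a typical LinkAction
# only needs ``verbose_name`` and a reversible ``url``; get_link_url() above
# appends the row's object id automatically. The names below are
# hypothetical.
#
#     class EditInstance(LinkAction):
#         name = "edit"
#         verbose_name = "Edit Instance"
#         url = "horizon:project:instances:update"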


class FilterAction(BaseAction):
    """A base class representing a filter action for a table.

    .. attribute:: name

        The short name or "slug" representing this action. Defaults to
        ``"filter"``.

    .. attribute:: verbose_name

        A descriptive name used for display purposes. Defaults to the
        value of ``name`` with the first letter of each word capitalized.

    .. attribute:: param_name

        A string representing the name of the request parameter used for the
        search term. Default: ``"q"``.

    .. attribute: filter_type

        A string representing the type of this filter. Default: ``"query"``.

    .. attribute: needs_preloading

        If True, the filter function will be called for the initial
        GET request with an empty ``filter_string``, regardless of the
        value of ``method``.
    """
    # TODO(gabriel): The method for a filter action should be a GET,
    # but given the form structure of the table that's currently impossible.
    # At some future date this needs to be reworked to get the filter action
    # separated from the table's POST form.

    # class attribute name is used for ordering of Actions in table
    name = "filter"

    def __init__(self, **kwargs):
        super(FilterAction, self).__init__(**kwargs)
        self.method = kwargs.get('method', "POST")
        self.name = kwargs.get('name', self.name)
        self.verbose_name = kwargs.get('verbose_name', _("Filter"))
        self.filter_type = kwargs.get('filter_type', "query")
        self.needs_preloading = kwargs.get('needs_preloading', False)
        self.param_name = kwargs.get('param_name', 'q')

    def get_param_name(self):
        """Returns the full query parameter name for this action.

        Defaults to
        ``{{ table.name }}__{{ action.name }}__{{ action.param_name }}``.
        """
        return "__".join([self.table.name, self.name, self.param_name])

    def get_default_classes(self):
        classes = super(FilterAction, self).get_default_classes()
        classes += ("btn-search",)
        return classes

    def assign_type_string(self, table, data, type_string):
        for datum in data:
            setattr(datum, table._meta.data_type_name, type_string)

    def data_type_filter(self, table, data, filter_string):
        filtered_data = []
        for data_type in table._meta.data_types:
            func_name = "filter_%s_data" % data_type
            filter_func = getattr(self, func_name, None)
            if not filter_func and not callable(filter_func):
                # The check of filter function implementation should happen
                # in the __init__. However, the current workflow of DataTable
                # and actions won't allow it. Need to be fixed in the future.
                cls_name = self.__class__.__name__
                raise NotImplementedError("You must define a %s method "
                                          "for %s data type in %s." %
                                          (func_name, data_type, cls_name))
            _data = filter_func(table, data, filter_string)
            self.assign_type_string(table, _data, data_type)
            filtered_data.extend(_data)
        return filtered_data

    def filter(self, table, data, filter_string):
        """Provides the actual filtering logic.

        This method must be overridden by subclasses and return
        the filtered data.
        """
        raise NotImplementedError("The filter method has not been "
                                  "implemented by %s." % self.__class__)


class FixedFilterAction(FilterAction):
    """A filter action with fixed buttons."""

    def __init__(self, **kwargs):
        super(FixedFilterAction, self).__init__(**kwargs)
        self.filter_type = kwargs.get('filter_type', "fixed")
        self.needs_preloading = kwargs.get('needs_preloading', True)

        self.fixed_buttons = self.get_fixed_buttons()
        self.filter_string = ''

    def filter(self, table, images, filter_string):
        self.filter_string = filter_string
        categories = self.categorize(table, images)
        self.categories = defaultdict(list, categories)
        for button in self.fixed_buttons:
            button['count'] = len(self.categories[button['value']])
        if not filter_string:
            return images
        return self.categories[filter_string]

    def get_fixed_buttons(self):
        """Returns a list of dictionaries describing the fixed buttons
        to use for filtering.

        Each list item should be a dict with the following keys:

        * ``text``: Text to display on the button
        * ``icon``: Icon class for icon element (inserted before text).
        * ``value``: Value returned when the button is clicked. This value is
          passed to ``filter()`` as ``filter_string``.
        """
        raise NotImplementedError("The get_fixed_buttons method has "
                                  "not been implemented by %s." %
                                  self.__class__)

    def categorize(self, table, images):
        """Override to separate images into categories.

        Return a dict with a key for the value of each fixed button,
        and a value that is a list of images in that category.
        """
        raise NotImplementedError("The categorize method has not been "
                                  "implemented by %s." % self.__class__)


class BatchAction(Action):
    """A table action which takes batch action on one or more
    objects. This action should not require user input on a
    per-object basis.

    .. attribute:: name

       An internal name for this action.

    .. attribute:: action_present

       String or tuple/list. The display forms of the name.
       Should be a transitive verb, capitalized and translated. ("Delete",
       "Rotate", etc.) If tuple or list - then setting
       self.current_present_action = n will set the current active item
       from the list(action_present[n])

       You can pass a complete action name including 'data_type' by specifying
       '%(data_type)s' substitution in action_present ("Delete %(data_type)s").
       Otherwise a complete action name is a format of "<action> <data_type>".
       <data_type> is determined based on the number of items.
       By passing a complete action name you allow translators to control
       the order of words as they want.

    .. attribute:: action_past

       String or tuple/list. The past tense of action_present. ("Deleted",
       "Rotated", etc.) If tuple or list - then
       setting self.current_past_action = n will set the current active item
       from the list(action_past[n])

    .. attribute:: data_type_singular

       A display name for the type of data that receives the
       action. ("Key Pair", "Floating IP", etc.)

    .. attribute:: data_type_plural

       Optional plural word for the type of data being acted
       on. Defaults to appending 's'. Relying on the default is bad
       for translations and should not be done.

    .. attribute:: success_url

       Optional location to redirect after completion of the delete
       action. Defaults to the current page.
    """

    def __init__(self, **kwargs):
        super(BatchAction, self).__init__(**kwargs)
        self.success_url = kwargs.get('success_url', None)
        self.data_type_singular = kwargs.get('data_type_singular', None)
        self.data_type_plural = kwargs.get('data_type_plural',
                                           self.data_type_singular + 's')
        # If setting a default name, don't initialize it too early
        self.verbose_name = kwargs.get('verbose_name', self._get_action_name)
        self.verbose_name_plural = kwargs.get(
            'verbose_name_plural', lambda: self._get_action_name('plural'))

        if not kwargs.get('data_type_singular', None):
            raise NotImplementedError('A batchAction object must have a '
                                      'data_type_singular attribute.')

        self.current_present_action = 0
        self.current_past_action = 0
        # Keep record of successfully handled objects
        self.success_ids = []

    def _allowed(self, request, datum=None):
        # Override the default internal action method to prevent batch
        # actions from appearing on tables with no data.
        if not self.table.data and not datum:
            return False
        return super(BatchAction, self)._allowed(request, datum)

    def _get_action_name(self, items=None, past=False):
        """Builds combinations like 'Delete Object' and 'Deleted
        Objects' based on the number of items and `past` flag.
        """
        action_type = "past" if past else "present"
        action_attr = getattr(self, "action_%s" % action_type)
        if isinstance(action_attr, (basestring, Promise)):
            action = action_attr
        else:
            toggle_selection = getattr(self, "current_%s_action" %
                                       action_type)
            action = action_attr[toggle_selection]

        if items is None or len(items) == 1:
            data_type = self.data_type_singular
        else:
            data_type = self.data_type_plural
        if '%(data_type)s' in action:
            # If full action string is specified, use action as format string.
            msgstr = action
        else:
            if action_type == "past":
                msgstr = pgettext_lazy("past", "%(action)s %(data_type)s")
            else:
                msgstr = pgettext_lazy("present", "%(action)s %(data_type)s")
        return msgstr % {'action': action, 'data_type': data_type}

    def action(self, request, datum_id):
        """Required. Accepts a single object id and performs the specific
        action.

        Return values are discarded, errors raised are caught and logged.
        """
        raise NotImplementedError('action() must be defined for %s'
                                  % self.__class__.__name__)

    def update(self, request, datum):
        """Switches the action verbose name, if needed."""
        if getattr(self, 'action_present', False):
            self.verbose_name = self._get_action_name()
            self.verbose_name_plural = self._get_action_name('plural')

    def get_success_url(self, request=None):
        """Returns the URL to redirect to after a successful action."""
        if self.success_url:
            return self.success_url
        return request.get_full_path()

    def handle(self, table, request, obj_ids):
        action_success = []
        action_failure = []
        action_not_allowed = []
        for datum_id in obj_ids:
            datum = table.get_object_by_id(datum_id)
            datum_display = table.get_object_display(datum) or _("N/A")
            if not table._filter_action(self, request, datum):
                action_not_allowed.append(datum_display)
                LOG.info('Permission denied to %s: "%s"' %
                         (self._get_action_name(past=True).lower(),
                          datum_display))
                continue
            try:
                self.action(request, datum_id)
                # Call update to invoke changes if needed
                self.update(request, datum)
                action_success.append(datum_display)
                self.success_ids.append(datum_id)
                LOG.info('%s: "%s"' %
                         (self._get_action_name(past=True), datum_display))
            except Exception as ex:
                # Handle the exception but silence it since we'll display
                # an aggregate error message later. Otherwise we'd get
                # multiple error messages displayed to the user.
                if getattr(ex, "_safe_message", None):
                    ignore = False
                else:
                    ignore = True
                action_failure.append(datum_display)
                exceptions.handle(request, ignore=ignore)

        # Begin with success message class, downgrade to info if problems.
        success_message_level = messages.success
        if action_not_allowed:
            msg = _('You are not allowed to %(action)s: %(objs)s')
            params = {"action":
                      self._get_action_name(action_not_allowed).lower(),
                      "objs": functions.lazy_join(", ", action_not_allowed)}
            messages.error(request, msg % params)
            success_message_level = messages.info
        if action_failure:
            msg = _('Unable to %(action)s: %(objs)s')
            params = {"action": self._get_action_name(action_failure).lower(),
                      "objs": functions.lazy_join(", ", action_failure)}
            messages.error(request, msg % params)
            success_message_level = messages.info
        if action_success:
            msg = _('%(action)s: %(objs)s')
            params = {"action":
                      self._get_action_name(action_success, past=True),
                      "objs": functions.lazy_join(", ", action_success)}
            success_message_level(request, msg % params)

        return shortcuts.redirect(self.get_success_url(request))


class DeleteAction(BatchAction):
    """Doc missing."""
    name = "delete"

    def __init__(self, **kwargs):
        super(DeleteAction, self).__init__(**kwargs)
        self.name = kwargs.get('name', self.name)
        self.action_present = kwargs.get('action_present', _("Delete"))
        self.action_past = kwargs.get('action_past', _("Deleted"))

    def action(self, request, obj_id):
        return self.delete(request, obj_id)

    def delete(self, request, obj_id):
        raise NotImplementedError("DeleteAction must define a delete method.")

    def get_default_classes(self):
        classes = super(DeleteAction, self).get_default_classes()
        classes += ("btn-danger", "btn-delete")
        return classes
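
# Illustrative sketch (not part of the original module): a concrete
# DeleteAction only supplies display names plus the per-object ``delete``
# call; BatchAction.handle() above does the iteration, logging and user
# messaging. The snapshot API below is hypothetical.
#
#     class DeleteSnapshot(DeleteAction):
#         data_type_singular = "Snapshot"
#         data_type_plural = "Snapshots"
#
#         def delete(self, request, obj_id):
#             api.delete_snapshot(request, obj_id)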


class UpdateAction(object):
    """A table action for cell updates by inline editing."""
    name = "update"
    action_present = _("Update")
    action_past = _("Updated")
    data_type_singular = "update"

    def action(self, request, datum, obj_id, cell_name, new_cell_value):
        self.update_cell(request, datum, obj_id, cell_name, new_cell_value)

    def update_cell(self, request, datum, obj_id, cell_name, new_cell_value):
        """Method for saving data of the cell.

        This method must implement the saving logic of the inline edited
        table cell.
        """
        raise NotImplementedError(
            "UpdateAction must define an update_cell method.")

    def allowed(self, request, datum, cell):
        """Determine whether updating is allowed for the current request.

        This method is meant to be overridden with more specific checks.
        Data of the row and of the cell are passed to the method.
        """
        return True

bartvbl/pixeltoy | Python-Powered-Pixels/src/init.py | gpl-2.0

from graphics import GraphicsController
from graphics import Colour
from core import GameController
from core import Input
from java.util import Random

# internal variables. Please do not use them; they can change name and function at any time.
sys_gameController = GameController()
sys_inputController = Input()
sys_random = Random()
sys_colour = Colour()

def random():
    return sys_random.nextDouble()

def drawRectangle(x, y, width, height):
    GraphicsController.drawRectangle(x, y, width, height)

def drawCircle(x, y, radius):
    GraphicsController.drawCircle(x, y, radius)

def drawLine(x1, y1, x2, y2):
    GraphicsController.drawLine(x1, y1, x2, y2)

def drawPoint(x, y):
    GraphicsController.drawPoint(x, y)

def drawString(x, y, string):
    GraphicsController.drawString(x, y, string)

def useColour(r, g, b, a = 255):
    sys_colour.useColour(r, g, b, a)

def newFrame():
    sys_gameController.newFrame()

def quit():
    sys_gameController.quit()

def isLeftMouseDown():
    return sys_inputController.isLeftMouseDown()

def isRightMouseDown():
    return sys_inputController.isRightMouseDown()

def getMouseWheelDelta():
    return sys_inputController.getMouseWheelDelta()

def isKeyDown(key):
    return sys_inputController.isKeyDown(key)

def loadImage(src, smooth = True, animatedImageCountX = 1):
    return GraphicsController.loadImage(src, smooth, animatedImageCountX)

def drawImage(image, x, y, width, height):
    GraphicsController.drawImage(image, x, y, width, height)
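
# Illustrative sketch (not part of the original module): a minimal frame
# using the helpers above. It is wrapped in a function so importing this
# module stays side-effect free; the function itself is hypothetical.
def _exampleFrame():
    useColour(255, 0, 0)
    drawRectangle(10, 10, 100, 50)
    useColour(0, 255, 0, 128)
    drawCircle(320, 240, 25 + 10 * random())
    if isLeftMouseDown():
        drawString(10, 70, "mouse down")
    newFrame()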

# variables updated by backend
_mouseX = 0
_mouseY = 0
_screenWidth = 640
_screenHeight = 480

sys_colour.resetColour()

magnunor/hyperspy | setup.py | gpl-3.0

# -*- coding: utf-8 -*-
# Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.

from __future__ import print_function

import sys

v = sys.version_info
if v[0] != 3:
    error = "ERROR: From version 0.8.4 HyperSpy requires Python 3. " \
            "For Python 2.7 install Hyperspy 0.8.3 e.g. " \
            "$ pip install --upgrade hyperspy==0.8.3"
    print(error, file=sys.stderr)
    sys.exit(1)

from setuptools import setup, Extension, Command

import warnings

import os
import subprocess
import itertools

# stuff to check presence of compiler:
import distutils.sysconfig
import distutils.ccompiler
from distutils.errors import CompileError, DistutilsPlatformError

setup_path = os.path.dirname(__file__)

import hyperspy.Release as Release

install_req = ['scipy>=0.15',
               'matplotlib>=2.2.3',
               'numpy>=1.10, !=1.13.0',
               'traits>=4.5.0',
               'natsort',
               'requests',
               'tqdm>=0.4.9',
               'sympy',
               'dill',
               'h5py',
               'python-dateutil>=2.5.0',
               'ipyparallel',
               'dask[array]>=0.18',
               'scikit-image>=0.13',
               'pint>=0.8',
               'statsmodels',
               'numexpr',
               'sparse',
               ]

extras_require = {
    "learning": ['scikit-learn'],
    "gui-jupyter": ["hyperspy_gui_ipywidgets"],
    "gui-traitsui": ["hyperspy_gui_traitsui"],
    "mrcz": ["blosc>=1.5", 'mrcz>=0.3.6'],
    "test": ["pytest>=3", "pytest-mpl", "matplotlib>=2.0.2"],
    "doc": ["sphinx>=1.7", "sphinx_rtd_theme"],
    "speed": ["numba<0.39"],
}
extras_require["all"] = list(itertools.chain(*list(extras_require.values())))


def update_version(version):
    release_path = "hyperspy/Release.py"
    lines = []
    with open(release_path, "r") as f:
        for line in f:
            if line.startswith("version = "):
                line = "version = \"%s\"\n" % version
            lines.append(line)
    with open(release_path, "w") as f:
        f.writelines(lines)


# Extensions. Add your extension here:
raw_extensions = [Extension("hyperspy.io_plugins.unbcf_fast",
                            [os.path.join('hyperspy', 'io_plugins', 'unbcf_fast.pyx')]),
                  ]

cleanup_list = []
for leftover in raw_extensions:
    path, ext = os.path.splitext(leftover.sources[0])
    if ext in ('.pyx', '.py'):
        cleanup_list.append(''.join([os.path.join(setup_path, path), '.c*']))
    if os.name == 'nt':
        bin_ext = '.cpython-*.pyd'
    else:
        bin_ext = '.cpython-*.so'
    cleanup_list.append(''.join([os.path.join(setup_path, path), bin_ext]))


def count_c_extensions(extensions):
    c_num = 0
    for extension in extensions:
        # if first source file with extension *.c or *.cpp exists
        # it is cythonised or pure c/c++ extension:
        sfile = extension.sources[0]
        path, ext = os.path.splitext(sfile)
        if os.path.exists(path + '.c') or os.path.exists(path + '.cpp'):
            c_num += 1
    return c_num


def cythonize_extensions(extensions):
    try:
        from Cython.Build import cythonize
        return cythonize(extensions)
    except ImportError:
        warnings.warn("""WARNING: cython required to generate fast c code is not found on this system.
Only slow pure python alternative functions will be available.
To use fast implementation of some functions written in cython either:
a) install cython and re-run the installation,
b) try alternative source distribution containing cythonized C versions of fast code,
c) use binary distribution (i.e. wheels, egg).""")
        return []


def no_cythonize(extensions):
    for extension in extensions:
        sources = []
        for sfile in extension.sources:
            path, ext = os.path.splitext(sfile)
            if ext in ('.pyx', '.py'):
                if extension.language == 'c++':
                    ext = '.cpp'
                else:
                    ext = '.c'
                sfile = path + ext
            sources.append(sfile)
        extension.sources[:] = sources
    return extensions


# to cythonize, or not to cythonize... :
if len(raw_extensions) > count_c_extensions(raw_extensions):
    extensions = cythonize_extensions(raw_extensions)
else:
    extensions = no_cythonize(raw_extensions)

# to compile or not to compile... depends if compiler is present:
compiler = distutils.ccompiler.new_compiler()
assert isinstance(compiler, distutils.ccompiler.CCompiler)
distutils.sysconfig.customize_compiler(compiler)
try:
    compiler.compile([os.path.join(setup_path, 'hyperspy', 'misc', 'etc',
                                   'test_compilers.c')])
except (CompileError, DistutilsPlatformError):
    warnings.warn("""WARNING: C compiler can't be found.
Only slow pure python alternative functions will be available.
To use fast implementation of some functions written in cython/c either:
a) check that you have compiler (EXACTLY SAME as your python
distribution was compiled with) installed,
b) use binary distribution of hyperspy (i.e. wheels, egg, (only osx and win)).
Installation will continue in 5 sec...""")
    extensions = []
    from time import sleep
    sleep(5)  # wait 5 secs for user to notice the message


class Recythonize(Command):

    """cythonize all extensions"""

    description = "(re-)cythonize all changed cython extensions"

    user_options = []

    def initialize_options(self):
        """init options"""
        pass

    def finalize_options(self):
        """finalize options"""
        pass

    def run(self):
        # if there is no cython it is supposed to fail:
        from Cython.Build import cythonize
        global raw_extensions
        global extensions
        cythonize(extensions)


class update_version_when_dev:

    def __enter__(self):
        self.release_version = Release.version

        # Get the hash from the git repository if available
        self.restore_version = False
        if self.release_version.endswith(".dev"):
            p = subprocess.Popen(["git", "describe",
                                  "--tags", "--dirty", "--always"],
                                 stdout=subprocess.PIPE,
                                 shell=True)
            stdout = p.communicate()[0]
            if p.returncode != 0:
                # Git is not available, we keep the version as is
                self.restore_version = False
                self.version = self.release_version
            else:
                gd = stdout[1:].strip().decode()
                # Remove the tag
                gd = gd[gd.index("-") + 1:]
                self.version = self.release_version + "+git."
                self.version += gd.replace("-", ".")
                update_version(self.version)
                self.restore_version = True
        else:
            self.version = self.release_version
        return self.version

    def __exit__(self, type, value, traceback):
        if self.restore_version is True:
            update_version(self.release_version)
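
# Illustrative sketch (not part of the original script): the context manager
# above temporarily rewrites Release.py for ".dev" versions. With a release
# version of "1.4.dev" and a (hypothetical) `git describe` output of
# "v1.3-42-gabc1234", the managed block sees "1.4.dev+git.42.gabc1234" and
# Release.py is restored on exit:
#
#     with update_version_when_dev() as version:
#         print(version)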


with update_version_when_dev() as version:
    setup(
        name="hyperspy",
        package_dir={'hyperspy': 'hyperspy'},
        version=version,
        ext_modules=extensions,
        packages=['hyperspy',
                  'hyperspy.datasets',
                  'hyperspy._components',
                  'hyperspy.datasets',
                  'hyperspy.io_plugins',
                  'hyperspy.docstrings',
                  'hyperspy.drawing',
                  'hyperspy.drawing._markers',
                  'hyperspy.drawing._widgets',
                  'hyperspy.learn',
                  'hyperspy._signals',
                  'hyperspy.utils',
                  'hyperspy.tests',
                  'hyperspy.tests.axes',
                  'hyperspy.tests.component',
                  'hyperspy.tests.datasets',
                  'hyperspy.tests.drawing',
                  'hyperspy.tests.io',
                  'hyperspy.tests.model',
                  'hyperspy.tests.mva',
                  'hyperspy.tests.samfire',
                  'hyperspy.tests.signal',
                  'hyperspy.tests.utils',
                  'hyperspy.tests.misc',
                  'hyperspy.models',
                  'hyperspy.misc',
                  'hyperspy.misc.eels',
                  'hyperspy.misc.eds',
                  'hyperspy.misc.io',
                  'hyperspy.misc.holography',
                  'hyperspy.misc.machine_learning',
                  'hyperspy.external',
                  'hyperspy.external.mpfit',
                  'hyperspy.external.astroML',
                  'hyperspy.samfire_utils',
                  'hyperspy.samfire_utils.segmenters',
                  'hyperspy.samfire_utils.weights',
                  'hyperspy.samfire_utils.goodness_of_fit_tests',
                  ],
        install_requires=install_req,
        tests_require=["pytest>=3.0.2"],
        extras_require=extras_require,
        package_data={
            'hyperspy':
            [
                'tests/drawing/*.png',
                'tests/drawing/data/*.hspy',
                'tests/drawing/plot_signal/*.png',
                'tests/drawing/plot_signal1d/*.png',
                'tests/drawing/plot_signal2d/*.png',
                'tests/drawing/plot_markers/*.png',
                'tests/drawing/plot_model1d/*.png',
                'tests/drawing/plot_model/*.png',
                'tests/drawing/plot_roi/*.png',
                'misc/eds/example_signals/*.hdf5',
                'misc/holography/example_signals/*.hdf5',
                'tests/drawing/plot_mva/*.png',
                'tests/drawing/plot_signal/*.png',
                'tests/drawing/plot_signal1d/*.png',
                'tests/drawing/plot_signal2d/*.png',
                'tests/drawing/plot_markers/*.png',
                'tests/drawing/plot_widgets/*.png',
                'tests/drawing/plot_signal_tools/*.png',
                'tests/io/blockfile_data/*.blo',
                'tests/io/dens_data/*.dens',
                'tests/io/dm_stackbuilder_plugin/test_stackbuilder_imagestack.dm3',
                'tests/io/dm3_1D_data/*.dm3',
                'tests/io/dm3_2D_data/*.dm3',
                'tests/io/dm3_3D_data/*.dm3',
                'tests/io/dm4_1D_data/*.dm4',
                'tests/io/dm4_2D_data/*.dm4',
                'tests/io/dm4_3D_data/*.dm4',
                'tests/io/dm3_locale/*.dm3',
                'tests/io/FEI_new/*.emi',
                'tests/io/FEI_new/*.ser',
                'tests/io/FEI_new/*.npy',
                'tests/io/FEI_old/*.emi',
                'tests/io/FEI_old/*.ser',
                'tests/io/FEI_old/*.npy',
                'tests/io/msa_files/*.msa',
                'tests/io/hdf5_files/*.hdf5',
                'tests/io/hdf5_files/*.hspy',
                'tests/io/tiff_files/*.tif',
                'tests/io/tiff_files/*.dm3',
                'tests/io/npy_files/*.npy',
                'tests/io/unf_files/*.unf',
                'tests/io/bruker_data/*.bcf',
                'tests/io/bruker_data/*.json',
                'tests/io/bruker_data/*.npy',
                'tests/io/bruker_data/*.spx',
                'tests/io/ripple_files/*.rpl',
                'tests/io/ripple_files/*.raw',
                'tests/io/emd_files/*.emd',
                'tests/io/emd_files/fei_emd_files.zip',
                'tests/io/protochips_data/*.npy',
                'tests/io/protochips_data/*.csv',
                'tests/signal/test_find_peaks1D_ohaver/test_find_peaks1D_ohaver.hdf5',
            ],
        },
        author=Release.authors['all'][0],
        author_email=Release.authors['all'][1],
        maintainer='Francisco de la Peña',
        maintainer_email='francisco.pena@univ-lille1.fr',
        description=Release.description,
        long_description=open('README.rst').read(),
        license=Release.license,
        platforms=Release.platforms,
        url=Release.url,
        keywords=Release.keywords,
        cmdclass={
            'recythonize': Recythonize,
        },
        classifiers=[
            "Programming Language :: Python :: 3",
            "Development Status :: 4 - Beta",
            "Environment :: Console",
            "Intended Audience :: Science/Research",
            "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
            "Natural Language :: English",
            "Operating System :: OS Independent",
            "Topic :: Scientific/Engineering",
            "Topic :: Scientific/Engineering :: Physics",
        ],
    )

volodymyrss/3ML | threeML/test/test_ogip.py

import pytest
import os

from astropy.io import fits

from threeML.plugins.OGIPLike import OGIPLike
from threeML.plugins.spectrum.pha_spectrum import PHASpectrum
from threeML.classicMLE.joint_likelihood import JointLikelihood
from threeML.plugins.OGIP.response import OGIPResponse
from threeML.data_list import DataList
from threeML.classicMLE.likelihood_ratio_test import LikelihoodRatioTest
from astromodels.core.model import Model
from astromodels.functions.functions import Powerlaw, Exponential_cutoff, Cutoff_powerlaw
from astromodels.sources.point_source import PointSource
from threeML.plugins.SwiftXRTLike import SwiftXRTLike
from threeML.plugins.OGIP.likelihood_functions import *
from threeML import *
from threeML.io.file_utils import within_directory

__this_dir__ = os.path.join(os.path.abspath(os.path.dirname(__file__)))
__example_dir = os.path.join(__this_dir__, '../../examples')


class AnalysisBuilder(object):
    def __init__(self, plugin):
        self._plugin = plugin

        self._shapes = {}
        self._shapes['normal'] = Powerlaw
        self._shapes['cpl'] = Cutoff_powerlaw

    @property
    def keys(self):
        return self._shapes.keys()

    def get_jl(self, key):
        assert key in self._shapes

        data_list = DataList(self._plugin)

        ps = PointSource('test', 0, 0, spectral_shape=self._shapes[key]())
        model = Model(ps)
        jl = JointLikelihood(model, data_list, verbose=False)
        jl.set_minimizer("minuit")

        return jl
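
# Illustrative sketch (not part of the original tests): AnalysisBuilder wraps
# a plugin into a single-source likelihood fit. A hypothetical stand-alone
# use, assuming test.pha{1} sits in the working directory:
#
#     ogip = OGIPLike('test_ogip', observation='test.pha{1}')
#     jl = AnalysisBuilder(ogip).get_jl('cpl')
#     results, like_values = jl.fit()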
def test_loading_a_generic_pha_file():
with within_directory(__this_dir__):
ogip = OGIPLike('test_ogip', observation='test.pha{1}')
pha_info = ogip.get_pha_files()
assert ogip.name == 'test_ogip'
assert ogip.n_data_points == sum(ogip._mask)
assert sum(ogip._mask) == ogip.n_data_points
assert ogip.tstart is None
assert ogip.tstop is None
assert 'cons_test_ogip' in ogip.nuisance_parameters
assert ogip.nuisance_parameters['cons_test_ogip'].fix == True
assert ogip.nuisance_parameters['cons_test_ogip'].free == False
assert 'pha' in pha_info
assert 'bak' in pha_info
assert 'rsp' in pha_info
ogip.__repr__()
def test_loading_a_loose_ogip_pha_file():
with within_directory(__this_dir__):
ogip = OGIPLike('test_ogip', observation='example_integral.pha')
pha_info = ogip.get_pha_files()
assert ogip.name == 'test_ogip'
assert ogip.n_data_points == sum(ogip._mask)
assert sum(ogip._mask) == ogip.n_data_points
#assert ogip.tstart is None
#assert ogip.tstop is None
assert 'cons_test_ogip' in ogip.nuisance_parameters
assert ogip.nuisance_parameters['cons_test_ogip'].fix == True
assert ogip.nuisance_parameters['cons_test_ogip'].free == False
assert 'pha' in pha_info
#assert 'bak' in pha_info
assert 'rsp' in pha_info
ogip.__repr__()
def test_loading_bad_keywords_file():
with within_directory(__this_dir__):
pha_fn='example_integral_spi.pha'
rsp_fn='example_integral_spi.rsp'
pha_spectrum=PHASpectrum(pha_fn,rsp_file=rsp_fn)
assert type(pha_spectrum.is_poisson) == bool
ogip = OGIPLike('test_ogip', observation=pha_fn, response=rsp_fn)
ogip.__repr__()
def test_pha_files_in_generic_ogip_constructor_spec_number_in_file_name():
with within_directory(__this_dir__):
ogip = OGIPLike('test_ogip', observation='test.pha{1}')
ogip.set_active_measurements('all')
pha_info = ogip.get_pha_files()
for key in ['pha', 'bak']:
assert isinstance(pha_info[key], PHASpectrum)
assert pha_info['pha'].background_file == 'test_bak.pha{1}'
assert pha_info['pha'].ancillary_file is None
assert pha_info['pha'].instrument == 'GBM_NAI_03'
assert pha_info['pha'].mission == 'GLAST'
assert pha_info['pha'].is_poisson == True
assert pha_info['pha'].n_channels == ogip.n_data_points
assert pha_info['pha'].n_channels == len(pha_info['pha'].rates)
# Test that Poisson rates cannot call rate error
assert pha_info['pha'].rate_errors is None
assert sum(pha_info['pha'].sys_errors == np.zeros_like(pha_info['pha'].rates)) == pha_info['bak'].n_channels
assert pha_info['pha'].response_file.split('/')[-1] == 'glg_cspec_n3_bn080916009_v07.rsp'
assert pha_info['pha'].scale_factor == 1.0
assert pha_info['bak'].background_file is None
# Test that we cannot get a bak file
#
#
# with pytest.raises(KeyError):
#
# _ = pha_info['bak'].background_file
# Test that we cannot get a anc file
# with pytest.raises(KeyError):
#
# _ = pha_info['bak'].ancillary_file
# Test that we cannot get a RSP file
assert pha_info['bak'].response_file is None
assert pha_info['bak'].ancillary_file is None
# with pytest.raises(AttributeError):
# _ = pha_info['bak'].response_file
assert pha_info['bak'].instrument == 'GBM_NAI_03'
assert pha_info['bak'].mission == 'GLAST'
assert pha_info['bak'].is_poisson == False
assert pha_info['bak'].n_channels == ogip.n_data_points
assert pha_info['bak'].n_channels == len(pha_info['pha'].rates)
assert len(pha_info['bak'].rate_errors) == pha_info['bak'].n_channels
assert sum(pha_info['bak'].sys_errors == np.zeros_like(pha_info['pha'].rates)) == pha_info['bak'].n_channels
assert pha_info['bak'].scale_factor == 1.0
assert isinstance(pha_info['rsp'], OGIPResponse)
def test_pha_files_in_generic_ogip_constructor_spec_number_in_arguments():
with within_directory(__this_dir__):
ogip = OGIPLike('test_ogip', observation='test.pha', spectrum_number=1)
ogip.set_active_measurements('all')
pha_info = ogip.get_pha_files()
for key in ['pha', 'bak']:
assert isinstance(pha_info[key], PHASpectrum)
assert pha_info['pha'].background_file == 'test_bak.pha{1}'
assert pha_info['pha'].ancillary_file is None
assert pha_info['pha'].instrument == 'GBM_NAI_03'
assert pha_info['pha'].mission == 'GLAST'
assert pha_info['pha'].is_poisson == True
assert pha_info['pha'].n_channels == ogip.n_data_points
assert pha_info['pha'].n_channels == len(pha_info['pha'].rates)
# Test that Poisson rates cannot call rate error
assert pha_info['pha'].rate_errors is None
assert sum(pha_info['pha'].sys_errors == np.zeros_like(pha_info['pha'].rates)) == pha_info['bak'].n_channels
assert pha_info['pha'].response_file.split('/')[-1] == 'glg_cspec_n3_bn080916009_v07.rsp'
assert pha_info['pha'].scale_factor == 1.0
assert pha_info['bak'].background_file is None
# Test that we cannot get a bak file
#
# with pytest.raises(KeyError):
#
# _ = pha_info['bak'].background_file
#
# Test that we cannot get a anc file
# with pytest.raises(KeyError):
#
# _ = pha_info['bak'].ancillary_file
assert pha_info['bak'].response_file is None
assert pha_info['bak'].ancillary_file is None
# # Test that we cannot get a RSP file
# with pytest.raises(AttributeError):
# _ = pha_info['bak'].response_file
assert pha_info['bak'].instrument == 'GBM_NAI_03'
assert pha_info['bak'].mission == 'GLAST'
assert pha_info['bak'].is_poisson == False
assert pha_info['bak'].n_channels == ogip.n_data_points
assert pha_info['bak'].n_channels == len(pha_info['pha'].rates)
assert len(pha_info['bak'].rate_errors) == pha_info['bak'].n_channels
assert sum(pha_info['bak'].sys_errors == np.zeros_like(pha_info['pha'].rates)) == pha_info['bak'].n_channels
assert pha_info['bak'].scale_factor == 1.0
assert isinstance(pha_info['rsp'], OGIPResponse)
def test_ogip_energy_selection():
with within_directory(__this_dir__):
ogip = OGIPLike('test_ogip', observation='test.pha{1}')
assert sum(ogip._mask) == sum(ogip.quality.good)
# Test that selecting a subset reduces the number of data points
ogip.set_active_measurements("10-30")
assert sum(ogip._mask) == ogip.n_data_points
assert sum(ogip._mask) < 128
# Test selecting all channels
ogip.set_active_measurements("all")
assert sum(ogip._mask) == ogip.n_data_points
assert sum(ogip._mask) == 128
# Test channel setting
ogip.set_active_measurements(exclude=['c0-c1'])
assert sum(ogip._mask) == ogip.n_data_points
assert sum(ogip._mask) == 126
# Test mixed ene/chan setting
ogip.set_active_measurements(exclude=['0-c1'], verbose=True)
assert sum(ogip._mask) == ogip.n_data_points
assert sum(ogip._mask) == 126
# Test that energies cannot be input backwards
with pytest.raises(AssertionError):
ogip.set_active_measurements("50-30")
with pytest.raises(AssertionError):
ogip.set_active_measurements("c20-c10")
with pytest.raises(AssertionError):
ogip.set_active_measurements("c100-0")
with pytest.raises(AssertionError):
ogip.set_active_measurements("c1-c200")
with pytest.raises(AssertionError):
ogip.set_active_measurements("10-c200")
ogip.set_active_measurements('reset')
assert sum(ogip._mask) == sum(ogip.quality.good)
def test_ogip_rebinner():
with within_directory(__this_dir__):
ogip = OGIPLike('test_ogip', observation='test.pha{1}')
n_data_points = 128
ogip.set_active_measurements("all")
assert ogip.n_data_points == n_data_points
ogip.rebin_on_background(min_number_of_counts=100)
assert ogip.n_data_points < 128
with pytest.raises(AssertionError):
ogip.set_active_measurements('all')
ogip.remove_rebinning()
assert ogip._rebinner is None
assert ogip.n_data_points == n_data_points
ogip.view_count_spectrum()
def test_various_effective_area():
with within_directory(__this_dir__):
ogip = OGIPLike('test_ogip', observation='test.pha{1}')
ogip.use_effective_area_correction()
ogip.fix_effective_area_correction(
)
def test_simulating_data_sets():
with within_directory(__this_dir__):
ogip = OGIPLike('test_ogip', observation='test.pha{1}')
with pytest.raises(AssertionError):
_ = ogip.simulated_parameters
n_data_points = 128
ogip.set_active_measurements("all")
assert ogip._n_synthetic_datasets == 0
ab = AnalysisBuilder(ogip)
_ = ab.get_jl('normal')
new_ogip = ogip.get_simulated_dataset('sim')
assert new_ogip.name == 'sim'
assert ogip._n_synthetic_datasets == 1
assert new_ogip.n_data_points == n_data_points
assert new_ogip.n_data_points == sum(new_ogip._mask)
assert sum(new_ogip._mask) == new_ogip.n_data_points
assert new_ogip.tstart is None
assert new_ogip.tstop is None
assert 'cons_sim' in new_ogip.nuisance_parameters
assert new_ogip.nuisance_parameters['cons_sim'].fix == True
assert new_ogip.nuisance_parameters['cons_sim'].free == False
pha_info = new_ogip.get_pha_files()
assert 'pha' in pha_info
assert 'bak' in pha_info
assert 'rsp' in pha_info
del ogip
del new_ogip
ogip = OGIPLike('test_ogip', observation='test.pha{1}')
ab = AnalysisBuilder(ogip)
_ = ab.get_jl('normal')
        # Now check that generating a lot of data sets works
sim_data_sets = [ogip.get_simulated_dataset('sim%d' % i) for i in range(100)]
assert len(sim_data_sets) == ogip._n_synthetic_datasets
for i, ds in enumerate(sim_data_sets):
assert ds.name == "sim%d" % i
assert sum(ds._mask) == sum(ogip._mask)
assert ds._rebinner is None
def test_likelihood_ratio_test():
with within_directory(__this_dir__):
ogip = OGIPLike('test_ogip', observation='test.pha{1}')
ogip.set_active_measurements("all")
ab = AnalysisBuilder(ogip)
jl1 = ab.get_jl('normal')
res1, _ = jl1.fit(compute_covariance=True)
jl2 = ab.get_jl('cpl')
res2, _ = jl2.fit(compute_covariance=True)
lrt = LikelihoodRatioTest(jl1, jl2)
null_hyp_prob, TS, data_frame, like_data_frame = lrt.by_mc(n_iterations=50, continue_on_failure=True)
def test_xrt():
with within_directory(__example_dir):
trigger = "GRB110731A"
dec = -28.546
ra = 280.52
xrt_dir = 'xrt'
xrt = SwiftXRTLike("XRT", observation=os.path.join(xrt_dir, "xrt_src.pha"),
background=os.path.join(xrt_dir, "xrt_bkg.pha"),
response=os.path.join(xrt_dir, "xrt.rmf"),
arf_file=os.path.join(xrt_dir, "xrt.arf"))
spectral_model = Powerlaw()
ptsrc = PointSource(trigger, ra, dec, spectral_shape=spectral_model)
model = Model(ptsrc)
data = DataList(xrt)
jl = JointLikelihood(model, data, verbose=False)
def test_swift_gbm():
with within_directory(__example_dir):
gbm_dir = "gbm"
bat_dir = "bat"
bat = OGIPLike('BAT',
observation=os.path.join(bat_dir, 'gbm_bat_joint_BAT.pha'),
response=os.path.join(bat_dir, 'gbm_bat_joint_BAT.rsp'))
bat.set_active_measurements('15-150')
bat.view_count_spectrum()
nai6 = OGIPLike('n6',
os.path.join(gbm_dir, 'gbm_bat_joint_NAI_06.pha'),
os.path.join(gbm_dir, 'gbm_bat_joint_NAI_06.bak'),
os.path.join(gbm_dir, 'gbm_bat_joint_NAI_06.rsp'),
spectrum_number=1)
nai6.set_active_measurements('8-900')
nai6.view_count_spectrum()
bgo0 = OGIPLike('b0',
os.path.join(gbm_dir, 'gbm_bat_joint_BGO_00.pha'),
os.path.join(gbm_dir, 'gbm_bat_joint_BGO_00.bak'),
os.path.join(gbm_dir, 'gbm_bat_joint_BGO_00.rsp'),
spectrum_number=1)
bgo0.set_active_measurements('250-10000')
bgo0.view_count_spectrum()
bat.use_effective_area_correction(.2, 1.5)
bat.fix_effective_area_correction(.6)
bat.use_effective_area_correction(.2, 1.5)
band = Band()
model = Model(PointSource('joint_fit', 0, 0, spectral_shape=band))
band.K = .04
band.xp = 300.
data_list = DataList(bat, nai6, bgo0)
jl = JointLikelihood(model, data_list)
_ = jl.fit()
_ = display_spectrum_model_counts(jl, step=False)
def test_pha_write():
with within_directory(__this_dir__):
ogip = OGIPLike('test_ogip', observation='test.pha{1}')
ogip.write_pha('test_write', overwrite=True)
written_ogip = OGIPLike('write_ogip', observation='test_write.pha{1}')
pha_info = written_ogip.get_pha_files()
for key in ['pha', 'bak']:
assert isinstance(pha_info[key], PHASpectrum)
assert pha_info['pha'].background_file == 'test_bak.pha{1}'
assert pha_info['pha'].ancillary_file is None
assert pha_info['pha'].instrument == 'GBM_NAI_03'
assert pha_info['pha'].mission == 'GLAST'
assert pha_info['pha'].is_poisson == True
assert pha_info['pha'].n_channels == len(pha_info['pha'].rates)
def test_pha_write_no_bkg():
with within_directory(__this_dir__):
        # Create a copy of the test file with the background reference removed
        f = fits.open("test.pha")
        f['SPECTRUM'].data['BACKFILE'] = "NONE"
        f.writeto("test_pha_nobkg.pha", overwrite=True)
ogip = OGIPLike('test_ogip', observation='test_pha_nobkg.pha{1}')
ogip.write_pha('test_write_nobkg', overwrite=True)
written_ogip = OGIPLike('write_ogip', observation='test_write_nobkg.pha{1}')
pha_info = written_ogip.get_pha_files()
for key in ['pha']:
assert isinstance(pha_info[key], PHASpectrum)
f = fits.open("test_write_nobkg.pha")
assert f['SPECTRUM'].data['BACKFILE'][0] == "NONE"
assert pha_info['pha'].background_file is None
assert pha_info['pha'].ancillary_file is None
assert pha_info['pha'].instrument == 'GBM_NAI_03'
assert pha_info['pha'].mission == 'GLAST'
assert pha_info['pha'].is_poisson == True
assert pha_info['pha'].n_channels == len(pha_info['pha'].rates)
def test_likelihood_functions():
obs_cnts = np.array([10])
obs_bkg = np.array([5])
bkg_err = np.array([1])
exp_cnts = np.array([5])
exp_bkg = np.array([5])
ratio = 1
test = poisson_log_likelihood_ideal_bkg(observed_counts=obs_cnts,
expected_bkg_counts=exp_bkg,
expected_model_counts=exp_bkg)
assert test == (-2.0785616431350551, 5)
test = poisson_observed_poisson_background(observed_counts=obs_cnts,
background_counts=obs_bkg,
exposure_ratio=ratio,
expected_model_counts=exp_cnts)
assert test == (-3.8188638237465984, 5.0)
test = poisson_observed_poisson_background_xs(observed_counts=obs_cnts,
background_counts=obs_bkg,
exposure_ratio=ratio,
expected_model_counts=exp_cnts)
assert test == -0.
test = poisson_observed_gaussian_background(observed_counts=obs_cnts,
background_counts=obs_bkg,
background_error=bkg_err,
expected_model_counts=exp_cnts)
assert test == (-2, 5.0)
| bsd-3-clause | 2,276,975,087,790,802,700 | 31.767318 | 116 | 0.590742 | false |
zillolo/vsut-python | vsut/assertion.py | 1 | 6816 | """Assertions for testing of conditions.
This module contains assertions that can be used in unit testing.
"""
def assertEqual(expected, actual, message=""):
"""Checks whether expected is equal to actual.
Args:
expected (object): The expected object.
actual (object): The actual object.
message (Optional[str]): An optional error message,
that is displayed if the assertion fails.
Raises:
AssertResult: If an assertion fails.
"""
if expected != actual:
if message == "":
message = "{0} != {1}".format(expected, actual)
raise AssertResult(assertEqual.__name__, message)
def assertNotEqual(expected, actual, message=""):
"""Checks whether expected is not equal to actual.
Args:
expected (object): The expected object.
actual (object): The actual object.
message (Optional[str]): An optional error message,
that is displayed if the assertion fails.
Raises:
AssertResult: If an assertion fails.
"""
if expected == actual:
if message == "":
message = "{0} == {1}".format(expected, actual)
raise AssertResult(assertNotEqual.__name__, message)
def assertTrue(expected, message=""):
"""Checks whether expected is equal to True.
Args:
expected (object): The expected object.
message (Optional[str]): An optional error message,
that is displayed if the assertion fails.
Raises:
AssertResult: If an assertion fails.
"""
if expected != True:
if message == "":
message = "{0} != True".format(expected)
raise AssertResult(assertTrue.__name__, message)
def assertFalse(expected, message=""):
"""Checks whether expected is equal to False.
Args:
expected (object): The expected object.
message (Optional[str]): An optional error message,
that is displayed if the assertion fails.
Raises:
AssertResult: If an assertion fails.
"""
if expected != False:
if message == "":
message = "{0} != False".format(expected)
raise AssertResult(assertFalse.__name__, message)
def assertIs(expected, actual, message=""):
"""Checks whether expected is actual.
Args:
expected (object): The expected object.
actual (object): The actual object.
message (Optional[str]): An optional error message,
that is displayed if the assertion fails.
Raises:
AssertResult: If an assertion fails.
"""
if expected is not actual:
if message == "":
message = "{0} is not {1}".format(expected, actual)
raise AssertResult(assertIs.__name__, message)
def assertIsNot(expected, actual, message=""):
"""Checks whether expected is not actual.
Args:
expected (object): The expected object.
actual (object): The actual object.
message (Optional[str]): An optional error message,
that is displayed if the assertion fails.
Raises:
AssertResult: If an assertion fails.
"""
if expected is actual:
if message == "":
message = "{0} is {1}".format(expected, actual)
raise AssertResult(assertIsNot.__name__, message)
def assertIsNone(expected, message=""):
"""Checks whether expected is None.
Args:
expected (object): The expected object.
message (Optional[str]): An optional error message,
that is displayed if the assertion fails.
Raises:
AssertResult: If an assertion fails.
"""
    if expected is not None:
if message == "":
message = "{0} is not None".format(expected)
raise AssertResult(assertIsNone.__name__, message)
def assertIsNotNone(expected, message=""):
"""Checks whether expected is not None.
Args:
expected (object): The expected object.
message (Optional[str]): An optional error message,
that is displayed if the assertion fails.
Raises:
AssertResult: If an assertion fails.
"""
    if expected is None:
if message == "":
message = "{0} is None".format(expected)
raise AssertResult(assertIsNotNone.__name__, message)
def assertIn(expected, collection, message=""):
"""Checks whether expected is in collection.
Args:
expected (object): The expected object.
collection (object): The collection to check in.
message (Optional[str]): An optional error message,
that is displayed if the assertion fails.
Raises:
AssertResult: If an assertion fails.
"""
if expected not in collection:
if message == "":
message = "{0} not in {1}".format(expected, collection)
raise AssertResult(assertIn.__name__, message)
def assertNotIn(expected, collection, message=""):
"""Checks whether expected is not in collection.
Args:
expected (object): The expected object.
collection (object): The collection to check in.
message (Optional[str]): An optional error message,
that is displayed if the assertion fails.
Raises:
AssertResult: If an assertion fails.
"""
if expected in collection:
if message == "":
message = "{0} in {1}".format(expected, collection)
raise AssertResult(assertNotIn.__name__, message)
def assertRaises(exception, func, *args, message=""):
"""Checks whether func raises an exception of type 'exception'.
Args:
exception (Exception): The exception to check for.
func (Function): The function to execute.
message (Optional[str]): An optional error message,
that is displayed if the assertion fails.
*args (args): The arguments of the function.
"""
    try:
        func(*args)
    except exception:
        pass
    else:
        if message == "":
            message = "{0} did not raise {1}".format(func.__name__,
                                                     exception.__name__)
        raise AssertResult(assertRaises.__name__, message)
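# Example: assertRaises(ValueError, int, "x") passes, while
# assertRaises(ValueError, int, "7") raises an AssertResult because no
# ValueError occurs.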
class AssertResult(Exception):
"""The result of an assertion.
Attributes:
assertion (str): The name of the assertion that delivered this result.
message (str): A message that came with the result.
"""
def __init__(self, assertion, message):
self.assertion = assertion
self.message = message
| mit | 5,127,995,970,307,482,000 | 31 | 82 | 0.58304 | false |
lukasdragon/PythonScripts | PasteBinScrapper.py | 1 | 4908 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#===============================#
#CODE PROPERTY OF LUKAS G. OLSON#
#https://github.com/lukasdragon #
#===============================#
#Imports
from urllib2 import urlopen
from ConfigParser import SafeConfigParser
import json
import os
import time
import sys
from multiprocessing.dummy import Pool as ThreadPool
######################################
#DONT MODIFY ANYTHING PAST THIS POINT#
###UNLESS YOU KNOW WHAT YOURE DOING###
#ONLY MODIFY SETTINGS WITH CONFIG.INI#
######################################
class Severity():
log = 0;
message = 1;
warning = 2;
failure = 3;
class Settings:
url = "";
Key_Words = [];
filePath = "";
idleTime = 0;
LogSeverity = Severity.log;
threads = 4;
#Color Codes!
COKGREEN = '\033[92m'; #0
COKBLUE = '\033[94m'; #1
CWARNING = '\033[93m'; #2
CFAIL = '\033[91m'; #3
CRESET ='\033[0m';
CBOLD = '\033[01m';
def log_message(severity,string):
color = "";
prefix = "";
if severity == Severity.log:
color = COKGREEN;
prefix = "LOG: "
elif severity == Severity.message:
color = COKBLUE;
prefix = "MSG: "
elif severity == Severity.warning:
color = CBOLD + CWARNING;
elif severity == Severity.failure:
color = CBOLD + CFAIL;
if severity >= Settings.LogSeverity:
print(color + prefix + string + CRESET);
#read and write config file
try:
config = SafeConfigParser();
config.read('config.ini');
Settings.url = config.get('main', 'api_url')
Settings.idleTime = config.getfloat('main', 'waitperiod')
Settings.filePath = config.get('main','filePath')
Settings.LogSeverity = config.getint('main','logLevel')
Settings.Key_Words = [e.strip() for e in config.get('main', 'keyWords').split(',')]
Settings.threads = config.getint('main', 'threads')
except:
log_message(Severity.failure,"config.ini not found! Making one...")
config = SafeConfigParser()
config.read('config.ini')
config.add_section('main')
config.set('main', 'api_url', "https://pastebin.com/api_scraping.php")
config.set('main', 'filePath', '/files/')
config.set('main', 'waitperiod', '40.0')
config.set('main', 'threads', '4')
config.set('main', '; Minimum log severity', '')
config.set('main', '; 0 = log | 1 = message | 2 = warning | 3 = failure', '')
config.set('main', 'logLevel', '0')
config.set('main', '; The keywords required to download a file, seperated by commas', '')
config.set('main', 'keyWords', "discord.gg,bank,leak")
with open('config.ini', 'w') as f:
config.write(f)
log_message(Severity.failure, "Please edit config.ini and relaunch the script!")
sys.exit();
#Gets the json file
def get_jsonparsed_data(url):
response = urlopen(url);
data = str(response.read());
return json.loads(data);
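#The scraping API is assumed to return a JSON array of paste objects,
#roughly [{"key": "...", "scrape_url": "https://..."}, ...] (shape
#inferred from the fields used below, not from official documentation).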
#Saves a text file
def save_file(content, title):
file = open(Settings.filePath + title + ".txt", "w");
file.write(content);
file.close();
#Checks for keywords
def is_interesting(dictionary, content):
for word in dictionary:
if word in content.lower():
return True;
def scanData(paste):
    rescannedFiles = 0;
    if paste["key"] not in viewedFiles:
        log_message(0, "Found a paste: " + paste["key"])
        content = urlopen(paste["scrape_url"]).read();
        viewedFiles.append(paste["key"]);
        if is_interesting(Settings.Key_Words, content):
            log_message(1, "Found a match!: " + paste["key"]);
            save_file(content, paste["key"])
    else:
        rescannedFiles += 1;
        log_message(0, "Paste already scanned... Increasing wait time!")
    return rescannedFiles;
#Startup Image
log_message(1,"========BY=======");
log_message(1,"PasteBin Scrapper");
log_message(1,"======Lukas======");
#Creates the paste save folder if it doesn't exist
Settings.filePath = (os.path.dirname(os.path.realpath(__file__)) + Settings.filePath);
try:
os.makedirs(Settings.filePath);
except:
pass;
starttime=time.time()
ticks, rescannedFiles, idletime = 0, 0, 0; #idletime must exist before the first sleep
viewedFiles = [];
#starts threading stuff
pool = ThreadPool(Settings.threads)
while True:
try:
data = get_jsonparsed_data(Settings.url);
        #Scan each entry in parallel; each worker gets its own paste dict,
        #so keys and downloaded contents stay matched up. The pool is
        #reused across iterations, so it must not be closed inside the loop.
        results = pool.map(scanData, data);
for x in results:
rescannedFiles += x;
ticks+=1;
idletime = ((rescannedFiles * 2) / ticks );
log_message(1, "Finished Tick!, waiting " + str(Settings.idleTime + idletime) + " seconds!");
except:
pass;
    time.sleep((Settings.idleTime + idletime) - ((time.time() - starttime) % (Settings.idleTime + idletime)));
| unlicense | -7,679,235,242,264,746,000 | 26.886364 | 109 | 0.602078 | false |
MoisesTedeschi/python | Scripts-Python/Modulos-Diversos/python-com-scrapy/Lib/site-packages/queuelib/tests/test_rrqueue.py | 1 | 4549 | import os
from queuelib.rrqueue import RoundRobinQueue
from queuelib.queue import (
FifoMemoryQueue, LifoMemoryQueue, FifoDiskQueue, LifoDiskQueue,
FifoSQLiteQueue, LifoSQLiteQueue,
)
from queuelib.tests import (QueuelibTestCase, track_closed)
# hack to prevent py.test from discovering base test class
class base:
class RRQueueTestBase(QueuelibTestCase):
def setUp(self):
QueuelibTestCase.setUp(self)
self.q = RoundRobinQueue(self.qfactory)
def qfactory(self, key):
raise NotImplementedError
def test_len_nonzero(self):
assert not self.q
self.assertEqual(len(self.q), 0)
self.q.push(b'a', '3')
assert self.q
self.q.push(b'b', '1')
self.q.push(b'c', '2')
self.q.push(b'd', '1')
self.assertEqual(len(self.q), 4)
self.q.pop()
self.q.pop()
self.q.pop()
self.q.pop()
assert not self.q
self.assertEqual(len(self.q), 0)
def test_close(self):
self.q.push(b'a', '3')
self.q.push(b'b', '1')
self.q.push(b'c', '2')
self.q.push(b'd', '1')
iqueues = self.q.queues.values()
self.assertEqual(sorted(self.q.close()), ['1', '2', '3'])
assert all(q.closed for q in iqueues)
def test_close_return_active(self):
self.q.push(b'b', '1')
self.q.push(b'c', '2')
self.q.push(b'a', '3')
self.q.pop()
self.assertEqual(sorted(self.q.close()), ['2', '3'])
class FifoTestMixin(object):
def test_push_pop_key(self):
self.q.push(b'a', '1')
self.q.push(b'b', '1')
self.q.push(b'c', '2')
self.q.push(b'd', '2')
self.assertEqual(self.q.pop(), b'a')
self.assertEqual(self.q.pop(), b'c')
self.assertEqual(self.q.pop(), b'b')
self.assertEqual(self.q.pop(), b'd')
self.assertEqual(self.q.pop(), None)
class LifoTestMixin(object):
def test_push_pop_key(self):
self.q.push(b'a', '1')
self.q.push(b'b', '1')
self.q.push(b'c', '2')
self.q.push(b'd', '2')
self.assertEqual(self.q.pop(), b'b')
self.assertEqual(self.q.pop(), b'd')
self.assertEqual(self.q.pop(), b'a')
self.assertEqual(self.q.pop(), b'c')
self.assertEqual(self.q.pop(), None)
class FifoMemoryRRQueueTest(FifoTestMixin, base.RRQueueTestBase):
def qfactory(self, key):
return track_closed(FifoMemoryQueue)()
class LifoMemoryRRQueueTest(LifoTestMixin, base.RRQueueTestBase):
def qfactory(self, key):
return track_closed(LifoMemoryQueue)()
class DiskTestMixin(object):
def test_nonserializable_object_one(self):
self.assertRaises(TypeError, self.q.push, lambda x: x, '0')
self.assertEqual(self.q.close(), [])
def test_nonserializable_object_many_close(self):
self.q.push(b'a', '3')
self.q.push(b'b', '1')
self.assertRaises(TypeError, self.q.push, lambda x: x, '0')
self.q.push(b'c', '2')
self.assertEqual(self.q.pop(), b'a')
self.assertEqual(sorted(self.q.close()), ['1', '2'])
def test_nonserializable_object_many_pop(self):
self.q.push(b'a', '3')
self.q.push(b'b', '1')
self.assertRaises(TypeError, self.q.push, lambda x: x, '0')
self.q.push(b'c', '2')
self.assertEqual(self.q.pop(), b'a')
self.assertEqual(self.q.pop(), b'b')
self.assertEqual(self.q.pop(), b'c')
self.assertEqual(self.q.pop(), None)
self.assertEqual(self.q.close(), [])
class FifoDiskRRQueueTest(FifoTestMixin, DiskTestMixin, base.RRQueueTestBase):
def qfactory(self, key):
path = os.path.join(self.qdir, str(key))
return track_closed(FifoDiskQueue)(path)
class LifoDiskRRQueueTest(LifoTestMixin, DiskTestMixin, base.RRQueueTestBase):
def qfactory(self, key):
path = os.path.join(self.qdir, str(key))
return track_closed(LifoDiskQueue)(path)
class FifoSQLiteRRQueueTest(FifoTestMixin, DiskTestMixin, base.RRQueueTestBase):
def qfactory(self, key):
path = os.path.join(self.qdir, str(key))
return track_closed(FifoSQLiteQueue)(path)
class LifoSQLiteRRQueueTest(LifoTestMixin, DiskTestMixin, base.RRQueueTestBase):
def qfactory(self, key):
path = os.path.join(self.qdir, str(key))
return track_closed(LifoSQLiteQueue)(path)
| gpl-3.0 | 117,973,751,437,886,510 | 30.590278 | 80 | 0.590899 | false |
kubevirt/vAdvisor | tests/store/test_event.py | 1 | 1720 | from vadvisor.store.event import InMemoryStore
import pytest
from freezegun import freeze_time
from datetime import datetime, timedelta
@pytest.fixture
@freeze_time("2012-01-14 03:00:00")
def expired_store():
store = InMemoryStore(60)
# Insert old data
store.put('old')
store.put('old')
store.put('old')
return store
@pytest.fixture
@freeze_time("2012-01-14 03:01:30")
def new_store(expired_store):
# Insert newer data
expired_store.put('new')
expired_store.put('new')
expired_store.put('new')
return expired_store
@pytest.fixture
@freeze_time("2012-01-14 03:01:50")
def newest_store(new_store):
# Insert newer data
new_store.put('newest')
new_store.put('newest')
new_store.put('newest')
return new_store
def test_empty_store():
store = InMemoryStore()
assert store.get() == []
@freeze_time("2012-01-14 03:02:00")
def test_expire_on_get(expired_store):
expired_store.get()
assert expired_store.get() == []
@freeze_time("2012-01-14 03:02:00")
def test_get_all_new(new_store):
assert new_store.get() == ['new', 'new', 'new']
@freeze_time("2012-01-14 03:02:00")
def test_get_two_new(new_store):
assert new_store.get(elements=2) == ['new', 'new']
@freeze_time("2012-01-14 03:02:00")
def test_get_not_older_than(newest_store):
events = newest_store.get(
elements=2,
start_time=datetime.utcnow() - timedelta(seconds=20)
)
assert events == ['newest', 'newest']
@freeze_time("2012-01-14 03:02:00")
def test_get_not_newer_than(newest_store):
events = newest_store.get(
elements=2,
stop_time=datetime.utcnow() - timedelta(seconds=20)
)
assert events == ['new', 'new']
| gpl-3.0 | 427,733,841,917,092,160 | 22.243243 | 60 | 0.651163 | false |
plxaye/chromium | src/tools/perf/perf_tools/startup_benchmark.py | 1 | 1518 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from telemetry.page import page_benchmark
# Test how long Chrome takes to load when warm.
class PerfWarm(page_benchmark.PageBenchmark):
def __init__(self):
super(PerfWarm, self).__init__(needs_browser_restart_after_each_run=True,
discard_first_result=True)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArg('--dom-automation')
options.AppendExtraBrowserArg('--reduce-security-for-dom-automation-tests')
def MeasurePage(self, page, tab, results):
result = tab.EvaluateJavaScript("""
domAutomationController.getBrowserHistogram(
"Startup.BrowserMessageLoopStartTimeFromMainEntry_Exact")
""")
result = json.loads(result)
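    # Assumed histogram JSON shapes (inferred from the fields read below,
    # not from Chrome documentation):
    #   exact variant:  {"params": {"max": ...}, ...}
    #   legacy variant: {"buckets": [{"low": ..., "high": ...}, ...]}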
startup_time_ms = 0
if 'params' in result:
startup_time_ms = result['params']['max']
else:
# Support old reference builds that don't contain the new
# Startup.BrowserMessageLoopStartTimeFromMainEntry_Exact histogram.
result = tab.EvaluateJavaScript("""
domAutomationController.getBrowserHistogram(
"Startup.BrowserMessageLoopStartTimeFromMainEntry")
""")
result = json.loads(result)
startup_time_ms = \
(result['buckets'][0]['high'] + result['buckets'][0]['low']) / 2
results.Add('startup_time', 'ms', startup_time_ms)
| apache-2.0 | 1,906,674,738,062,525,000 | 37.923077 | 79 | 0.68643 | false |
3dfxsoftware/cbss-addons | mrp_default_location/__openerp__.py | 1 | 1638 | # -*- coding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com
# All Rights Reserved.
# [email protected]
############################################################################
# Coded by: julio ([email protected])
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'mrp default location',
"version" : "1.1",
'author': 'Vauxoo',
'depends': ['mrp','product'],
'description': """
    Adds the default locations when the product is selected in a production order.
""",
    'update_xml': [
'product_category_view.xml',
],
'active': False,
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gpl-2.0 | -5,488,607,742,571,498,000 | 39.95 | 85 | 0.550672 | false |
ianfab/variantfishtest | chess/syzygy.py | 1 | 53977 | # -*- coding: utf-8 -*-
#
# This file is part of the python-chess library.
# Copyright (C) 2012-2015 Niklas Fiekas <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import chess
import mmap
import os
import struct
import sys
import threading
UINT64 = struct.Struct("<Q")
UINT32 = struct.Struct("<I")
USHORT = struct.Struct("<H")
WDL_MAGIC = [0x71, 0xE8, 0x23, 0x5D]
DTZ_MAGIC = [0xD7, 0x66, 0x0C, 0xA5]
OFFDIAG = [
0, -1, -1, -1, -1, -1, -1, -1,
1, 0, -1, -1, -1, -1, -1, -1,
1, 1, 0, -1, -1, -1, -1, -1,
1, 1, 1, 0, -1, -1, -1, -1,
1, 1, 1, 1, 0, -1, -1, -1,
1, 1, 1, 1, 1, 0, -1, -1,
1, 1, 1, 1, 1, 1, 0, -1,
1, 1, 1, 1, 1, 1, 1, 0,
]
TRIANGLE = [
6, 0, 1, 2, 2, 1, 0, 6,
0, 7, 3, 4, 4, 3, 7, 0,
1, 3, 8, 5, 5, 8, 3, 1,
2, 4, 5, 9, 9, 5, 4, 2,
2, 4, 5, 9, 9, 5, 4, 2,
1, 3, 8, 5, 5, 8, 3, 1,
0, 7, 3, 4, 4, 3, 7, 0,
6, 0, 1, 2, 2, 1, 0, 6,
]
FLIPDIAG = [
0, 8, 16, 24, 32, 40, 48, 56,
1, 9, 17, 25, 33, 41, 49, 57,
2, 10, 18, 26, 34, 42, 50, 58,
3, 11, 19, 27, 35, 43, 51, 59,
4, 12, 20, 28, 36, 44, 52, 60,
5, 13, 21, 29, 37, 45, 53, 61,
6, 14, 22, 30, 38, 46, 54, 62,
7, 15, 23, 31, 39, 47, 55, 63,
]
LOWER = [
28, 0, 1, 2, 3, 4, 5, 6,
0, 29, 7, 8, 9, 10, 11, 12,
1, 7, 30, 13, 14, 15, 16, 17,
2, 8, 13, 31, 18, 19, 20, 21,
3, 9, 14, 18, 32, 22, 23, 24,
4, 10, 15, 19, 22, 33, 25, 26,
5, 11, 16, 20, 23, 25, 34, 27,
6, 12, 17, 21, 24, 26, 27, 35,
]
DIAG = [
0, 0, 0, 0, 0, 0, 0, 8,
0, 1, 0, 0, 0, 0, 9, 0,
0, 0, 2, 0, 0, 10, 0, 0,
0, 0, 0, 3, 11, 0, 0, 0,
0, 0, 0, 12, 4, 0, 0, 0,
0, 0, 13, 0, 0, 5, 0, 0,
0, 14, 0, 0, 0, 0, 6, 0,
15, 0, 0, 0, 0, 0, 0, 7,
]
FLAP = [
0, 0, 0, 0, 0, 0, 0, 0,
0, 6, 12, 18, 18, 12, 6, 0,
1, 7, 13, 19, 19, 13, 7, 1,
2, 8, 14, 20, 20, 14, 8, 2,
3, 9, 15, 21, 21, 15, 9, 3,
4, 10, 16, 22, 22, 16, 10, 4,
5, 11, 17, 23, 23, 17, 11, 5,
0, 0, 0, 0, 0, 0, 0, 0,
]
PTWIST = [
0, 0, 0, 0, 0, 0, 0, 0,
47, 35, 23, 11, 10, 22, 34, 46,
45, 33, 21, 9, 8, 20, 32, 44,
43, 31, 19, 7, 6, 18, 30, 42,
41, 29, 17, 5, 4, 16, 28, 40,
39, 27, 15, 3, 2, 14, 26, 38,
37, 25, 13, 1, 0, 12, 24, 36,
0, 0, 0, 0, 0, 0, 0, 0,
]
INVFLAP = [
8, 16, 24, 32, 40, 48,
9, 17, 25, 33, 41, 49,
10, 18, 26, 34, 42, 50,
11, 19, 27, 35, 43, 51,
]
FILE_TO_FILE = [ 0, 1, 2, 3, 3, 2, 1, 0 ]
KK_IDX = [ [
-1, -1, -1, 0, 1, 2, 3, 4,
-1, -1, -1, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33,
34, 35, 36, 37, 38, 39, 40, 41,
42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57,
], [
58, -1, -1, -1, 59, 60, 61, 62,
63, -1, -1, -1, 64, 65, 66, 67,
68, 69, 70, 71, 72, 73, 74, 75,
76, 77, 78, 79, 80, 81, 82, 83,
84, 85, 86, 87, 88, 89, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99,
100, 101, 102, 103, 104, 105, 106, 107,
108, 109, 110, 111, 112, 113, 114, 115,
], [
116, 117, -1, -1, -1, 118, 119, 120,
121, 122, -1, -1, -1, 123, 124, 125,
126, 127, 128, 129, 130, 131, 132, 133,
134, 135, 136, 137, 138, 139, 140, 141,
142, 143, 144, 145, 146, 147, 148, 149,
150, 151, 152, 153, 154, 155, 156, 157,
158, 159, 160, 161, 162, 163, 164, 165,
166, 167, 168, 169, 170, 171, 172, 173,
], [
174, -1, -1, -1, 175, 176, 177, 178,
179, -1, -1, -1, 180, 181, 182, 183,
184, -1, -1, -1, 185, 186, 187, 188,
189, 190, 191, 192, 193, 194, 195, 196,
197, 198, 199, 200, 201, 202, 203, 204,
205, 206, 207, 208, 209, 210, 211, 212,
213, 214, 215, 216, 217, 218, 219, 220,
221, 222, 223, 224, 225, 226, 227, 228,
], [
229, 230, -1, -1, -1, 231, 232, 233,
234, 235, -1, -1, -1, 236, 237, 238,
239, 240, -1, -1, -1, 241, 242, 243,
244, 245, 246, 247, 248, 249, 250, 251,
252, 253, 254, 255, 256, 257, 258, 259,
260, 261, 262, 263, 264, 265, 266, 267,
268, 269, 270, 271, 272, 273, 274, 275,
276, 277, 278, 279, 280, 281, 282, 283,
], [
284, 285, 286, 287, 288, 289, 290, 291,
292, 293, -1, -1, -1, 294, 295, 296,
297, 298, -1, -1, -1, 299, 300, 301,
302, 303, -1, -1, -1, 304, 305, 306,
307, 308, 309, 310, 311, 312, 313, 314,
315, 316, 317, 318, 319, 320, 321, 322,
323, 324, 325, 326, 327, 328, 329, 330,
331, 332, 333, 334, 335, 336, 337, 338,
], [
-1, -1, 339, 340, 341, 342, 343, 344,
-1, -1, 345, 346, 347, 348, 349, 350,
-1, -1, 441, 351, 352, 353, 354, 355,
-1, -1, -1, 442, 356, 357, 358, 359,
-1, -1, -1, -1, 443, 360, 361, 362,
-1, -1, -1, -1, -1, 444, 363, 364,
-1, -1, -1, -1, -1, -1, 445, 365,
-1, -1, -1, -1, -1, -1, -1, 446,
], [
-1, -1, -1, 366, 367, 368, 369, 370,
-1, -1, -1, 371, 372, 373, 374, 375,
-1, -1, -1, 376, 377, 378, 379, 380,
-1, -1, -1, 447, 381, 382, 383, 384,
-1, -1, -1, -1, 448, 385, 386, 387,
-1, -1, -1, -1, -1, 449, 388, 389,
-1, -1, -1, -1, -1, -1, 450, 390,
-1, -1, -1, -1, -1, -1, -1, 451,
], [
452, 391, 392, 393, 394, 395, 396, 397,
-1, -1, -1, -1, 398, 399, 400, 401,
-1, -1, -1, -1, 402, 403, 404, 405,
-1, -1, -1, -1, 406, 407, 408, 409,
-1, -1, -1, -1, 453, 410, 411, 412,
-1, -1, -1, -1, -1, 454, 413, 414,
-1, -1, -1, -1, -1, -1, 455, 415,
-1, -1, -1, -1, -1, -1, -1, 456,
], [
457, 416, 417, 418, 419, 420, 421, 422,
-1, 458, 423, 424, 425, 426, 427, 428,
-1, -1, -1, -1, -1, 429, 430, 431,
-1, -1, -1, -1, -1, 432, 433, 434,
-1, -1, -1, -1, -1, 435, 436, 437,
-1, -1, -1, -1, -1, 459, 438, 439,
-1, -1, -1, -1, -1, -1, 460, 440,
-1, -1, -1, -1, -1, -1, -1, 461,
] ]
BINOMIAL = []
for i in range(5):
BINOMIAL.append([])
for j in range(64):
f = j
l = 1
for k in range(1, i + 1):
f *= j - k
l *= k + 1
BINOMIAL[i].append(f // l)
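# BINOMIAL[k][n] == C(n, k + 1); for example BINOMIAL[1][5] == C(5, 2) == 10.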
PAWNIDX = [ [ 0 for _ in range(24) ] for _ in range(5) ]
PFACTOR = [ [ 0 for _ in range(4) ] for _ in range(5) ]
for i in range(5):
j = 0
s = 0
while j < 6:
PAWNIDX[i][j] = s
s += 1 if i == 0 else BINOMIAL[i - 1][PTWIST[INVFLAP[j]]]
j += 1
PFACTOR[i][0] = s
s = 0
while j < 12:
PAWNIDX[i][j] = s
s += 1 if i == 0 else BINOMIAL[i - 1][PTWIST[INVFLAP[j]]]
j += 1
PFACTOR[i][1] = s
s = 0
while j < 18:
PAWNIDX[i][j] = s
s += 1 if i == 0 else BINOMIAL[i - 1][PTWIST[INVFLAP[j]]]
j += 1
PFACTOR[i][2] = s
s = 0
while j < 24:
PAWNIDX[i][j] = s
s += 1 if i == 0 else BINOMIAL[i - 1][PTWIST[INVFLAP[j]]]
j += 1
PFACTOR[i][3] = s
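# PAWNIDX[t][j] is the starting index when the leading pawn (of t + 1 like
# pawns) stands on flap square j; PFACTOR[t][f] is the total number of
# placements of those pawns when the leading pawn is on file f.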
WDL_TO_MAP = [1, 3, 0, 2, 0]
PA_FLAGS = [8, 0, 0, 0, 4]
WDL_TO_DTZ = [-1, -101, 0, 101, 1]
PCHR = ["K", "Q", "R", "B", "N", "P"]
def bswap8(x):
return x & 0xff
def bswap16(x):
return (bswap8(x) << 8) | bswap8(x >> 8)
def bswap32(x):
return (bswap16(x) << 16) | bswap16(x >> 16)
def bswap64(x):
return (bswap32(x) << 32) | bswap32(x >> 32)
def filenames():
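    # Yields table names for all 3- to 6-piece material combinations,
    # e.g. "KQvK" or "KRPvKR".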
for i in range(1, 6):
yield "K%cvK" % (PCHR[i], )
for i in range(1, 6):
for j in range(i, 6):
yield "K%cvK%c" % (PCHR[i], PCHR[j])
for i in range(1, 6):
for j in range(i, 6):
yield "K%c%cvK" % (PCHR[i], PCHR[j])
for i in range(1, 6):
for j in range(i, 6):
for k in range(1, 6):
yield "K%c%cvK%c" % (PCHR[i], PCHR[j], PCHR[k])
for i in range(1, 6):
for j in range(i, 6):
for k in range(j, 6):
yield "K%c%c%cvK" % (PCHR[i], PCHR[j], PCHR[k])
for i in range(1, 6):
for j in range(i, 6):
for k in range(i, 6):
for l in range(j if i == k else k, 6):
yield "K%c%cvK%c%c" % (PCHR[i], PCHR[j], PCHR[k], PCHR[l])
for i in range(1, 6):
for j in range(i, 6):
for k in range(j, 6):
for l in range(1, 6):
yield "K%c%c%cvK%c" % (PCHR[i], PCHR[j], PCHR[k], PCHR[l])
for i in range(1, 6):
for j in range(i, 6):
for k in range(j, 6):
for l in range(k, 6):
yield "K%c%c%c%cvK" % (PCHR[i], PCHR[j], PCHR[k], PCHR[l])
def calc_key(board, mirror=False):
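    # Material signature: XOR one Polyglot Zobrist entry per piece, indexed
    # by color, piece type and count; 'mirror' swaps the colors.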
key = 0
for color in chess.COLORS:
mirrored_color = color ^ 1 if mirror else color
for i in range(chess.pop_count(board.pawns & board.occupied_co[color])):
key ^= chess.POLYGLOT_RANDOM_ARRAY[mirrored_color * 6 * 16 + 5 * 16 + i]
for i in range(chess.pop_count(board.knights & board.occupied_co[color])):
key ^= chess.POLYGLOT_RANDOM_ARRAY[mirrored_color * 6 * 16 + 4 * 16 + i]
for i in range(chess.pop_count(board.bishops & board.occupied_co[color])):
key ^= chess.POLYGLOT_RANDOM_ARRAY[mirrored_color * 6 * 16 + 3 * 16 + i]
for i in range(chess.pop_count(board.rooks & board.occupied_co[color])):
key ^= chess.POLYGLOT_RANDOM_ARRAY[mirrored_color * 6 * 16 + 2 * 16 + i]
for i in range(chess.pop_count(board.queens & board.occupied_co[color])):
key ^= chess.POLYGLOT_RANDOM_ARRAY[mirrored_color * 6 * 16 + 1 * 16 + i]
for i in range(chess.pop_count(board.kings & board.occupied_co[color])):
key ^= chess.POLYGLOT_RANDOM_ARRAY[mirrored_color * 6 * 16 + 0 * 16 + i]
return key
def calc_key_from_filename(filename, mirror=False):
white, black = filename.split("v")
color = chess.WHITE
if mirror:
color ^= 1
key = 0
for piece_index, piece in enumerate(PCHR):
for i in range(white.count(piece)):
key ^= chess.POLYGLOT_RANDOM_ARRAY[color * 6 * 16 + piece_index * 16 + i]
color ^= 1
for piece_index, piece in enumerate(PCHR):
for i in range(black.count(piece)):
key ^= chess.POLYGLOT_RANDOM_ARRAY[color * 6 * 16 + piece_index * 16 + i]
return key
def subfactor(k, n):
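    # Binomial coefficient C(n, k), computed as n*(n-1)*...*(n-k+1) / k!.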
f = n
l = 1
for i in range(1, k):
f *= n - i
l *= i + 1
return f // l
class PairsData(object):
def __init__(self):
self.indextable = None
self.sizetable = None
self.data = None
self.offset = None
self.symlen = None
self.sympat = None
self.blocksize = None
self.idxbits = None
self.min_len = None
self.base = None
class PawnFileData(object):
def __init__(self):
self.precomp = {}
self.factor = {}
self.pieces = {}
self.norm = {}
class PawnFileDataDtz(object):
def __init__(self):
self.precomp = None
self.factor = None
self.pieces = None
self.norm = None
class Table(object):
def __init__(self, directory, filename, suffix):
self.directory = directory
self.filename = filename
self.suffix = suffix
self.fd = os.open(os.path.join(directory, filename) + suffix, os.O_RDONLY | os.O_BINARY if hasattr(os, "O_BINARY") else os.O_RDONLY)
self.data = mmap.mmap(self.fd, 0, access=mmap.ACCESS_READ)
if sys.version_info >= (3, ):
self.read_ubyte = self.data.__getitem__
else:
def read_ubyte(data_ptr):
return ord(self.data[data_ptr])
self.read_ubyte = read_ubyte
self.key = calc_key_from_filename(filename)
self.mirrored_key = calc_key_from_filename(filename, True)
self.symmetric = self.key == self.mirrored_key
# Leave the v out of the filename to get the number of pieces.
self.num = len(filename) - 1
self.has_pawns = "P" in filename
black_part, white_part = filename.split("v")
if self.has_pawns:
self.pawns = {}
self.pawns[0] = white_part.count("P")
self.pawns[1] = black_part.count("P")
if self.pawns[1] > 0 and (self.pawns[0] == 0 or self.pawns[1] < self.pawns[0]):
self.pawns[0], self.pawns[1] = self.pawns[1], self.pawns[0]
else:
j = 0
for piece_type in PCHR:
if black_part.count(piece_type) == 1:
j += 1
if white_part.count(piece_type) == 1:
j += 1
if j >= 3:
self.enc_type = 0
elif j == 2:
self.enc_type = 2
else:
# Each player will always have a king, unless we're playing
# suicide chess.
# TODO: Could be implemented.
assert False
def setup_pairs(self, data_ptr, tb_size, size_idx, wdl):
d = PairsData()
self._flags = self.read_ubyte(data_ptr)
if self.read_ubyte(data_ptr) & 0x80:
d.idxbits = 0
if wdl:
d.min_len = self.read_ubyte(data_ptr + 1)
else:
d.min_len = 0
self._next = data_ptr + 2
self.size[size_idx + 0] = 0
self.size[size_idx + 1] = 0
self.size[size_idx + 2] = 0
return d
d.blocksize = self.read_ubyte(data_ptr + 1)
d.idxbits = self.read_ubyte(data_ptr + 2)
real_num_blocks = self.read_uint32(data_ptr + 4)
num_blocks = real_num_blocks + self.read_ubyte(data_ptr + 3)
max_len = self.read_ubyte(data_ptr + 8)
min_len = self.read_ubyte(data_ptr + 9)
h = max_len - min_len + 1
num_syms = self.read_ushort(data_ptr + 10 + 2 * h)
d.offset = data_ptr + 10
d.symlen = [0 for _ in range(h * 8 + num_syms)]
d.sympat = data_ptr + 12 + 2 * h
d.min_len = min_len
self._next = data_ptr + 12 + 2 * h + 3 * num_syms + (num_syms & 1)
num_indices = (tb_size + (1 << d.idxbits) - 1) >> d.idxbits
self.size[size_idx + 0] = 6 * num_indices
self.size[size_idx + 1] = 2 * num_blocks
self.size[size_idx + 2] = (1 << d.blocksize) * real_num_blocks
tmp = [0 for _ in range(num_syms)]
for i in range(num_syms):
if not tmp[i]:
self.calc_symlen(d, i, tmp)
d.base = [0 for _ in range(h)]
d.base[h - 1] = 0
for i in range(h - 2, -1, -1):
d.base[i] = (d.base[i + 1] + self.read_ushort(d.offset + i * 2) - self.read_ushort(d.offset + i * 2 + 2)) // 2
for i in range(h):
d.base[i] <<= 64 - (min_len + i)
d.offset -= 2 * d.min_len
return d
def set_norm_piece(self, norm, pieces):
if self.enc_type == 0:
norm[0] = 3
elif self.enc_type == 2:
norm[0] = 2
else:
norm[0] = self.enc_type - 1
i = norm[0]
while i < self.num:
j = i
while j < self.num and pieces[j] == pieces[i]:
norm[i] += 1
j += 1
i += norm[i]
def calc_factors_piece(self, factor, order, norm):
PIVFAC = [31332, 28056, 462]
n = 64 - norm[0]
f = 1
i = norm[0]
k = 0
while i < self.num or k == order:
if k == order:
factor[0] = f
f *= PIVFAC[self.enc_type]
else:
factor[i] = f
f *= subfactor(norm[i], n)
n -= norm[i]
i += norm[i]
k += 1
return f
def calc_factors_pawn(self, factor, order, order2, norm, f):
i = norm[0]
if order2 < 0x0f:
i += norm[i]
n = 64 - i
fac = 1
k = 0
while i < self.num or k == order or k == order2:
if k == order:
factor[0] = fac
fac *= PFACTOR[norm[0] - 1][f]
elif k == order2:
factor[norm[0]] = fac
fac *= subfactor(norm[norm[0]], 48 - norm[0])
else:
factor[i] = fac
fac *= subfactor(norm[i], n)
n -= norm[i]
i += norm[i]
k += 1
return fac
def set_norm_pawn(self, norm, pieces):
norm[0] = self.pawns[0]
if self.pawns[1]:
norm[self.pawns[0]] = self.pawns[1]
i = self.pawns[0] + self.pawns[1]
while i < self.num:
j = i
while j < self.num and pieces[j] == pieces[i]:
norm[i] += 1
j += 1
i += norm[i]
def calc_symlen(self, d, s, tmp):
w = d.sympat + 3 * s
s2 = (self.read_ubyte(w + 2) << 4) | (self.read_ubyte(w + 1) >> 4)
if s2 == 0x0fff:
d.symlen[s] = 0
else:
s1 = ((self.read_ubyte(w + 1) & 0xf) << 8) | self.read_ubyte(w)
if not tmp[s1]:
self.calc_symlen(d, s1, tmp)
if not tmp[s2]:
self.calc_symlen(d, s2, tmp)
d.symlen[s] = d.symlen[s1] + d.symlen[s2] + 1
tmp[s] = 1
def pawn_file(self, pos):
for i in range(1, self.pawns[0]):
if FLAP[pos[0]] > FLAP[pos[i]]:
pos[0], pos[i] = pos[i], pos[0]
return FILE_TO_FILE[pos[0] & 0x07]
def encode_piece(self, norm, pos, factor):
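        # Maps a piece configuration to its index in the table, after
        # normalizing the position by board symmetries (file/rank flips
        # and, for pieces on the long diagonal, the a1-h8 mirror).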
n = self.num
if pos[0] & 0x04:
for i in range(n):
pos[i] ^= 0x07
if pos[0] & 0x20:
for i in range(n):
pos[i] ^= 0x38
for i in range(n):
if OFFDIAG[pos[i]]:
break
if i < (3 if self.enc_type == 0 else 2) and OFFDIAG[pos[i]] > 0:
for i in range(n):
pos[i] = FLIPDIAG[pos[i]]
if self.enc_type == 0: # 111
i = int(pos[1] > pos[0])
j = int(pos[2] > pos[0]) + int(pos[2] > pos[1])
if OFFDIAG[pos[0]]:
idx = TRIANGLE[pos[0]] * 63 * 62 + (pos[1] - i) * 62 + (pos[2] - j)
elif OFFDIAG[pos[1]]:
idx = 6 * 63 * 62 + DIAG[pos[0]] * 28 * 62 + LOWER[pos[1]] * 62 + pos[2] - j
elif OFFDIAG[pos[2]]:
idx = 6 * 63 * 62 + 4 * 28 * 62 + (DIAG[pos[0]]) * 7 * 28 + (DIAG[pos[1]] - i) * 28 + LOWER[pos[2]]
else:
idx = 6 * 63 * 62 + 4 * 28 * 62 + 4 * 7 * 28 + (DIAG[pos[0]] * 7 * 6) + (DIAG[pos[1]] - i) * 6 + (DIAG[pos[2]] - j)
i = 3
elif self.enc_type == 1: # K3
j = int(pos[2] > pos[0]) + int(pos[2] > pos[1])
idx = KK_IDX[TRIANGLE[pos[0]]][pos[1]]
if idx < 441:
idx = idx + 441 * (pos[2] - j)
else:
idx = 441 * 62 + (idx - 441) + 21 * LOWER[pos[2]]
if not OFFDIAG[pos[2]]:
idx -= j * 21
i = 3
else: # K2
idx = KK_IDX[TRIANGLE[pos[0]]][pos[1]]
i = 2
idx *= factor[0]
while i < n:
t = norm[i]
for j in range(i, i + t):
for k in range(j + 1, i + t):
# Swap.
if pos[j] > pos[k]:
pos[j], pos[k] = pos[k], pos[j]
s = 0
for m in range(i, i + t):
p = pos[m]
j = 0
for l in range(i):
j += int(p > pos[l])
s += BINOMIAL[m - i][p - j]
idx += s * factor[i]
i += t
return idx
def encode_pawn(self, norm, pos, factor):
n = self.num
if pos[0] & 0x04:
for i in range(n):
pos[i] ^= 0x07
for i in range(1, self.pawns[0]):
for j in range(i + 1, self.pawns[0]):
if PTWIST[pos[i]] < PTWIST[pos[j]]:
pos[i], pos[j] = pos[j], pos[i]
t = self.pawns[0] - 1
idx = PAWNIDX[t][FLAP[pos[0]]]
for i in range(t, 0, -1):
idx += BINOMIAL[t - i][PTWIST[pos[i]]]
idx *= factor[0]
# Remaining pawns.
i = self.pawns[0]
t = i + self.pawns[1]
if t > i:
for j in range(i, t):
for k in range(j + 1, t):
if pos[j] > pos[k]:
pos[j], pos[k] = pos[k], pos[j]
s = 0
for m in range(i, t):
p = pos[m]
j = 0
for k in range(i):
j += int(p > pos[k])
s += BINOMIAL[m - i][p - j - 8]
idx += s * factor[i]
i = t
while i < n:
t = norm[i]
for j in range(i, i + t):
for k in range(j + 1, i + t):
if pos[j] > pos[k]:
pos[j], pos[k] = pos[k], pos[j]
s = 0
for m in range(i, i + t):
p = pos[m]
j = 0
for k in range(i):
j += int(p > pos[k])
s += BINOMIAL[m - i][p - j]
idx += s * factor[i]
i += t
return idx
def decompress_pairs(self, d, idx):
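        # Decodes the value at index 'idx': locate the block through the
        # sparse index, Huffman-decode symbols inside the block, then
        # expand recursive symbol pairs through the sympat table.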
if not d.idxbits:
return d.min_len
mainidx = idx >> d.idxbits
litidx = (idx & (1 << d.idxbits) - 1) - (1 << (d.idxbits - 1))
block = self.read_uint32(d.indextable + 6 * mainidx)
idx_offset = self.read_ushort(d.indextable + 6 * mainidx + 4)
litidx += idx_offset
if litidx < 0:
while litidx < 0:
block -= 1
litidx += self.read_ushort(d.sizetable + 2 * block) + 1
else:
while litidx > self.read_ushort(d.sizetable + 2 * block):
litidx -= self.read_ushort(d.sizetable + 2 * block) + 1
block += 1
ptr = d.data + (block << d.blocksize)
m = d.min_len
base_idx = -m
symlen_idx = 0
code = self.read_uint64(ptr)
code = bswap64(code) # if little endian
ptr += 2 * 4
bitcnt = 0 # Number of empty bits in code
while True:
l = m
while code < d.base[base_idx + l]:
l += 1
sym = self.read_ushort(d.offset + l * 2)
sym += (code - d.base[base_idx + l]) >> (64 - l)
if litidx < d.symlen[symlen_idx + sym] + 1:
break
litidx -= d.symlen[symlen_idx + sym] + 1
code <<= l
bitcnt += l
if bitcnt >= 32:
bitcnt -= 32
tmp = self.read_uint32(ptr)
ptr += 4
tmp = bswap32(tmp) # if little endian
code |= tmp << bitcnt
# Cut off at 64bit.
code &= 0xffffffffffffffff
sympat = d.sympat
while d.symlen[symlen_idx + sym]:
w = sympat + 3 * sym
s1 = ((self.read_ubyte(w + 1) & 0xf) << 8) | self.read_ubyte(w)
if litidx < d.symlen[symlen_idx + s1] + 1:
sym = s1
else:
litidx -= d.symlen[symlen_idx + s1] + 1
sym = (self.read_ubyte(w + 2) << 4) | (self.read_ubyte(w + 1) >> 4)
return self.read_ubyte(sympat + 3 * sym)
def read_uint64(self, data_ptr):
return UINT64.unpack_from(self.data, data_ptr)[0]
def read_uint32(self, data_ptr):
return UINT32.unpack_from(self.data, data_ptr)[0]
def read_ushort(self, data_ptr):
return USHORT.unpack_from(self.data, data_ptr)[0]
def close(self):
self.data.close()
try:
os.close(self.fd)
except OSError:
pass
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __getstate__(self):
state = self.__dict__.copy()
del state["fd"]
del state["data"]
del state["read_ubyte"]
del state["lock"]
return state
def __setstate__(self, state):
self.__init__(self.directory, self.filename, self.suffix)
self.__dict__.update(state)
class WdlTable(Table):
def __init__(self, directory, filename, suffix=".rtbw"):
super(WdlTable, self).__init__(directory, filename, suffix)
self.initialized = False
self.lock = threading.Lock()
def init_table_wdl(self):
if self.initialized:
return
with self.lock:
if self.initialized:
return
assert WDL_MAGIC[0] == self.read_ubyte(0)
assert WDL_MAGIC[1] == self.read_ubyte(1)
assert WDL_MAGIC[2] == self.read_ubyte(2)
assert WDL_MAGIC[3] == self.read_ubyte(3)
self.tb_size = [0 for _ in range(8)]
self.size = [0 for _ in range(8 * 3)]
# Used if there are only pieces.
self.precomp = {}
self.pieces = {}
self.factor = {}
self.factor[chess.WHITE] = [0, 0, 0, 0, 0, 0]
self.factor[chess.BLACK] = [0, 0, 0, 0, 0, 0]
self.norm = {}
self.norm[chess.WHITE] = [0 for _ in range(self.num)]
self.norm[chess.BLACK] = [0 for _ in range(self.num)]
# Used if there are pawns.
self.files = [PawnFileData() for _ in range(4)]
self._next = None
self._flags = None
self.flags = None
split = self.read_ubyte(4) & 0x01
files = 4 if self.read_ubyte(4) & 0x02 else 1
data_ptr = 5
if not self.has_pawns:
self.setup_pieces_piece(data_ptr)
data_ptr += self.num + 1
data_ptr += data_ptr & 0x01
self.precomp[chess.WHITE] = self.setup_pairs(data_ptr, self.tb_size[0], 0, True)
data_ptr = self._next
if split:
self.precomp[chess.BLACK] = self.setup_pairs(data_ptr, self.tb_size[1], 3, True)
data_ptr = self._next
else:
self.precomp[chess.BLACK] = None
self.precomp[chess.WHITE].indextable = data_ptr
data_ptr += self.size[0]
if split:
self.precomp[chess.BLACK].indextable = data_ptr
data_ptr += self.size[3]
self.precomp[chess.WHITE].sizetable = data_ptr
data_ptr += self.size[1]
if split:
self.precomp[chess.BLACK].sizetable = data_ptr
data_ptr += self.size[4]
data_ptr = (data_ptr + 0x3f) & ~0x3f
self.precomp[chess.WHITE].data = data_ptr
data_ptr += self.size[2]
if split:
data_ptr = (data_ptr + 0x3f) & ~0x3f
self.precomp[chess.BLACK].data = data_ptr
else:
s = 1 + int(self.pawns[1] > 0)
for f in range(4):
self.setup_pieces_pawn(data_ptr, 2 * f, f)
data_ptr += self.num + s
data_ptr += data_ptr & 0x01
for f in range(files):
self.files[f].precomp[chess.WHITE] = self.setup_pairs(data_ptr, self.tb_size[2 * f], 6 * f, True)
data_ptr = self._next
if split:
self.files[f].precomp[chess.BLACK] = self.setup_pairs(data_ptr, self.tb_size[2 * f + 1], 6 * f + 3, True)
data_ptr = self._next
else:
self.files[f].precomp[chess.BLACK] = None
for f in range(files):
self.files[f].precomp[chess.WHITE].indextable = data_ptr
data_ptr += self.size[6 * f]
if split:
self.files[f].precomp[chess.BLACK].indextable = data_ptr
data_ptr += self.size[6 * f + 3]
for f in range(files):
self.files[f].precomp[chess.WHITE].sizetable = data_ptr
data_ptr += self.size[6 * f + 1]
if split:
self.files[f].precomp[chess.BLACK].sizetable = data_ptr
data_ptr += self.size[6 * f + 4]
for f in range(files):
data_ptr = (data_ptr + 0x3f) & ~0x3f
self.files[f].precomp[chess.WHITE].data = data_ptr
data_ptr += self.size[6 * f + 2]
if split:
data_ptr = (data_ptr + 0x3f) & ~0x3f
self.files[f].precomp[chess.BLACK].data = data_ptr
data_ptr += self.size[6 * f + 5]
self.initialized = True
def setup_pieces_pawn(self, p_data, p_tb_size, f):
j = 1 + int(self.pawns[chess.BLACK] > 0)
order = self.read_ubyte(p_data) & 0x0f
order2 = self.read_ubyte(p_data + 1) & 0x0f if self.pawns[chess.BLACK] else 0x0f
self.files[f].pieces[chess.WHITE] = [self.read_ubyte(p_data + i + j) & 0x0f for i in range(self.num)]
self.files[f].norm[chess.WHITE] = [0 for _ in range(self.num)]
self.set_norm_pawn(self.files[f].norm[chess.WHITE], self.files[f].pieces[chess.WHITE])
self.files[f].factor[chess.WHITE] = [0, 0, 0, 0, 0, 0]
self.tb_size[p_tb_size] = self.calc_factors_pawn(self.files[f].factor[chess.WHITE], order, order2, self.files[f].norm[chess.WHITE], f)
order = self.read_ubyte(p_data) >> 4
order2 = self.read_ubyte(p_data + 1) >> 4 if self.pawns[1] else 0x0f
self.files[f].pieces[chess.BLACK] = [self.read_ubyte(p_data + i + j) >> 4 for i in range(self.num)]
self.files[f].norm[chess.BLACK] = [0 for _ in range(self.num)]
self.set_norm_pawn(self.files[f].norm[chess.BLACK], self.files[f].pieces[chess.BLACK])
self.files[f].factor[chess.BLACK] = [0, 0, 0, 0, 0, 0]
self.tb_size[p_tb_size + 1] = self.calc_factors_pawn(self.files[f].factor[chess.BLACK], order, order2, self.files[f].norm[chess.BLACK], f)
def setup_pieces_piece(self, p_data):
self.pieces[chess.WHITE] = [self.read_ubyte(p_data + i + 1) & 0x0f for i in range(self.num)]
order = self.read_ubyte(p_data) & 0x0f
self.set_norm_piece(self.norm[chess.WHITE], self.pieces[chess.WHITE])
self.tb_size[chess.WHITE] = self.calc_factors_piece(self.factor[chess.WHITE], order, self.norm[chess.WHITE])
self.pieces[chess.BLACK] = [self.read_ubyte(p_data + i + 1) >> 4 for i in range(self.num)]
order = self.read_ubyte(p_data) >> 4
self.set_norm_piece(self.norm[chess.BLACK], self.pieces[chess.BLACK])
self.tb_size[chess.BLACK] = self.calc_factors_piece(self.factor[chess.BLACK], order, self.norm[chess.BLACK])
def probe_wdl_table(self, board):
self.init_table_wdl()
key = calc_key(board)
if self.symmetric:
cmirror = 0 if board.turn == chess.WHITE else 8
mirror = 0 if board.turn == chess.WHITE else 0x38
bside = 0
else:
if key != self.key:
cmirror = 8
mirror = 0x38
bside = int(board.turn == chess.WHITE)
else:
cmirror = mirror = 0
bside = int(board.turn != chess.WHITE)
if not self.has_pawns:
p = [0, 0, 0, 0, 0, 0]
i = 0
while i < self.num:
piece_type = self.pieces[bside][i] & 0x07
color = (self.pieces[bside][i] ^ cmirror) >> 3
bb = board.pieces_mask(piece_type, color)
square = chess.bit_scan(bb)
while square != -1 and square is not None:
p[i] = square
i += 1
square = chess.bit_scan(bb, square + 1)
idx = self.encode_piece(self.norm[bside], p, self.factor[bside])
res = self.decompress_pairs(self.precomp[bside], idx)
else:
p = [0, 0, 0, 0, 0, 0]
i = 0
k = self.files[0].pieces[0][0] ^ cmirror
color = k >> 3
piece_type = k & 0x07
bb = board.pieces_mask(piece_type, color)
square = chess.bit_scan(bb)
while square != -1 and square is not None:
p[i] = square ^ mirror
i += 1
square = chess.bit_scan(bb, square + 1)
f = self.pawn_file(p)
pc = self.files[f].pieces[bside]
while i < self.num:
color = (pc[i] ^ cmirror) >> 3
piece_type = pc[i] & 0x07
bb = board.pieces_mask(piece_type, color)
square = chess.bit_scan(bb)
while square != -1 and square is not None:
p[i] = square ^ mirror
i += 1
square = chess.bit_scan(bb, square + 1)
idx = self.encode_pawn(self.files[f].norm[bside], p, self.files[f].factor[bside])
res = self.decompress_pairs(self.files[f].precomp[bside], idx)
return res - 2
class DtzTable(Table):
def __init__(self, directory, filename, suffix=".rtbz"):
super(DtzTable, self).__init__(directory, filename, suffix)
self.initialized = False
self.lock = threading.Lock()
def init_table_dtz(self):
if self.initialized:
return
with self.lock:
if self.initialized:
return
assert DTZ_MAGIC[0] == self.read_ubyte(0)
assert DTZ_MAGIC[1] == self.read_ubyte(1)
assert DTZ_MAGIC[2] == self.read_ubyte(2)
assert DTZ_MAGIC[3] == self.read_ubyte(3)
self.factor = [0, 0, 0, 0, 0, 0]
self.norm = [0 for _ in range(self.num)]
self.tb_size = [0, 0, 0, 0]
self.size = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.files = [PawnFileDataDtz() for f in range(4)]
files = 4 if self.read_ubyte(4) & 0x02 else 1
p_data = 5
if not self.has_pawns:
self.map_idx = [0, 0, 0, 0]
self.setup_pieces_piece_dtz(p_data, 0)
p_data += self.num + 1
p_data += p_data & 0x01
self.precomp = self.setup_pairs(p_data, self.tb_size[0], 0, False)
self.flags = self._flags
p_data = self._next
self.p_map = p_data
if self.flags & 2:
for i in range(4):
self.map_idx[i] = p_data + 1 - self.p_map
p_data += 1 + self.read_ubyte(p_data)
p_data += p_data & 0x01
self.precomp.indextable = p_data
p_data += self.size[0]
self.precomp.sizetable = p_data
p_data += self.size[1]
p_data = (p_data + 0x3f) & ~0x3f
self.precomp.data = p_data
p_data += self.size[2]
else:
s = 1 + int(self.pawns[1] > 0)
for f in range(4):
self.setup_pieces_pawn_dtz(p_data, f, f)
p_data += self.num + s
p_data += p_data & 0x01
self.flags = []
for f in range(files):
self.files[f].precomp = self.setup_pairs(p_data, self.tb_size[f], 3 * f, False)
p_data = self._next
self.flags.append(self._flags)
self.map_idx = []
self.p_map = p_data
for f in range(files):
self.map_idx.append([])
if self.flags[f] & 2:
for i in range(4):
self.map_idx[-1].append(p_data + 1 - self.p_map)
p_data += 1 + self.read_ubyte(p_data)
p_data += p_data & 0x01
for f in range(files):
self.files[f].precomp.indextable = p_data
p_data += self.size[3 * f]
for f in range(files):
self.files[f].precomp.sizetable = p_data
p_data += self.size[3 * f + 1]
for f in range(files):
p_data = (p_data + 0x3f) & ~0x3f
self.files[f].precomp.data = p_data
p_data += self.size[3 * f + 2]
self.initialized = True
def probe_dtz_table(self, board, wdl):
self.init_table_dtz()
key = calc_key(board)
if not self.symmetric:
if key != self.key:
cmirror = 8
mirror = 0x38
bside = int(board.turn == chess.WHITE)
else:
cmirror = mirror = 0
bside = int(board.turn != chess.WHITE)
else:
cmirror = 0 if board.turn == chess.WHITE else 8
mirror = 0 if board.turn == chess.WHITE else 0x38
bside = 0
if not self.has_pawns:
if (self.flags & 1) != bside and not self.symmetric:
return 0, -1
pc = self.pieces
p = [0, 0, 0, 0, 0, 0]
i = 0
while i < self.num:
piece_type = pc[i] & 0x07
color = (pc[i] ^ cmirror) >> 3
bb = board.pieces_mask(piece_type, color)
square = chess.bit_scan(bb)
while square != -1 and square is not None:
p[i] = square
i += 1
square = chess.bit_scan(bb, square + 1)
idx = self.encode_piece(self.norm, p, self.factor)
res = self.decompress_pairs(self.precomp, idx)
if self.flags & 2:
res = self.read_ubyte(self.p_map + self.map_idx[WDL_TO_MAP[wdl + 2]] + res)
if (not (self.flags & PA_FLAGS[wdl + 2])) or (wdl & 1):
res *= 2
else:
k = self.files[0].pieces[0] ^ cmirror
piece_type = k & 0x07
color = k >> 3
bb = board.pieces_mask(piece_type, color)
i = 0
p = [0, 0, 0, 0, 0, 0]
square = chess.bit_scan(bb)
while square != -1 and square is not None:
p[i] = square ^ mirror
i += 1
square = chess.bit_scan(bb, square + 1)
f = self.pawn_file(p)
if self.flags[f] & 1 != bside:
return 0, -1
pc = self.files[f].pieces
while i < self.num:
piece_type = pc[i] & 0x07
color = (pc[i] ^ cmirror) >> 3
bb = board.pieces_mask(piece_type, color)
square = chess.bit_scan(bb)
while square != -1 and square is not None:
p[i] = square ^ mirror
i += 1
square = chess.bit_scan(bb, square + 1)
idx = self.encode_pawn(self.files[f].norm, p, self.files[f].factor)
res = self.decompress_pairs(self.files[f].precomp, idx)
if self.flags[f] & 2:
res = self.read_ubyte(self.p_map + self.map_idx[f][WDL_TO_MAP[wdl + 2]] + res)
if (not (self.flags[f] & PA_FLAGS[wdl + 2])) or (wdl & 1):
res *= 2
return res, 1
def setup_pieces_piece_dtz(self, p_data, p_tb_size):
self.pieces = [self.read_ubyte(p_data + i + 1) & 0x0f for i in range(self.num)]
order = self.read_ubyte(p_data) & 0x0f
self.set_norm_piece(self.norm, self.pieces)
self.tb_size[p_tb_size] = self.calc_factors_piece(self.factor, order, self.norm)
def setup_pieces_pawn_dtz(self, p_data, p_tb_size, f):
j = 1 + int(self.pawns[1] > 0)
order = self.read_ubyte(p_data) & 0x0f
order2 = self.read_ubyte(p_data + 1) & 0x0f if self.pawns[1] else 0x0f
self.files[f].pieces = [self.read_ubyte(p_data + i + j) & 0x0f for i in range(self.num)]
self.files[f].norm = [0 for _ in range(self.num)]
self.set_norm_pawn(self.files[f].norm, self.files[f].pieces)
self.files[f].factor = [0, 0, 0, 0, 0, 0]
self.tb_size[p_tb_size] = self.calc_factors_pawn(self.files[f].factor, order, order2, self.files[f].norm, f)
class Tablebases(object):
"""
Manages a collection of tablebase files for probing.
Syzygy tables come in files like *KQvKN.rtbw* or *KRBvK.rtbz*, one WDL
(*.rtbw*) and DTZ (*.rtbz*) file for each material composition.
Directly loads tables from *directory*. See *open_directory*.
"""
def __init__(self, directory=None, load_wdl=True, load_dtz=True):
self.wdl = {}
self.dtz = {}
if directory:
self.open_directory(directory, load_wdl, load_dtz)
def open_directory(self, directory, load_wdl=True, load_dtz=True):
"""
Loads tables from a directory.
By default all available tables with the correct file names
(e.g. *KQvKN.rtbw* or *KRBvK.rtbz*) are loaded.
        Returns the number of successfully opened and loaded tablebase files.
"""
num = 0
for filename in filenames():
if load_wdl and os.path.isfile(os.path.join(directory, filename) + ".rtbw"):
wdl_table = WdlTable(directory, filename)
if wdl_table.key in self.wdl:
self.wdl[wdl_table.key].close()
self.wdl[wdl_table.key] = wdl_table
self.wdl[wdl_table.mirrored_key] = wdl_table
num += 1
if load_dtz and os.path.isfile(os.path.join(directory, filename) + ".rtbz"):
dtz_table = DtzTable(directory, filename)
if dtz_table.key in self.dtz:
self.dtz[dtz_table.key].close()
self.dtz[dtz_table.key] = dtz_table
self.dtz[dtz_table.mirrored_key] = dtz_table
num += 1
return num
def probe_wdl_table(self, board):
# Test for KvK.
if board.kings == board.occupied:
return 0
key = calc_key(board)
if key not in self.wdl:
return None
return self.wdl[key].probe_wdl_table(board)
def probe_ab(self, board, alpha, beta):
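        # Alpha-beta search over (non-en-passant) captures: capture chains
        # are resolved recursively so that only quiet positions are looked
        # up in the WDL table below.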
for move in board.generate_pseudo_legal_moves():
# Only look at non-ep captures.
if not board.piece_type_at(move.to_square):
continue
# Do the move.
board.push(move)
# Only look at legal moves.
if board.was_into_check():
board.pop()
continue
v_plus, success = self.probe_ab(board, -beta, -alpha)
board.pop()
if v_plus is None or not success:
return None, 0
v = -v_plus
if v > alpha:
if v >= beta:
return v, 2
alpha = v
v = self.probe_wdl_table(board)
if v is None:
return None, 0
if alpha >= v:
return alpha, 1 + int(alpha > 0)
else:
return v, 1
def probe_wdl(self, board):
"""
Probes WDL tables for win/draw/loss-information.
Probing is thread-safe when done with different *board* objects and
if *board* objects are not modified during probing.
Returns *None* if the position was not found in any of the loaded
tables.
Returns *2* if the side to move is winning, *0* if the position is
a draw and *-2* if the side to move is losing.
Returns *1* in case of a cursed win and *-1* in case of a blessed
loss. Mate can be forced but the position can be drawn due to the
fifty-move rule.
>>> with chess.syzygy.Tablebases("data/syzygy") as tablebases:
... tablebases.probe_wdl(chess.Board("8/2K5/4B3/3N4/8/8/4k3/8 b - - 0 1"))
...
-2
"""
# Positions with castling rights are not in the tablebase.
if board.castling_rights != chess.CASTLING_NONE:
return None
# Probe.
v, success = self.probe_ab(board, -2, 2)
if v is None or not success:
return None
# If en-passant is not possible, we are done.
if not board.ep_square:
return v
# Now handle en-passant.
v1 = -3
# Look at least at all legal en-passant captures.
for move in board.generate_pseudo_legal_moves(castling=False, pawns=True, knights=False, bishops=False, rooks=False, queens=False, king=False):
# Filter out non-en-passant moves.
diff = abs(move.to_square - move.from_square)
if not ((diff == 7 or diff == 9) and not board.occupied & chess.BB_SQUARES[move.to_square]):
continue
# Do the move.
board.push(move)
# Filter out illegal moves.
if board.was_into_check():
board.pop()
continue
v0_plus, success = self.probe_ab(board, -2, 2)
board.pop()
if v0_plus is None or not success:
return None
v0 = -v0_plus
if v0 > v1:
v1 = v0
if v1 > -3:
if v1 >= v:
v = v1
elif v == 0:
# Check whether there is at least one legal non-ep move.
found_move = False
for move in board.generate_legal_moves():
if board.piece_type_at(move.from_square) != chess.PAWN:
found_move = True
break
diff = abs(move.to_square - move.from_square)
if not ((diff == 7 or diff == 9) and not board.occupied & chess.BB_SQUARES[move.to_square]):
found_move = True
break
# If not, then we are forced to play the losing ep capture.
if not found_move:
v = v1
return v
def probe_dtz_table(self, board, wdl):
key = calc_key(board)
if key not in self.dtz:
return None, 0
return self.dtz[key].probe_dtz_table(board, wdl)
def probe_dtz_no_ep(self, board):
wdl, success = self.probe_ab(board, -2, 2)
if wdl is None or not success:
return None
if wdl == 0:
return 0
if success == 2:
return 1 if wdl == 2 else 101
if wdl > 0:
# Generate all legal non capturing pawn moves.
for move in board.generate_pseudo_legal_moves(castling=False, pawns=True, knights=False, bishops=False, rooks=False, queens=False, king=False):
diff = abs(move.to_square - move.from_square)
if diff == 7 or diff == 9:
continue
board.push(move)
if board.was_into_check():
board.pop()
continue
v_plus, success = self.probe_ab(board, -2, -wdl + 1)
board.pop()
if v_plus is None or not success:
return None
v = -v_plus
if v == wdl:
return 1 if v == 2 else 101
dtz, success = self.probe_dtz_table(board, wdl)
dtz += 1
if success >= 0:
if wdl & 1:
dtz += 100
return dtz if wdl >= 0 else -dtz
if wdl > 0:
best = 0xffff
for move in board.generate_pseudo_legal_moves(pawns=False):
if board.piece_type_at(move.to_square) != chess.NONE:
continue
board.push(move)
if board.was_into_check():
board.pop()
continue
v_plus = self.probe_dtz(board)
board.pop()
if v_plus is None:
return None
v = -v_plus
if v > 0 and v + 1 < best:
best = v + 1
return best
else:
best = -1
for move in board.generate_pseudo_legal_moves():
board.push(move)
if board.was_into_check():
board.pop()
continue
if board.halfmove_clock == 0:
if wdl == -2:
v = -1
else:
v, success = self.probe_ab(board, 1, 2)
if v is None or not success:
board.pop()
return None
v = 0 if v == 2 else -101
else:
v_plus_one = self.probe_dtz(board)
if v_plus_one is None:
board.pop()
return None
v = -v_plus_one - 1
board.pop()
if v < best:
best = v
return best
def probe_dtz(self, board):
"""
Probes DTZ tables for distance to zero information.
Probing is thread-safe when done with different *board* objects and
if *board* objects are not modified during probing.
Return *None* if the position was not found in any of the loaded tables.
Both DTZ and WDL tables are required in order to probe for DTZ values.
Returns a positive value if the side to move is winning, *0* if the
position is a draw and a negative value if the side to move is losing.
A non-zero distance to zero means the number of halfmoves until the
next pawn move or capture can be forced, keeping a won position.
Minmaxing the DTZ values guarantees winning a won position (and drawing
a drawn position), because it makes progress keeping the win in hand.
However the lines are not always the most straight forward ways to win.
Engines like Stockfish calculate themselves, checking with DTZ, but only
play according to DTZ if they can not manage on their own.
>>> with chess.syzygy.Tablebases("data/syzygy") as tablebases:
... tablebases.probe_dtz(chess.Board("8/2K5/4B3/3N4/8/8/4k3/8 b - - 0 1"))
...
-53
"""
v = self.probe_dtz_no_ep(board)
if v is None:
return None
if not board.ep_square:
return v
v1 = -3
for move in board.generate_pseudo_legal_moves(castling=False, pawns=True, knights=False, bishops=False, rooks=False, queens=False, king=False):
# Filter out non-en-passant moves.
diff = abs(move.to_square - move.from_square)
if not ((diff == 7 or diff == 9) and not board.occupied & chess.BB_SQUARES[move.to_square]):
continue
board.push(move)
if board.was_into_check():
board.pop()
continue
v0_plus, success = self.probe_ab(board, -2, 2)
board.pop()
if v0_plus is None or not success:
return None
v0 = -v0_plus
if v0 > v1:
v1 = v0
if v1 > -3:
v1 = WDL_TO_DTZ[v1 + 2]
if v < -100:
if v1 >= 0:
v = v1
elif v < 0:
if v1 >= 0 or v1 < 100:
v = v1
elif v > 100:
if v1 > 0:
v = v1
elif v > 0:
if v1 == 1:
v = v1
elif v1 >= 0:
v = v1
else:
found_move = False
for move in board.generate_legal_moves():
if board.piece_type_at(move.from_square) != chess.PAWN:
found_move = True
break
diff = abs(move.to_square - move.from_square)
if not ((diff == 7 or diff == 9) and not board.occupied & chess.BB_SQUARES[move.to_square]):
found_move = True
break
if not found_move:
v = v1
return v
def close(self):
"""Closes all loaded tables."""
while self.wdl:
_, wdl = self.wdl.popitem()
wdl.close()
while self.dtz:
_, dtz = self.dtz.popitem()
dtz.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
| gpl-3.0 | -9,177,441,311,323,495,000 | 31.892748 | 155 | 0.465624 | false |
tatumdmortimer/popgen-stats | siteFrequencySpectra.py | 1 | 3044 | #!/usr/bin/env python
import sys
import getopt
# This script reads in a vcf that has been processed with snpEff.
# Outgroup should be the reference sequence in the vcf.
# The script outputs the synonymous, nonsynonymous, and combined SFS.
def get_arguments(argv):
if len(argv) == 0:
usage()
sys.exit(2)
vcfFile = None
numStrains = None
try:
opts, args = getopt.getopt(argv, "v:n:")
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-v':
vcfFile = arg
elif opt == '-n':
numStrains = int(arg)
return vcfFile, numStrains
def usage():
print "siteFrequencySpectra.py\n \
-v <vcf file>\n \
-n <number of strains (don't include outgroup)>"
def calc_freqs(vcfFile, numStrains):
nonsynonymous = [0]*numStrains
synonymous = [0]*numStrains
intergenic = [0]*numStrains
nonsense = [0]*numStrains
stop_lost = [0]*numStrains
non_biallelic = 0
vcf = open(vcfFile, 'r')
for line in vcf:
if line[0] == "#":
continue
line = line.strip().split()
POS = line[1]
ALT = line[4]
if len(ALT) > 1: # skip positions that aren't biallelic
non_biallelic += 1
continue
INFO = line[7]
outgroup = line[9]
if outgroup != "0":
print "VCF in incorrect format."
print "Outgroup should be reference & first strain in alleles"
sys.exit()
alleles = line[10:]
freq = len(alleles) - alleles.count("0")
if "synonymous" in INFO or "stop_retained" in INFO:
synonymous[freq-1] += 1
elif "missense" in INFO:
nonsynonymous[freq-1] += 1
elif "stop_gained" in INFO:
nonsense[freq-1] += 1
elif "stop_lost" in INFO:
stop_lost[freq-1] += 1
else:
intergenic[freq-1] += 1
vcf.close()
print("{0} SNPs had multiple alternate alleles".format(non_biallelic))
return synonymous, nonsynonymous, nonsense, stop_lost, intergenic
def write_outfile(s, ns, n, sl, ig):
outfile = open("sfs.txt", "w")
outfile.write(
"Frequency\tSynonymous\tNonsynonymous\tNonsense\tStopLost\tIntergenic\tCombined\n")
for i in range(len(s)):
outfile.write("%i\t%i\t%i\t%i\t%i\t%i\t%i\n" % (i+1,
s[i],
ns[i],
n[i],
sl[i],
ig[i],
s[i] + ns[i] + n[i] + ig[i]))
vcfFile, numStrains = get_arguments(sys.argv[1:])
if vcfFile is None or numStrains is None:
usage()
sys.exit()
synonymous, nonsynonymous, nonsense, stop_lost, intergenic = calc_freqs(vcfFile, numStrains)
write_outfile(synonymous, nonsynonymous, nonsense, stop_lost, intergenic)
| mit | 2,309,766,632,523,727,400 | 30.381443 | 92 | 0.532194 | false |
moralrecordings/mrcrowbar | mrcrowbar/transforms.py | 1 | 1107 | """Definition classes for transformations."""
import collections
import logging
logger = logging.getLogger( __name__ )
TransformResult = collections.namedtuple( 'TransformResult', ['payload', 'end_offset'] )
TransformResult.__new__.__defaults__ = (b'', 0)
class Transform( object ):
"""Base class for defining transformations."""
# pylint: disable=unused-argument,no-self-use
def export_data( self, buffer, parent=None ):
"""Perform a transform on a byte string.
buffer
Source byte string.
parent
Parent object of the source (to provide context for Refs).
"""
logger.warning( '{}: export_data not implemented!'.format( self ) )
return TransformResult()
def import_data( self, buffer, parent=None ):
"""Perform a reverse-transform on a byte string.
buffer
Source byte string.
parent
Parent object of the source (to provide context for Refs).
"""
logger.warning( '{}: import_data not implemented!'.format( self ) )
return TransformResult()
| bsd-3-clause | 8,728,127,484,805,005,000 | 29.75 | 88 | 0.62692 | false |
dparks1134/STAMP | stamp/plugins/multiGroups/postHoc/TukeyKramer.py | 1 | 3302 | #=======================================================================
# Author: Donovan Parks
#
# Perform Tukey-Kramer post-hoc test.
#
# Copyright 2011 Donovan Parks
#
# This file is part of STAMP.
#
# STAMP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# STAMP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with STAMP. If not, see <http://www.gnu.org/licenses/>.
#=======================================================================
import math
from stamp.plugins.multiGroups.AbstractPostHocTestPlugin import AbstractPostHocTestPlugin
from stamp.metagenomics.stats.distributions.QTable import QTable
from numpy import var, mean
class TukeyKramer(AbstractPostHocTestPlugin):
'''
Perform Tukey-Kramer post-hoc test.
'''
def __init__(self, preferences):
AbstractPostHocTestPlugin.__init__(self, preferences)
self.name = 'Tukey-Kramer'
self.qtable = QTable(preferences)
def run(self, data, coverage, groupNames):
note = ''
# calculate critical value
N = 0
for i in xrange(0, len(data)):
N += len(data[i])
k = len(data)
dfD = N - len(data)
q_cv = self.qtable.cv(1.0-coverage, k, dfD)
cv001 = self.qtable.cv(0.001, k, dfD)
cv01 = self.qtable.cv(0.01, k, dfD)
cv02 = self.qtable.cv(0.02, k, dfD)
cv05 = self.qtable.cv(0.05, k, dfD)
cv1 = self.qtable.cv(0.1, k, dfD)
# calculate mean of each group
groupMean = []
for i in xrange(0, len(data)):
groupMean.append(mean(data[i]))
# calculate within group variance
withinGroupVar = 0.0
for i in xrange(0, len(data)):
withinGroupVar += (len(data[i])-1)*var(data[i], ddof=1)
withinGroupVar /= dfD
withinGroupStdDev = math.sqrt(withinGroupVar)
if withinGroupStdDev == 0:
note = 'degenerate case: within group variance is zero; set to 1e-6.'
withinGroupStdDev = 1e-6
# calculate Fs, effect size, and CI for each pair of groups
pValues = []
effectSize = []
lowerCI = []
upperCI = []
labels = []
for i in xrange(0, len(data)):
for j in xrange(i+1, len(data)):
sqrtInvSampleSize = math.sqrt( (1.0/len(data[i]) + 1.0/len(data[j])) / 2.0 )
# effect size
es = groupMean[i] - groupMean[j]
effectSize.append(es)
# p-value
qs = abs(es) / (withinGroupStdDev*sqrtInvSampleSize)
if qs > cv001:
pValue = '< 0.001'
elif qs > cv01:
pValue = '< 0.01'
elif qs > cv02:
pValue = '< 0.02'
elif qs > cv05:
pValue = '< 0.05'
elif qs > cv1:
pValue = '< 0.1'
else:
pValue = '>= 0.1'
pValues.append(pValue)
# confidence interval
confInter = q_cv * withinGroupStdDev * sqrtInvSampleSize
lowerCI.append(es - confInter)
upperCI.append(es + confInter)
labels.append(groupNames[i] + ' : ' + groupNames[j])
return pValues, effectSize, lowerCI, upperCI, labels, note
if __name__ == "__main__":
pass
| gpl-3.0 | -146,486,947,151,380,000 | 26.983051 | 89 | 0.632647 | false |
reinaldomaslim/Singaboat_RobotX2016 | robotx_nav/nodes/mc_deliver.py | 1 | 8296 | #!/usr/bin/env python
""" Mission 7-Detect and Deliver
1. Random walk with gaussian at center of map until station position is acquired
2. loiter around until correct face seen
3. if symbol seen, move towards symbol perpendicularly
4. if close enough, do move_base aiming
task 7:
-----------------
Created by Reinaldo@ 2016-12-07
Authors: Reinaldo
-----------------
"""
import rospy
import multiprocessing as mp
import math
import time
import numpy as np
import os
import tf
import random
from sklearn.cluster import KMeans
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion
from visualization_msgs.msg import MarkerArray, Marker
from move_base_forward import Forward
from move_base_waypoint import MoveTo
from move_base_loiter import Loiter
from move_base_stationkeeping import StationKeeping
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from std_msgs.msg import Int8
class DetectDeliver(object):
map_dim = [[0, 40], [0, 40]]
MAX_DATA=5
x0, y0, yaw0= 0, 0, 0
symbol=[0 , 0]
symbols=np.zeros((MAX_DATA, 2)) #unordered list
symbols_counter=0
angle_threshold=10*math.pi/180
symbol_location=np.zeros((MAX_DATA, 2))
shape_counter=0
distance_to_box=2
def __init__(self, symbol_list):
print("starting task 7")
rospy.init_node('task_7', anonymous=True)
self.symbol=symbol_list
self.symbol_visited=0
self.symbol_seen=False
self.symbol_position=[0, 0, 0]
self.station_seen=False #station here is cluster center of any face
self.station_position=[0, 0]
self.loiter_obj = Loiter("loiter", is_newnode=False, target=None, radius=5, polygon=4, mode=1, mode_param=1, is_relative=False)
self.moveto_obj = MoveTo("moveto", is_newnode=False, target=None, is_relative=False)
self.stationkeep_obj = StationKeeping("station_keeping", is_newnode=False, target=None, radius=2, duration=30)
rospy.Subscriber("/filtered_marker_array", MarkerArray, self.symbol_callback, queue_size = 50)
#rospy.Subscriber("/shoot", MarkerArray, self.symbol_callback, queue_size = 50)
rospy.Subscriber("/finished_search_and_shoot", Int8, self.stop_shoot_callback, queue_size = 5)
self.shooting_pub= rospy.Publisher('/start_search_and_shoot', Int8, queue_size=5)
self.base_frame = rospy.get_param("~base_frame", "base_link")
self.fixed_frame = rospy.get_param("~fixed_frame", "map")
# tf_listener
self.tf_listener = tf.TransformListener()
self.odom_received = False
rospy.wait_for_message("/odometry/filtered/global", Odometry)
rospy.Subscriber("/odometry/filtered/global", Odometry, self.odom_callback, queue_size=50)
while not self.odom_received:
rospy.sleep(1)
print("odom received")
print(self.symbol)
d=3
while not rospy.is_shutdown() and not self.station_seen:
target=[self.x0+d*math.cos(self.yaw0), self.y0+d*math.sin(self.yaw0), self.yaw0]
self.moveto_obj.respawn(target, )#forward
print("station: ")
print(self.station_position)
#aiming to the box
self.shooting_complete=False
self.is_aiming=False
#loiter around station until symbol's face seen
while not rospy.is_shutdown():
theta=math.atan2(self.station_position[1]-self.y0, self.station_position[0]-self.x0)
target=[self.station_position[0], self.station_position[1], theta]
self.move_to_goal(target, )
if self.distance_from_boat(target)<6:
self.shooting_pub.publish(1)
break
loiter_radius=math.sqrt((self.x0-self.station_position[0])**2+(self.y0-self.station_position[1])**2)
if loiter_radius>5:
loiter_radius=3
while not rospy.is_shutdown():
print(loiter_radius)
self.loiter_obj.respawn(self.station_position, 4, loiter_radius, )
self.shooting_pub.publish(1)
if loiter_radius>3:
loiter_radius-=1
if self.symbol_seen:
print(self.symbol_position)
print("symbol's position acquired, exit loitering")
break
if self.shooting_complete:
print("shooting done, return to base")
break
time.sleep(1)
print(self.symbol_position)
d=math.sqrt((self.x0-self.symbol_position[0])**2+(self.y0-self.symbol_position[1])**2)
counter=0
print(d)
#moveto an offset, replan in the way
while not rospy.is_shutdown():
self.shooting_pub.publish(1)
alpha=self.yaw0-self.symbol_position[2]
theta=math.atan2(math.fabs(math.sin(alpha)), math.fabs(math.cos(alpha))) #always +ve and 0-pi/2
d=math.sqrt((self.x0-self.symbol_position[0])**2+(self.y0-self.symbol_position[1])**2)
perpendicular_d=0.6*d*math.cos(theta)
if counter ==0 or theta>self.angle_threshold or d>self.distance_to_box:
print("replan")
target=[self.symbol_position[0]+perpendicular_d*math.cos(self.symbol_position[2]),self.symbol_position[1]+perpendicular_d*math.sin(self.symbol_position[2]), -self.symbol_position[2]]
self.moveto_obj.respawn(target, )
counter+=1
if d<self.distance_to_box:
break
time.sleep(1)
if self.shooting_complete:
print("shooting done, return to base")
break
station=[self.x0, self.y0, -self.symbol_position[2]]
radius=2
duration=30
print(self.symbol_position)
print(station)
while not rospy.is_shutdown():
self.shooting_pub.publish(1)
#duration 0 is forever
if not self.is_aiming:
self.stationkeep_obj.respawn(station, radius, duration)
#make aiming respawn
if self.shooting_complete:
print("shooting done, return to base")
break
time.sleep(1)
def distance_from_boat(self, target):
return math.sqrt((target[0]-self.x0)**2+(target[1]-self.y0)**2)
def move_to_goal(self, goal):
print("move to point")
one_third_goal=[2*self.x0/3+goal[0]/3, 2*self.y0/3+goal[1]/3, math.atan2(goal[1]-self.y0, goal[0]-self.x0)]
print(one_third_goal)
self.moveto_obj.respawn(one_third_goal, )
def stop_shoot_callback(self, msg):
if msg.data==1:
#stop aiming station
self.shooting_complete=True
def symbol_callback(self, msg):
if len(msg.markers)>0:
if self.symbols_counter>self.MAX_DATA:
station_kmeans = KMeans(n_clusters=1).fit(self.symbols)
self.station_center=station_kmeans.cluster_centers_
self.station_position[0]=self.station_center[0][0]
self.station_position[1]=self.station_center[0][1]
self.station_seen=True
for i in range(len(msg.markers)):
self.symbols[self.symbols_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.symbols_counter+=1
if msg.markers[i].type==self.symbol[0] and msg.markers[i].id==self.symbol[1]:
#set position_list (not sure)
self.symbol_position[0]=msg.markers[i].pose.position.x
self.symbol_position[1]=msg.markers[i].pose.position.y
x = msg.markers[i].pose.orientation.x
y = msg.markers[i].pose.orientation.y
z = msg.markers[i].pose.orientation.z
w = msg.markers[i].pose.orientation.w
_, _, self.symbol_position[2] = euler_from_quaternion((x, y, z, w))
self.symbol_location[self.shape_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.shape_counter+=1
if self.station_seen and self.shape_counter>self.MAX_DATA:
symbol_kmeans = KMeans(n_clusters=1).fit(self.symbol_location)
self.symbol_center=symbol_kmeans.cluster_centers_
self.symbol_position[0]=self.symbol_center[0][0]
self.symbol_position[1]=self.symbol_center[0][1]
#print(self.symbol_position)
self.symbol_seen=True
#self.pool.apply(cancel_loiter)
def get_tf(self, fixed_frame, base_frame):
""" transform from base_link to map """
trans_received = False
while not trans_received:
try:
(trans, rot) = self.tf_listener.lookupTransform(fixed_frame,
base_frame,
rospy.Time(0))
trans_received = True
return (Point(*trans), Quaternion(*rot))
except (tf.LookupException,
tf.ConnectivityException,
tf.ExtrapolationException):
pass
def odom_callback(self, msg):
trans, rot = self.get_tf("map", "base_link")
self.x0 = trans.x
self.y0 = trans.y
_, _, self.yaw0 = euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))
self.odom_received = True
if __name__ == '__main__':
try:
#[id,type]cruciform red
DetectDeliver([1,0])
except rospy.ROSInterruptException:
rospy.loginfo("Task 7 Finished")
| gpl-3.0 | -5,319,351,150,915,771,000 | 29.955224 | 186 | 0.699253 | false |
stevec7/gpfs | contrib/mmpmon/gpfs_mmpmon.py | 1 | 5992 | #!/usr/bin/env python
import argparse
import datetime
import matplotlib
import matplotlib.pyplot as pyp
import os
import subprocess
import sys
from collections import defaultdict
def yodict():
return defaultdict(yodict)
def main(args):
# from cmdline args
inputfile = args.inputfile
if args.hostfile:
try:
h = open(args.hostfile, 'r')
except IOError as ioe:
print "Error opening hosts file: {0}".format(ioe)
sys.exit(1)
hostfilter = [x.strip('\n') for x in h.readlines()]
else:
hostfilter = [] # blank
# figure out which day of the week it is to pass the snapshot name
#dow = days[datetime.datetime.today().weekday()] # total clown
data = yodict()
mmpmon = yodict()
_INTERVAL = args.interval # seconds
_DATA_FIELDS = ['_br_', '_bw_', '_oc_', '_cc_', '_rdc_',
'_wc_', '_dir_', '_iu_']
if args.starttime:
bucket_start = int(args.starttime)
else:
# get timestamp from beginning of file (messy, I know)
with open(inputfile, 'r') as g:
first_line = g.readline()
bucket_start = int(first_line.split()[10])
ticker = 1
current_bucket = 0
num_hosts = len(hostfilter)
try:
with open(inputfile, 'r') as f:
for line in f:
fields = line.split()
if fields[0] != '_io_s_':
continue
elif fields[4] not in hostfilter:
continue
# create temporary dictionary
cdata = dict(zip(fields[1::2], fields[2::2])) # "current data"
host = cdata['_nn_']
t = int(cdata['_t_'])
# compute the buckets
#current_bucket = (t - bucket_start) / _INTERVAL
previous_bucket = current_bucket - 1
# create a filtered dictionary of attributes we want to store
cdc = dict((k,int(v)) for k, v in cdata.iteritems() if k in _DATA_FIELDS)
# first entry for every host in data defaultdict
if current_bucket == 0:
data[current_bucket][host] = cdc
mmpmon[current_bucket][host] = cdc
else:
try:
prev = data[previous_bucket][host]
#print current_bucket, line
#print cdc
#print prev
delta = dict((k,int(cdc[k]) - int(prev[k])) for k in cdc)
data[current_bucket][host] = cdc
# now set the data in the mmpmon_d dictionary
mmpmon[current_bucket][host] = delta
except TypeError as te:
continue
# properly enumarate the bucket numbers
ticker += 1
if ticker > num_hosts:
ticker = 1
current_bucket += 1
#from IPython import embed; embed()
except IOError as ioe:
print "Error opening input file: {0}".format(ioe)
sys.exit(1)
if args.topng:
# make a tuple of two lists, x and y axises
br = ([], [])
bw = ([], [])
tbr = ([], [])
tbw = ([], [])
for k, v in mmpmon.iteritems():
total_br = 0
total_bw = 0
for node in sorted(v):
br[0].append(k)
bw[0].append(k)
br[1].append(float(v[node]['_br_'])/(1048576))
bw[1].append(float(v[node]['_bw_'])/(1048576))
total_br += v[node]['_br_'] / 1048576
total_bw += v[node]['_bw_'] / 1048576
tbr[0].append(k)
tbr[1].append(total_br)
tbw[0].append(k)
tbw[1].append(total_bw)
# draw it up (2 plots, one with totals, one with ALL vals)
pyp.plot(br[0], br[1])
pyp.plot(bw[0], bw[1])
pyp.xlabel('Interval buckets ({0} secs)'.format(_INTERVAL))
pyp.ylabel('MB/s')
pyp.legend(['R', 'W'], loc='upper left')
# save the first figure
pyp.savefig(args.topng + ".png")
pyp.plot(tbr[0], tbr[1])
pyp.plot(tbw[0], tbw[1])
pyp.xlabel('Interval buckets ({0} secs)'.format(_INTERVAL))
pyp.ylabel('MB/s')
pyp.legend(['tR', 'tW'], loc='upper left')
pyp.savefig(args.topng + "_total.png")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-H','--hostfile',
dest='hostfile',
required=False,
help='*file* is a list of hosts (1 per line) \
of the hosts you\'d like to capture. All other hosts \
are filtered out.')
parser.add_argument('-f', '--file',
dest='inputfile',
required=True,
help='path to input file containing an mmpmon trace.')
parser.add_argument('-i', '--interval',
dest='interval',
required=True,
type=int,
help='interval in which mmpmon data was collected')
parser.add_argument('-e','--end',
dest='endtime',
required=False,
help='Dont collect data after YYYY-MM-DD_HH:MM:SS')
parser.add_argument('-s','--start',
dest='starttime',
required=False,
help='Dont collect data before YYYY-MM-DD_HH:MM:SS')
parser.add_argument('--topng',
dest='topng',
required=False,
help='write plot to a png')
args = parser.parse_args()
main(args)
| mit | -3,272,287,698,081,385,000 | 31.565217 | 89 | 0.47213 | false |
stackforge/monasca-notification | tests/test_notification_processor.py | 1 | 5458 | # (C) Copyright 2014-2016 Hewlett Packard Enterprise Development LP
# Copyright 2017 Fujitsu LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests NotificationProcessor"""
import mock
import time
from monasca_notification import notification as m_notification
from monasca_notification.plugins import email_notifier
from monasca_notification.processors import notification_processor as np
from tests import base
class smtpStub(object):
def __init__(self, log_queue):
self.queue = log_queue
def sendmail(self, from_addr, to_addr, msg):
self.queue.put("%s %s %s" % (from_addr, to_addr, msg))
class requestsResponse(object):
def __init__(self, status):
self.status_code = status
class TestNotificationProcessor(base.BaseTestCase):
def setUp(self):
super(TestNotificationProcessor, self).setUp()
self.trap = []
email_notifier.register_opts(base.config.CONF)
self.conf_default(group='email_notifier', server='my.smtp.server',
port=25, user=None, password=None,
timeout=60, from_addr='[email protected]')
self.conf_default(group='mysql', ssl=None, host='localhost',
port='3306', user='mysql_user', db='dbname',
passwd='mysql_passwd')
self.conf_default(group='statsd', host='localhost', port=8125)
self.conf_default(group='notification_types', enabled=[])
# ------------------------------------------------------------------------
# Test helper functions
# ------------------------------------------------------------------------
@mock.patch('pymysql.connect')
@mock.patch('monasca_notification.common.utils.monascastatsd')
@mock.patch('monasca_notification.plugins.email_notifier.smtplib')
@mock.patch('monasca_notification.processors.notification_processor.notifiers.log')
def _start_processor(self, notifications, mock_log, mock_smtp, mock_statsd, mock_pymsql):
"""Start the processor with the proper mocks
"""
# Since the log runs in another thread I can mock it directly,
# instead change the methods to put to a queue
mock_log.warn = self.trap.append
mock_log.error = self.trap.append
mock_smtp.SMTP = self._smtpStub
np.NotificationProcessor.insert_configured_plugins = mock.Mock()
processor = np.NotificationProcessor()
processor.send(notifications)
def _smtpStub(self, *arg, **kwargs):
return smtpStub(self.trap)
def email_setup(self, metric):
alarm_dict = {"tenantId": "0",
"alarmId": "0",
"alarmName": "test Alarm",
"oldState": "OK",
"newState": "ALARM",
"severity": "LOW",
"link": "some-link",
"lifecycleState": "OPEN",
"stateChangeReason": "I am alarming!",
"timestamp": time.time(),
"metrics": metric}
notification = m_notification.Notification(
0, 'email', 'email notification', '[email protected]', 0, 0, alarm_dict)
self._start_processor([notification])
# ------------------------------------------------------------------------
# Unit tests
# ------------------------------------------------------------------------
def test_invalid_notification(self):
"""Verify invalid notification type is rejected.
"""
alarm_dict = {
"tenantId": "0",
"alarmId": "0",
"alarmName": "test Alarm",
"oldState": "OK",
"newState": "ALARM",
"stateChangeReason": "I am alarming!",
"timestamp": time.time(),
"metrics": "cpu_util",
"severity": "LOW",
"link": "http://some-place.com",
"lifecycleState": "OPEN"}
invalid_notification = m_notification.Notification(0, 'invalid', 'test notification',
'[email protected]', 0, 0, alarm_dict)
self._start_processor([invalid_notification])
self.assertIn('attempting to send unconfigured notification: invalid', self.trap)
def test_email_notification_single_host(self):
"""Email with single host
"""
metrics = []
metric_data = {'dimensions': {'hostname': 'foo1', 'service': 'bar1'}}
metrics.append(metric_data)
self.email_setup(metrics)
for msg in self.trap:
if "From: [email protected]" in msg:
self.assertRegex(msg, "From: [email protected]")
self.assertRegex(msg, "To: [email protected]")
self.assertRegex(msg, "Content-Type: text/plain")
self.assertRegex(msg, "Alarm .test Alarm.")
self.assertRegex(msg, "On host .foo1.")
| apache-2.0 | 2,516,695,493,837,505,000 | 37.167832 | 93 | 0.564859 | false |
erdc/proteus | proteus/mprans/MCorr.py | 1 | 80203 | from __future__ import division
from builtins import zip
from builtins import range
from past.utils import old_div
import proteus
from proteus import cfemIntegrals, Norms, Quadrature, LinearAlgebraTools
from proteus.Comm import globalSum
from proteus.mprans.cMCorr import *
import numpy as np
from proteus.Transport import OneLevelTransport, logEvent, memory, fabs
from proteus.Transport import TC_base, l2Norm, NonlinearEquation
from . import cArgumentsDict
class Coefficients(proteus.TransportCoefficients.TC_base):
    """
    Coefficients for the mass (volume) correction model: solve for a
    correction to the level set field so that its smoothed Heaviside matches
    the VOF volume fraction.
    """
from proteus.ctransportCoefficients import levelSetConservationCoefficientsEvaluate
from proteus.ctransportCoefficients import levelSetConservationCoefficientsEvaluate_sd
def __init__(self,
applyCorrection=True,
epsFactHeaviside=0.0,
epsFactDirac=1.0,
epsFactDiffusion=2.0,
LSModel_index=3,
V_model=2,
me_model=5,
VOFModel_index=4,
checkMass=True,
sd=True,
nd=None,
applyCorrectionToDOF=True,
useMetrics=0.0,
useConstantH=False,
# mql. For edge based stabilization methods
useQuadraticRegularization=False,
edgeBasedStabilizationMethods=False,
nullSpace='NoNullSpace',
useExact=False,
initialize=True):
        self.useExact = useExact
self.useQuadraticRegularization = useQuadraticRegularization
self.edgeBasedStabilizationMethods = edgeBasedStabilizationMethods
self.useConstantH = useConstantH
self.useMetrics = useMetrics
self.sd = sd
self.nd = nd
self.checkMass = checkMass
self.variableNames = ['phiCorr']
self.levelSetModelIndex = LSModel_index
self.flowModelIndex = V_model
self.epsFactHeaviside = epsFactHeaviside
self.epsFactDirac = epsFactDirac
self.epsFactDiffusion = epsFactDiffusion
self.me_model = me_model
self.VOFModelIndex = VOFModel_index
self.useC = True
self.applyCorrection = applyCorrection
if self.applyCorrection:
self.applyCorrectionToDOF = applyCorrectionToDOF
self.massConservationError = 0.0
self.nullSpace = nullSpace
if initialize:
self.initialize()
def initialize(self):
if not self.applyCorrection:
self.applyCorrectionToDOF = False
#
nc = 1
mass = {}
advection = {}
hamiltonian = {}
diffusion = {0: {0: {0: 'constant'}}}
potential = {0: {0: 'u'}}
reaction = {0: {0: 'nonlinear'}}
# reaction={}
nd = self.nd
if self.sd:
assert nd is not None, "You must set the number of dimensions to use sparse diffusion in LevelSetConservationCoefficients"
sdInfo = {(0, 0): (np.arange(start=0, stop=nd + 1, step=1, dtype='i'),
np.arange(start=0, stop=nd, step=1, dtype='i'))}
else:
sdInfo = {}
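        # Concrete reading of sdInfo for nd=2: rowptr = [0, 1, 2] and
        # colind = [0, 1], i.e. a CSR encoding of a diagonal 2x2 diffusion
        # tensor, one nonzero per row with the column equal to the row. That
        # is exactly the sparsity of the constant isotropic regularization
        # a = epsDiffusion * I used by this model.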
TC_base.__init__(self,
nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
self.variableNames,
sparseDiffusionTensors=sdInfo,
useSparseDiffusion=self.sd)
def initializeMesh(self, mesh):
self.h = mesh.h
self.epsHeaviside = self.epsFactHeaviside * mesh.h
self.epsDirac = self.epsFactDirac * mesh.h
        self.epsDiffusion = (self.epsFactDiffusion * mesh.h *
                             (mesh.h if self.useQuadraticRegularization else 1.))
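    # A quick check of the regularization scaling (h = 0.1 is an assumed
    # example value, not anything computed by the solver):
    #   epsHeaviside = epsFactHeaviside * h
    #   epsDirac     = epsFactDirac * h
    #   epsDiffusion = 2.0 * 0.1       = 0.2   for epsFactDiffusion = 2.0
    #   epsDiffusion = 2.0 * 0.1 * 0.1 = 0.02  with useQuadraticRegularization
    # so the quadratic option makes the regularizing diffusion vanish faster
    # under mesh refinement.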
def attachModels(self, modelList):
import copy
logEvent("Attaching models in LevelSetConservation")
# level set
self.lsModel = modelList[self.levelSetModelIndex]
self.q_u_ls = modelList[self.levelSetModelIndex].q[('u', 0)]
self.q_n_ls = modelList[self.levelSetModelIndex].q[('grad(u)', 0)]
self.ebqe_u_ls = modelList[self.levelSetModelIndex].ebqe[('u', 0)]
self.ebqe_n_ls = modelList[self.levelSetModelIndex].ebqe[('grad(u)', 0)]
if ('u', 0) in modelList[self.levelSetModelIndex].ebq:
self.ebq_u_ls = modelList[self.levelSetModelIndex].ebq[('u', 0)]
else:
self.ebq_u_ls = None
# volume of fluid
self.vofModel = modelList[self.VOFModelIndex]
self.q_H_vof = modelList[self.VOFModelIndex].q[('u', 0)]
self.q_porosity = modelList[self.VOFModelIndex].coefficients.q_porosity
self.ebqe_H_vof = modelList[self.VOFModelIndex].ebqe[('u', 0)]
if ('u', 0) in modelList[self.VOFModelIndex].ebq:
self.ebq_H_vof = modelList[self.VOFModelIndex].ebq[('u', 0)]
else:
self.ebq_H_vof = None
# correction
self.massCorrModel = modelList[self.me_model]
self.massCorrModel.setMassQuadrature()
self.vofModel.q[('m_last', 0)][:] = self.q_porosity*self.q_H_vof
if self.checkMass:
self.m_tmp = copy.deepcopy(self.massCorrModel.q[('r', 0)])
if self.checkMass:
# self.vofGlobalMass = Norms.scalarDomainIntegral(self.vofModel.q['dV'],
# self.vofModel.q[('u',0)],
# self.massCorrModel.mesh.nElements_owned)
# self.lsGlobalMass = Norms.scalarHeavisideDomainIntegral(self.vofModel.q['dV'],
# self.lsModel.q[('u',0)],
# self.massCorrModel.mesh.nElements_owned)
#self.vofGlobalMass = 0.0
#self.lsGlobalMass = self.massCorrModel.calculateMass(self.lsModel.q[('u',0)])
# logEvent("Attach Models MCorr: mass correction %21.16e" % (Norms.scalarDomainIntegral(self.vofModel.q['dV'],
# self.massCorrModel.q[('r',0)],
# self.massCorrModel.mesh.nElements_owned),),level=2)
self.fluxGlobal = 0.0
self.totalFluxGlobal = 0.0
self.vofGlobalMassArray = [] # self.vofGlobalMass]
self.lsGlobalMassArray = [] # self.lsGlobalMass]
self.vofGlobalMassErrorArray = [] # self.vofGlobalMass - self.vofGlobalMassArray[0]]# + self.vofModel.timeIntegration.dt*self.vofModel.coefficients.fluxIntegral]
self.lsGlobalMassErrorArray = [] # self.lsGlobalMass - self.lsGlobalMassArray[0]]# + self.vofModel.timeIntegration.dt*self.vofModel.coefficients.fluxIntegral]
self.fluxArray = [] # 0.0]#self.vofModel.coefficients.fluxIntegral]
self.timeArray = [] # self.vofModel.timeIntegration.t]
#logEvent("Attach Models MCorr: Phase 0 mass after mass correction (VOF) %21.16e" % (self.vofGlobalMass,),level=2)
#logEvent("Attach Models MCorr: Phase 0 mass after mass correction (LS) %21.16e" % (self.lsGlobalMass,),level=2)
#logEvent("Attach Models MCorr: Phase 0 mass conservation (VOF) after step = %21.16e" % (self.vofGlobalMass - self.vofModel.coefficients.m_pre + self.vofModel.timeIntegration.dt*self.vofModel.coefficients.fluxIntegral,),level=2)
#logEvent("Attach Models MCorr: Phase 0 mass conservation (LS) after step = %21.16e" % (self.lsGlobalMass - self.lsModel.coefficients.m_pre + self.vofModel.timeIntegration.dt*self.vofModel.coefficients.fluxIntegral,),level=2)
def initializeElementQuadrature(self, t, cq):
if self.sd and ('a', 0, 0) in cq:
cq[('a', 0, 0)].fill(self.epsDiffusion)
def initializeElementBoundaryQuadrature(self, t, cebq, cebq_global):
if self.sd and ('a', 0, 0) in cebq:
cebq[('a', 0, 0)].fill(self.epsDiffusion)
def initializeGlobalExteriorElementBoundaryQuadrature(self, t, cebqe):
if self.sd and ('a', 0, 0) in cebqe:
cebqe[('a', 0, 0)].fill(self.epsDiffusion)
def preStep(self, t, firstStep=False):
if self.checkMass:
logEvent("Phase 0 mass before mass correction (VOF) %21.16e" % (Norms.scalarDomainIntegral(self.vofModel.q['dV'],
self.vofModel.q[('m', 0)],
self.massCorrModel.mesh.nElements_owned),), level=2)
logEvent("Phase 0 mass (primitive) before mass correction (LS) %21.16e" % (Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFactHeaviside,
self.massCorrModel.elementDiameter,
self.q_porosity*self.vofModel.q['dV'],
self.lsModel.q[('m', 0)],
self.massCorrModel.mesh.nElements_owned),), level=2)
logEvent("Phase 0 mass (consistent) before mass correction (LS) %21.16e" % (self.massCorrModel.calculateMass(self.lsModel.q[('m', 0)]),), level=2)
copyInstructions = {'clear_uList': True}
return copyInstructions
def postStep(self, t, firstStep=False):
if self.applyCorrection:
# ls
self.lsModel.u[0].dof += self.massCorrModel.u[0].dof
self.lsModel.q[('u', 0)] += self.massCorrModel.q[('u', 0)]
self.lsModel.ebqe[('u', 0)] += self.massCorrModel.ebqe[('u', 0)]
self.lsModel.q[('grad(u)', 0)] += self.massCorrModel.q[('grad(u)', 0)]
self.lsModel.ebqe[('grad(u)', 0)] += self.massCorrModel.ebqe[('grad(u)', 0)]
# vof
            if not self.edgeBasedStabilizationMethods:
self.massCorrModel.setMassQuadrature()
self.vofModel.q[('m_tmp',0)][:] = self.vofModel.coefficients.q_porosity*self.vofModel.q[('u',0)]
# else setMassQuadratureEdgeBasedStabilizationMethods is called within specialized nolinear solver
#self.vofModel.q[('u',0)] += self.massCorrModel.q[('r',0)]
# print "********************max VOF************************",max(self.vofModel.q[('u',0)].flat[:])
if self.checkMass:
logEvent("Phase 0 mass after mass correction (VOF) %21.16e" % (Norms.scalarDomainIntegral(self.vofModel.q['dV'],
self.vofModel.q[('m', 0)],
self.massCorrModel.mesh.nElements_owned),), level=2)
logEvent("Phase 0 mass (primitive) after mass correction (LS) %21.16e" % (Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFactHeaviside,
self.massCorrModel.elementDiameter,
self.q_porosity*self.vofModel.q['dV'],
self.lsModel.q[('m', 0)],
self.massCorrModel.mesh.nElements_owned),), level=2)
logEvent("Phase 0 mass (consistent) after mass correction (LS) %21.16e" % (self.massCorrModel.calculateMass(self.lsModel.q[('m', 0)]),), level=2)
copyInstructions = {}
# get the waterline on the obstacle if option set in NCLS (boundary==7)
self.lsModel.computeWaterline(t)
return copyInstructions
def postAdaptStep(self):
if self.applyCorrection:
# ls
self.lsModel.ebqe[('grad(u)', 0)][:] = self.massCorrModel.ebqe[('grad(u)', 0)]
def evaluate(self, t, c):
import math
if c[('u', 0)].shape == self.q_u_ls.shape:
u_ls = self.q_u_ls
H_vof = self.q_H_vof
elif c[('u', 0)].shape == self.ebqe_u_ls.shape:
u_ls = self.ebqe_u_ls
H_vof = self.ebqe_H_vof
elif self.ebq_u_ls is not None and c[('u', 0)].shape == self.ebq_u_ls.shape:
u_ls = self.ebq_u_ls
H_vof = self.ebq_H_vof
else:
#\todo trap errors in TransportCoefficients.py
u_ls = None
H_vof = None
if u_ls is not None and H_vof is not None:
if self.useC:
if self.sd:
self.levelSetConservationCoefficientsEvaluate_sd(self.epsHeaviside,
self.epsDirac,
u_ls,
H_vof,
c[('u', 0)],
c[('r', 0)],
c[('dr', 0, 0)])
else:
self.levelSetConservationCoefficientsEvaluate(self.epsHeaviside,
self.epsDirac,
self.epsDiffusion,
u_ls,
H_vof,
c[('u', 0)],
c[('r', 0)],
c[('dr', 0, 0)],
c[('a', 0, 0)])
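            # What the kernels above compute, in the usual proteus MCorr
            # formulation (a sketch; the exact signs and porosity handling
            # live in the C routines):
            #   r(u)  = H_eps(phi + u) - H_vof    pointwise mass defect
            #   dr/du = delta_eps(phi + u)        smoothed Dirac, Jacobian of r
            #   a     = epsDiffusion              constant regularizing diffusion
            # where H_eps is the smoothed Heaviside of width epsHeaviside, phi
            # the level set value and H_vof the VOF volume fraction at the same
            # point, so Newton finds the correction u that reconciles the two
            # interface representations.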
if (self.checkMass and c[('u', 0)].shape == self.q_u_ls.shape):
self.m_tmp[:] = H_vof
self.m_tmp += self.massCorrModel.q[('r', 0)]
logEvent("mass correction during Newton %21.16e" % (Norms.scalarDomainIntegral(self.vofModel.q['dV'],
self.massCorrModel.q[('r', 0)],
self.massCorrModel.mesh.nElements_owned),), level=2)
logEvent("Phase 0 mass during Newton %21.16e" % (Norms.scalarDomainIntegral(self.vofModel.q['dV'],
self.m_tmp,
self.massCorrModel.mesh.nElements_owned),), level=2)
class LevelModel(proteus.Transport.OneLevelTransport):
nCalls = 0
def __init__(self,
uDict,
phiDict,
testSpaceDict,
matType,
dofBoundaryConditionsDict,
dofBoundaryConditionsSetterDict,
coefficients,
elementQuadrature,
elementBoundaryQuadrature,
fluxBoundaryConditionsDict=None,
advectiveFluxBoundaryConditionsSetterDict=None,
diffusiveFluxBoundaryConditionsSetterDictDict=None,
stressTraceBoundaryConditionsSetterDict=None,
stabilization=None,
shockCapturing=None,
conservativeFluxDict=None,
numericalFluxType=None,
TimeIntegrationClass=None,
massLumping=False,
reactionLumping=False,
options=None,
name='defaultName',
reuse_trial_and_test_quadrature=True,
sd=True,
movingDomain=False,
                 bdyNullSpace=False):
self.useConstantH = coefficients.useConstantH
from proteus import Comm
#
# set the objects describing the method and boundary conditions
#
self.movingDomain = movingDomain
self.tLast_mesh = None
#
self.name = name
self.sd = sd
self.Hess = False
self.lowmem = True
self.timeTerm = True # allow turning off the time derivative
# self.lowmem=False
self.testIsTrial = True
self.phiTrialIsTrial = True
self.u = uDict
self.ua = {} # analytical solutions
self.phi = phiDict
self.dphi = {}
self.matType = matType
# mwf try to reuse test and trial information across components if spaces are the same
        self.reuse_test_trial_quadrature = reuse_trial_and_test_quadrature
if self.reuse_test_trial_quadrature:
for ci in range(1, coefficients.nc):
assert self.u[ci].femSpace.__class__.__name__ == self.u[0].femSpace.__class__.__name__, "to reuse_test_trial_quad all femSpaces must be the same!"
# Simplicial Mesh
self.mesh = self.u[0].femSpace.mesh # assume the same mesh for all components for now
self.testSpace = testSpaceDict
self.dirichletConditions = dofBoundaryConditionsDict
self.dirichletNodeSetList = None # explicit Dirichlet conditions for now, no Dirichlet BC constraints
self.bdyNullSpace = bdyNullSpace
self.coefficients = coefficients
self.coefficients.initializeMesh(self.mesh)
self.nc = self.coefficients.nc
self.stabilization = stabilization
self.shockCapturing = shockCapturing
self.conservativeFlux = conservativeFluxDict # no velocity post-processing for now
self.fluxBoundaryConditions = fluxBoundaryConditionsDict
self.advectiveFluxBoundaryConditionsSetterDict = advectiveFluxBoundaryConditionsSetterDict
self.diffusiveFluxBoundaryConditionsSetterDictDict = diffusiveFluxBoundaryConditionsSetterDictDict
# determine whether the stabilization term is nonlinear
self.stabilizationIsNonlinear = False
# cek come back
if self.stabilization is not None:
for ci in range(self.nc):
if ci in coefficients.mass:
for flag in list(coefficients.mass[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
if ci in coefficients.advection:
for flag in list(coefficients.advection[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
if ci in coefficients.diffusion:
for diffusionDict in list(coefficients.diffusion[ci].values()):
for flag in list(diffusionDict.values()):
if flag != 'constant':
self.stabilizationIsNonlinear = True
if ci in coefficients.potential:
for flag in list(coefficients.potential[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
if ci in coefficients.reaction:
for flag in list(coefficients.reaction[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
if ci in coefficients.hamiltonian:
for flag in list(coefficients.hamiltonian[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
# determine if we need element boundary storage
self.elementBoundaryIntegrals = {}
for ci in range(self.nc):
self.elementBoundaryIntegrals[ci] = ((self.conservativeFlux is not None) or
(numericalFluxType is not None) or
(self.fluxBoundaryConditions[ci] == 'outFlow') or
(self.fluxBoundaryConditions[ci] == 'mixedFlow') or
(self.fluxBoundaryConditions[ci] == 'setFlow'))
#
# calculate some dimensions
#
self.nSpace_global = self.u[0].femSpace.nSpace_global # assume same space dim for all variables
self.nDOF_trial_element = [u_j.femSpace.max_nDOF_element for u_j in list(self.u.values())]
self.nDOF_phi_trial_element = [phi_k.femSpace.max_nDOF_element for phi_k in list(self.phi.values())]
self.n_phi_ip_element = [phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in list(self.phi.values())]
self.nDOF_test_element = [femSpace.max_nDOF_element for femSpace in list(self.testSpace.values())]
self.nFreeDOF_global = [dc.nFreeDOF_global for dc in list(self.dirichletConditions.values())]
self.nVDOF_element = sum(self.nDOF_trial_element)
self.nFreeVDOF_global = sum(self.nFreeDOF_global)
#
NonlinearEquation.__init__(self, self.nFreeVDOF_global)
#
# build the quadrature point dictionaries from the input (this
# is just for convenience so that the input doesn't have to be
# complete)
#
elementQuadratureDict = {}
elemQuadIsDict = isinstance(elementQuadrature, dict)
if elemQuadIsDict: # set terms manually
for I in self.coefficients.elementIntegralKeys:
if I in elementQuadrature:
elementQuadratureDict[I] = elementQuadrature[I]
else:
elementQuadratureDict[I] = elementQuadrature['default']
else:
for I in self.coefficients.elementIntegralKeys:
elementQuadratureDict[I] = elementQuadrature
if self.stabilization is not None:
for I in self.coefficients.elementIntegralKeys:
if elemQuadIsDict:
if I in elementQuadrature:
elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature[I]
else:
elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature['default']
else:
elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature
if self.shockCapturing is not None:
for ci in self.shockCapturing.components:
if elemQuadIsDict:
if ('numDiff', ci, ci) in elementQuadrature:
elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature[('numDiff', ci, ci)]
else:
elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature['default']
else:
elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature
if massLumping:
for ci in list(self.coefficients.mass.keys()):
elementQuadratureDict[('m', ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
for I in self.coefficients.elementIntegralKeys:
elementQuadratureDict[('stab',) + I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
if reactionLumping:
for ci in list(self.coefficients.mass.keys()):
elementQuadratureDict[('r', ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
for I in self.coefficients.elementIntegralKeys:
elementQuadratureDict[('stab',) + I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
elementBoundaryQuadratureDict = {}
if isinstance(elementBoundaryQuadrature, dict): # set terms manually
for I in self.coefficients.elementBoundaryIntegralKeys:
if I in elementBoundaryQuadrature:
elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature[I]
else:
elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature['default']
else:
for I in self.coefficients.elementBoundaryIntegralKeys:
elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature
#
# find the union of all element quadrature points and
# build a quadrature rule for each integral that has a
# weight at each point in the union
# mwf include tag telling me which indices are which quadrature rule?
(self.elementQuadraturePoints, self.elementQuadratureWeights,
self.elementQuadratureRuleIndeces) = Quadrature.buildUnion(elementQuadratureDict)
self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]
self.nQuadraturePoints_global = self.nQuadraturePoints_element * self.mesh.nElements_global
#
# Repeat the same thing for the element boundary quadrature
#
(self.elementBoundaryQuadraturePoints,
self.elementBoundaryQuadratureWeights,
self.elementBoundaryQuadratureRuleIndeces) = Quadrature.buildUnion(elementBoundaryQuadratureDict)
self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[0]
self.nElementBoundaryQuadraturePoints_global = (self.mesh.nElements_global *
self.mesh.nElementBoundaries_element *
self.nElementBoundaryQuadraturePoints_elementBoundary)
#
# simplified allocations for test==trial and also check if space is mixed or not
#
self.q = {}
self.ebq = {}
self.ebq_global = {}
self.ebqe = {}
self.phi_ip = {}
# mesh
# uncomment this to store q arrays, see calculateElementQuadrature below
#self.q['x'] = np.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,3),'d')
self.q[('u', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
self.q[('grad(u)', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, self.nSpace_global), 'd')
self.q[('r', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
self.ebqe[('u', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
self.ebqe[('grad(u)', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,
self.nElementBoundaryQuadraturePoints_elementBoundary, self.nSpace_global), 'd')
self.points_elementBoundaryQuadrature = set()
self.scalars_elementBoundaryQuadrature = set([('u', ci) for ci in range(self.nc)])
self.vectors_elementBoundaryQuadrature = set()
self.tensors_elementBoundaryQuadrature = set()
logEvent(memory("element and element boundary Jacobians", "OneLevelTransport"), level=4)
self.inflowBoundaryBC = {}
self.inflowBoundaryBC_values = {}
self.inflowFlux = {}
for cj in range(self.nc):
self.inflowBoundaryBC[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global,), 'i')
self.inflowBoundaryBC_values[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nDOF_trial_element[cj]), 'd')
self.inflowFlux[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
self.internalNodes = set(range(self.mesh.nNodes_global))
        # identify the internal nodes; this ought to live in the mesh class
# \todo move this to mesh
for ebNE in range(self.mesh.nExteriorElementBoundaries_global):
ebN = self.mesh.exteriorElementBoundariesArray[ebNE]
eN_global = self.mesh.elementBoundaryElementsArray[ebN, 0]
ebN_element = self.mesh.elementBoundaryLocalElementBoundariesArray[ebN, 0]
for i in range(self.mesh.nNodes_element):
if i != ebN_element:
I = self.mesh.elementNodesArray[eN_global, i]
self.internalNodes -= set([I])
self.nNodes_internal = len(self.internalNodes)
self.internalNodesArray = np.zeros((self.nNodes_internal,), 'i')
for nI, n in enumerate(self.internalNodes):
self.internalNodesArray[nI] = n
#
del self.internalNodes
self.internalNodes = None
logEvent("Updating local to global mappings", 2)
self.updateLocal2Global()
logEvent("Building time integration object", 2)
logEvent(memory("inflowBC, internalNodes,updateLocal2Global", "OneLevelTransport"), level=4)
# mwf for interpolating subgrid error for gradients etc
if self.stabilization and self.stabilization.usesGradientStabilization:
self.timeIntegration = TimeIntegrationClass(self, integrateInterpolationPoints=True)
else:
self.timeIntegration = TimeIntegrationClass(self)
if options is not None:
self.timeIntegration.setFromOptions(options)
logEvent(memory("TimeIntegration", "OneLevelTransport"), level=4)
logEvent("Calculating numerical quadrature formulas", 2)
self.calculateQuadrature()
self.setupFieldStrides()
# (MQL)
self.MassMatrix = None # consistent mass matrix
self.LumpedMassMatrix = None
self.rhs_mass_correction = None
self.MassMatrix_sparseFactor = None
self.Jacobian_sparseFactor = None
self.lumped_L2p_vof_mass_correction = None
self.limited_L2p_vof_mass_correction = None
self.L2p_vof_mass_correction = None
comm = Comm.get()
self.comm = comm
if comm.size() > 1:
assert numericalFluxType is not None and numericalFluxType.useWeakDirichletConditions, "You must use a numerical flux to apply weak boundary conditions for parallel runs"
logEvent(memory("stride+offset", "OneLevelTransport"), level=4)
if numericalFluxType is not None:
if options is None or options.periodicDirichletConditions is None:
self.numericalFlux = numericalFluxType(self,
dofBoundaryConditionsSetterDict,
advectiveFluxBoundaryConditionsSetterDict,
diffusiveFluxBoundaryConditionsSetterDictDict)
else:
self.numericalFlux = numericalFluxType(self,
dofBoundaryConditionsSetterDict,
advectiveFluxBoundaryConditionsSetterDict,
diffusiveFluxBoundaryConditionsSetterDictDict,
options.periodicDirichletConditions)
else:
self.numericalFlux = None
# set penalty terms
# cek todo move into numerical flux initialization
if 'penalty' in self.ebq_global:
for ebN in range(self.mesh.nElementBoundaries_global):
for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):
                    self.ebq_global['penalty'][ebN, k] = old_div(self.numericalFlux.penalty_constant,
                                                                 (self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power))
# penalty term
# cek move to Numerical flux initialization
if 'penalty' in self.ebqe:
for ebNE in range(self.mesh.nExteriorElementBoundaries_global):
ebN = self.mesh.exteriorElementBoundariesArray[ebNE]
for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):
                    self.ebqe['penalty'][ebNE, k] = old_div(self.numericalFlux.penalty_constant,
                                                            self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)
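        # Both loops implement the standard interior-penalty scaling
        # penalty = C / h_F**p, with C = penalty_constant, h_F the face
        # diameter and p = penalty_power, so the penalty grows as the faces
        # shrink.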
logEvent(memory("numericalFlux", "OneLevelTransport"), level=4)
self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray
# use post processing tools to get conservative fluxes, None by default
from proteus import PostProcessingTools
self.velocityPostProcessor = PostProcessingTools.VelocityPostProcessingChooser(self)
logEvent(memory("velocity postprocessor", "OneLevelTransport"), level=4)
# helper for writing out data storage
from proteus import Archiver
self.elementQuadratureDictionaryWriter = Archiver.XdmfWriter()
self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()
self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()
self.globalResidualDummy = None
compKernelFlag = 0
if self.coefficients.useConstantH:
self.elementDiameter = self.mesh.elementDiametersArray.copy()
self.elementDiameter[:] = max(self.mesh.elementDiametersArray)
else:
self.elementDiameter = self.mesh.elementDiametersArray
self.mcorr = cMCorr_base(self.nSpace_global,
self.nQuadraturePoints_element,
self.u[0].femSpace.elementMaps.localFunctionSpace.dim,
self.u[0].femSpace.referenceFiniteElement.localFunctionSpace.dim,
self.testSpace[0].referenceFiniteElement.localFunctionSpace.dim,
self.nElementBoundaryQuadraturePoints_elementBoundary,
compKernelFlag)
# mwf these are getting called by redistancing classes,
def FCTStep(self):
rowptr, colind, MassMatrix = self.MassMatrix.getCSRrepresentation()
if (self.limited_L2p_vof_mass_correction is None):
self.limited_L2p_vof_mass_correction = np.zeros(self.LumpedMassMatrix.size, 'd')
argsDict = cArgumentsDict.ArgumentsDict()
argsDict["NNZ"] = self.nnz
argsDict["numDOFs"] = len(rowptr) - 1
argsDict["lumped_mass_matrix"] = self.LumpedMassMatrix
argsDict["solH"] = self.L2p_vof_mass_correction
argsDict["solL"] = self.lumped_L2p_vof_mass_correction
argsDict["limited_solution"] = self.limited_L2p_vof_mass_correction
argsDict["csrRowIndeces_DofLoops"] = rowptr
argsDict["csrColumnOffsets_DofLoops"] = colind
argsDict["matrix"] = MassMatrix
self.mcorr.FCTStep(argsDict)
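        # A sketch of what the kernel call above is expected to do (the
        # authoritative limiter lives in cMCorr; this assumes standard
        # Zalesak-style FCT for a lumped-versus-consistent L2 projection):
        #   F_ij      = M_C[i, j] * (solH_i - solH_j)        antidiffusive flux
        #   limited_i = solL_i + (1 / m_i) * sum_j alpha_ij * F_ij
        # with m_i the lumped mass entries, so alpha_ij = 1 recovers the
        # high-order solution solH and alpha_ij = 0 falls back to the
        # bound-preserving lumped solution solL.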
def calculateCoefficients(self):
pass
def calculateElementResidual(self):
if self.globalResidualDummy is not None:
self.getResidual(self.u[0].dof, self.globalResidualDummy)
    def getResidual(self, u, r):
        """
        Calculate the element residuals and add them into the global residual
        """
r.fill(0.0)
# Load the unknowns into the finite element dof
self.setUnknowns(u)
# no flux boundary conditions
argsDict = cArgumentsDict.ArgumentsDict()
argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
argsDict["mesh_dof"] = self.mesh.nodeArray
argsDict["mesh_l2g"] = self.mesh.elementNodesArray
argsDict["x_ref"] = self.elementQuadraturePoints
argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
argsDict["u_trial_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
argsDict["u_test_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
argsDict["nElements_global"] = self.mesh.nElements_global
argsDict["useMetrics"] = self.coefficients.useMetrics
argsDict["epsFactHeaviside"] = self.coefficients.epsFactHeaviside
argsDict["epsFactDirac"] = self.coefficients.epsFactDirac
argsDict["epsFactDiffusion"] = self.coefficients.epsFactDiffusion
argsDict["u_l2g"] = self.u[0].femSpace.dofMap.l2g
argsDict["r_l2g"] = self.l2g[0]['freeGlobal']
argsDict["elementDiameter"] = self.elementDiameter
argsDict["nodeDiametersArray"] = self.mesh.nodeDiametersArray
argsDict["u_dof"] = self.u[0].dof
argsDict["phi_dof"] = self.coefficients.lsModel.u[0].dof
argsDict["q_phi"] = self.coefficients.q_u_ls
argsDict["q_normal_phi"] = self.coefficients.q_n_ls
argsDict["ebqe_phi"] = self.coefficients.ebqe_u_ls
argsDict["ebqe_normal_phi"] = self.coefficients.ebqe_n_ls
argsDict["q_H"] = self.coefficients.q_H_vof
argsDict["q_u"] = self.q[('u', 0)]
argsDict["q_n"] = self.q[('grad(u)', 0)]
argsDict["ebqe_u"] = self.ebqe[('u', 0)]
argsDict["ebqe_n"] = self.ebqe[('grad(u)', 0)]
argsDict["q_r"] = self.q[('r', 0)]
argsDict["q_porosity"] = self.coefficients.q_porosity
argsDict["offset_u"] = self.offset[0]
argsDict["stride_u"] = self.stride[0]
argsDict["globalResidual"] = r
argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
self.mcorr.calculateResidual(argsDict,
self.coefficients.useExact)
logEvent("Global residual", level=9, data=r)
self.coefficients.massConservationError = fabs(globalSum(r[:self.mesh.nNodes_owned].sum()))
logEvent(" Mass Conservation Error: ", level=3, data=self.coefficients.massConservationError)
self.nonlinear_function_evaluations += 1
if self.globalResidualDummy is None:
self.globalResidualDummy = np.zeros(r.shape, 'd')
    # GET MASS MATRIX (MQL)
def getMassMatrix(self):
        # NOTE: both the consistent and the lumped mass matrices must be initialized to zero
if (self.MassMatrix is None):
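            # reuse the Jacobian's CSR sparsity pattern for the consistent mass matrix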
rowptr, colind, nzval = self.jacobian.getCSRrepresentation()
self.MassMatrix_a = nzval.copy()
nnz = nzval.shape[-1] # number of non-zero entries in sparse matrix
self.MassMatrix = LinearAlgebraTools.SparseMat(self.nFreeDOF_global[0],
self.nFreeDOF_global[0],
nnz,
self.MassMatrix_a,
colind,
rowptr)
# Lumped mass matrix
self.LumpedMassMatrix = np.zeros(rowptr.size - 1, 'd')
else:
self.LumpedMassMatrix.fill(0.0)
cfemIntegrals.zeroJacobian_CSR(self.nNonzerosInJacobian,
self.MassMatrix)
argsDict = cArgumentsDict.ArgumentsDict()
argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
argsDict["mesh_dof"] = self.mesh.nodeArray
argsDict["mesh_l2g"] = self.mesh.elementNodesArray
argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
argsDict["u_trial_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
argsDict["u_test_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
argsDict["nElements_global"] = self.mesh.nElements_global
argsDict["useMetrics"] = self.coefficients.useMetrics
argsDict["epsFactHeaviside"] = self.coefficients.epsFactHeaviside
argsDict["epsFactDirac"] = self.coefficients.epsFactDirac
argsDict["epsFactDiffusion"] = self.coefficients.epsFactDiffusion
argsDict["u_l2g"] = self.u[0].femSpace.dofMap.l2g
argsDict["elementDiameter"] = self.elementDiameter
argsDict["nodeDiametersArray"] = self.mesh.nodeDiametersArray
argsDict["u_dof"] = self.u[0].dof
argsDict["q_phi"] = self.coefficients.q_u_ls
argsDict["q_normal_phi"] = self.coefficients.q_n_ls
argsDict["q_H"] = self.coefficients.q_H_vof
argsDict["q_porosity"] = self.coefficients.q_porosity
argsDict["csrRowIndeces_u_u"] = self.csrRowIndeces[(0, 0)]
argsDict["csrColumnOffsets_u_u"] = self.csrColumnOffsets[(0, 0)]
argsDict["globalMassMatrix"] = self.MassMatrix.getCSRrepresentation()[2]
argsDict["globalLumpedMassMatrix"] = self.LumpedMassMatrix
self.mcorr.calculateMassMatrix(argsDict)
def getJacobian(self, jacobian):
cfemIntegrals.zeroJacobian_CSR(self.nNonzerosInJacobian, jacobian)
argsDict = cArgumentsDict.ArgumentsDict()
argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
argsDict["mesh_dof"] = self.mesh.nodeArray
argsDict["mesh_l2g"] = self.mesh.elementNodesArray
argsDict["x_ref"] = self.elementQuadraturePoints
argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
argsDict["u_trial_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
argsDict["u_test_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
argsDict["nElements_global"] = self.mesh.nElements_global
argsDict["useMetrics"] = self.coefficients.useMetrics
argsDict["epsFactHeaviside"] = self.coefficients.epsFactHeaviside
argsDict["epsFactDirac"] = self.coefficients.epsFactDirac
argsDict["epsFactDiffusion"] = self.coefficients.epsFactDiffusion
argsDict["u_l2g"] = self.u[0].femSpace.dofMap.l2g
argsDict["elementDiameter"] = self.elementDiameter
argsDict["nodeDiametersArray"] = self.mesh.nodeDiametersArray
argsDict["u_dof"] = self.u[0].dof
argsDict["phi_dof"] = self.coefficients.lsModel.u[0].dof
argsDict["q_phi"] = self.coefficients.q_u_ls
argsDict["q_normal_phi"] = self.coefficients.q_n_ls
argsDict["q_H"] = self.coefficients.q_H_vof
argsDict["q_porosity"] = self.coefficients.q_porosity
argsDict["csrRowIndeces_u_u"] = self.csrRowIndeces[(0, 0)]
argsDict["csrColumnOffsets_u_u"] = self.csrColumnOffsets[(0, 0)]
argsDict["globalJacobian"] = jacobian.getCSRrepresentation()[2]
self.mcorr.calculateJacobian(argsDict,
self.coefficients.useExact)
logEvent("Jacobian ", level=10, data=jacobian)
# mwf decide if this is reasonable for solver statistics
self.nonlinear_function_jacobian_evaluations += 1
return jacobian
def elementSolve(self, u, r):
        """
        Solve the mass correction problem element by element
        """
r.fill(0.0)
# Load the unknowns into the finite element dof
self.setUnknowns(u)
# no flux boundary conditions
argsDict = cArgumentsDict.ArgumentsDict()
argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
argsDict["mesh_dof"] = self.mesh.nodeArray
argsDict["mesh_l2g"] = self.mesh.elementNodesArray
argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
argsDict["u_trial_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
argsDict["u_test_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
argsDict["nElements_global"] = self.mesh.nElements_global
argsDict["useMetrics"] = self.coefficients.useMetrics
argsDict["epsFactHeaviside"] = self.coefficients.epsFactHeaviside
argsDict["epsFactDirac"] = self.coefficients.epsFactDirac
argsDict["epsFactDiffusion"] = self.coefficients.epsFactDiffusion
argsDict["u_l2g"] = self.u[0].femSpace.dofMap.l2g
argsDict["elementDiameter"] = self.elementDiameter
argsDict["nodeDiametersArray"] = self.mesh.nodeDiametersArray
argsDict["u_dof"] = self.u[0].dof
argsDict["q_phi"] = self.coefficients.q_u_ls
argsDict["q_normal_phi"] = self.coefficients.q_n_ls
argsDict["ebqe_phi"] = self.coefficients.ebqe_u_ls
argsDict["ebqe_normal_phi"] = self.coefficients.ebqe_n_ls
argsDict["q_H"] = self.coefficients.q_H_vof
argsDict["q_u"] = self.q[('u', 0)]
argsDict["q_n"] = self.q[('grad(u)', 0)]
argsDict["ebqe_u"] = self.ebqe[('u', 0)]
argsDict["ebqe_n"] = self.ebqe[('grad(u)', 0)]
argsDict["q_r"] = self.q[('r', 0)]
argsDict["q_porosity"] = self.coefficients.q_porosity
argsDict["offset_u"] = self.offset[0]
argsDict["stride_u"] = self.stride[0]
argsDict["globalResidual"] = r
argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
argsDict["maxIts"] = self.maxIts
argsDict["atol"] = self.atol
self.mcorr.elementSolve(argsDict)
def elementConstantSolve(self, u, r):
        """
        Solve for an element-wise constant mass correction
        """
r.fill(0.0)
# Load the unknowns into the finite element dof
self.setUnknowns(u)
# no flux boundary conditions
argsDict = cArgumentsDict.ArgumentsDict()
argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
argsDict["mesh_dof"] = self.mesh.nodeArray
argsDict["mesh_l2g"] = self.mesh.elementNodesArray
argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
argsDict["u_trial_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
argsDict["u_test_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
argsDict["nElements_global"] = self.mesh.nElements_global
argsDict["useMetrics"] = self.coefficients.useMetrics
argsDict["epsFactHeaviside"] = self.coefficients.epsFactHeaviside
argsDict["epsFactDirac"] = self.coefficients.epsFactDirac
argsDict["epsFactDiffusion"] = self.coefficients.epsFactDiffusion
argsDict["u_l2g"] = self.u[0].femSpace.dofMap.l2g
argsDict["elementDiameter"] = self.elementDiameter
argsDict["nodeDiametersArray"] = self.mesh.nodeDiametersArray
argsDict["u_dof"] = self.u[0].dof
argsDict["q_phi"] = self.coefficients.q_u_ls
argsDict["q_normal_phi"] = self.coefficients.q_n_ls
argsDict["ebqe_phi"] = self.coefficients.ebqe_u_ls
argsDict["ebqe_normal_phi"] = self.coefficients.ebqe_n_ls
argsDict["q_H"] = self.coefficients.q_H_vof
argsDict["q_u"] = self.q[('u', 0)]
argsDict["q_n"] = self.q[('grad(u)', 0)]
argsDict["ebqe_u"] = self.ebqe[('u', 0)]
argsDict["ebqe_n"] = self.ebqe[('grad(u)', 0)]
argsDict["q_r"] = self.q[('r', 0)]
argsDict["q_porosity"] = self.coefficients.q_porosity
argsDict["offset_u"] = self.offset[0]
argsDict["stride_u"] = self.stride[0]
argsDict["globalResidual"] = r
argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
argsDict["maxIts"] = self.maxIts
argsDict["atol"] = self.atol
self.mcorr.elementConstantSolve(argsDict)
def globalConstantRJ(self, u, r, U):
        """
        Evaluate the residual R and Jacobian J of the global constant
        correction problem at the constant value U
        """
r.fill(0.0)
# Load the unknowns into the finite element dof
self.setUnknowns(u)
# no flux boundary conditions
argsDict = cArgumentsDict.ArgumentsDict()
argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
argsDict["mesh_dof"] = self.mesh.nodeArray
argsDict["mesh_l2g"] = self.mesh.elementNodesArray
argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
argsDict["u_trial_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
argsDict["u_test_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
argsDict["nElements_owned"] = self.mesh.nElements_owned
argsDict["useMetrics"] = self.coefficients.useMetrics
argsDict["epsFactHeaviside"] = self.coefficients.epsFactHeaviside
argsDict["epsFactDirac"] = self.coefficients.epsFactDirac
argsDict["epsFactDiffusion"] = self.coefficients.epsFactDiffusion
argsDict["u_l2g"] = self.u[0].femSpace.dofMap.l2g
argsDict["elementDiameter"] = self.elementDiameter
argsDict["nodeDiametersArray"] = self.mesh.nodeDiametersArray
argsDict["u_dof"] = self.u[0].dof
argsDict["q_phi"] = self.coefficients.q_u_ls
argsDict["q_normal_phi"] = self.coefficients.q_n_ls
argsDict["ebqe_phi"] = self.coefficients.ebqe_u_ls
argsDict["ebqe_normal_phi"] = self.coefficients.ebqe_n_ls
argsDict["q_H"] = self.coefficients.q_H_vof
argsDict["q_u"] = self.q[('u', 0)]
argsDict["q_n"] = self.q[('grad(u)', 0)]
argsDict["ebqe_u"] = self.ebqe[('u', 0)]
argsDict["ebqe_n"] = self.ebqe[('grad(u)', 0)]
argsDict["q_r"] = self.q[('r', 0)]
argsDict["q_porosity"] = self.offset[0]
argsDict["offset_u"] = self.stride[0]
argsDict["stride_u"] = r
argsDict["globalResidual"] = self.coefficients.q_porosity
argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
argsDict["maxIts"] = self.maxIts
argsDict["atol"] = self.atol
argsDict["constant_u"] = U
(R, J) = self.mcorr.globalConstantRJ(argsDict)
R = globalSum(R)
J = globalSum(J)
self.coefficients.massConservationError = fabs(R)
return (R, J)
def globalConstantSolve(self, u, r):
U = 0.0
R = 0.0
J = 0.0
(R, J) = self.globalConstantRJ(u, r, U)
its = 0
logEvent(" Mass Conservation Residual 0 ", level=3, data=R)
RNORM_OLD = fabs(R)
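        # damped Newton iteration on the scalar constant U; the Jacobian is
        # regularized by 1.0e-8 to avoid division by zero, and the inner loop
        # backtracks by successive halving until the residual drops below
        # 0.99*RNORM_OLD (the initial residual norm) or maxLSits is exceeded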
while ((fabs(R) > self.atol and its < self.maxIts) or its < 1):
U -= old_div(R, (J + 1.0e-8))
(R, J) = self.globalConstantRJ(u, r, U)
lsits = 0
while(fabs(R) > 0.99 * RNORM_OLD and lsits < self.maxLSits):
lsits += 1
U += (0.5)**lsits * (old_div(R, (J + 1.0e-8)))
(R, J) = self.globalConstantRJ(u, r, U)
its += 1
logEvent(" Mass Conservation Residual " + repr(its)+" ", level=3, data=R)
self.u[0].dof.flat[:] = U
def calculateElementQuadrature(self):
"""
Calculate the physical location and weights of the quadrature rules
and the shape information at the quadrature points.
This function should be called only when the mesh changes.
"""
# uncomment this to store q arrays
# self.u[0].femSpace.elementMaps.getValues(self.elementQuadraturePoints,
# self.q['x'])
self.u[0].femSpace.elementMaps.getBasisValuesRef(self.elementQuadraturePoints)
self.u[0].femSpace.elementMaps.getBasisGradientValuesRef(self.elementQuadraturePoints)
self.u[0].femSpace.getBasisValuesRef(self.elementQuadraturePoints)
self.u[0].femSpace.getBasisGradientValuesRef(self.elementQuadraturePoints)
self.coefficients.initializeElementQuadrature(self.timeIntegration.t, self.q)
if self.stabilization is not None:
self.stabilization.initializeElementQuadrature(self.mesh, self.timeIntegration.t, self.q)
self.stabilization.initializeTimeIntegration(self.timeIntegration)
if self.shockCapturing is not None:
self.shockCapturing.initializeElementQuadrature(self.mesh, self.timeIntegration.t, self.q)
def calculateElementBoundaryQuadrature(self):
pass
def calculateExteriorElementBoundaryQuadrature(self):
self.u[0].femSpace.elementMaps.getBasisValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.elementMaps.getBasisGradientValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.getBasisValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.getBasisGradientValuesTraceRef(self.elementBoundaryQuadraturePoints)
def estimate_mt(self):
pass
def calculateAuxiliaryQuantitiesAfterStep(self):
pass
def calculateMass(self, q_phi):
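        # assemble quadrature data and return the total mass over the locally
        # owned elements, summed across processes via globalSum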
argsDict = cArgumentsDict.ArgumentsDict()
argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
argsDict["mesh_dof"] = self.mesh.nodeArray
argsDict["mesh_l2g"] = self.mesh.elementNodesArray
argsDict["x_ref"] = self.elementQuadraturePoints
argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
argsDict["u_trial_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
argsDict["u_test_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
argsDict["nElements_owned"] = self.mesh.nElements_owned
argsDict["useMetrics"] = self.coefficients.useMetrics
argsDict["epsFactHeaviside"] = self.coefficients.epsFactHeaviside
argsDict["epsFactDirac"] = self.coefficients.epsFactDirac
argsDict["epsFactDiffusion"] = self.coefficients.epsFactDiffusion
argsDict["u_l2g"] = self.u[0].femSpace.dofMap.l2g
argsDict["elementDiameter"] = self.elementDiameter
argsDict["nodeDiametersArray"] = self.mesh.nodeDiametersArray
argsDict["u_dof"] = self.u[0].dof
argsDict["phi_dof"] = self.coefficients.lsModel.u[0].dof
argsDict["q_phi"] = self.coefficients.q_u_ls
argsDict["q_normal_phi"] = self.coefficients.q_n_ls
argsDict["ebqe_phi"] = self.coefficients.ebqe_u_ls
argsDict["ebqe_normal_phi"] = self.coefficients.ebqe_n_ls
argsDict["q_H"] = self.coefficients.q_H_vof
argsDict["q_u"] = self.q[('u', 0)]
argsDict["q_n"] = self.q[('grad(u)', 0)]
argsDict["ebqe_u"] = self.ebqe[('u', 0)]
argsDict["ebqe_n"] = self.ebqe[('grad(u)', 0)]
argsDict["q_r"] = self.q[('r', 0)]
argsDict["q_porosity"] = self.coefficients.q_porosity
argsDict["offset_u"] = self.offset[0]
argsDict["stride_u"] = self.stride[0]
argsDict["globalResidual"] = self.u[0].dof
argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
return globalSum(self.mcorr.calculateMass(argsDict,
self.coefficients.useExact))
def setMassQuadratureEdgeBasedStabilizationMethods(self):
# Compute mass matrix
# Set rhs of mass correction to zero
if self.rhs_mass_correction is None:
self.rhs_mass_correction = np.zeros(self.coefficients.vofModel.u[0].dof.shape, 'd')
self.lumped_L2p_vof_mass_correction = np.zeros(self.coefficients.vofModel.u[0].dof.shape, 'd')
self.L2p_vof_mass_correction = np.zeros(self.coefficients.vofModel.u[0].dof.shape, 'd')
else:
self.rhs_mass_correction.fill(0.0)
argsDict = cArgumentsDict.ArgumentsDict()
argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
argsDict["mesh_dof"] = self.mesh.nodeArray
argsDict["mesh_l2g"] = self.mesh.elementNodesArray
argsDict["x_ref"] = self.elementQuadraturePoints
argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
argsDict["u_trial_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
argsDict["u_test_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
argsDict["nElements_global"] = self.mesh.nElements_global
argsDict["useMetrics"] = self.coefficients.useMetrics
argsDict["epsFactHeaviside"] = self.coefficients.epsFactHeaviside
argsDict["epsFactDirac"] = self.coefficients.epsFactDirac
argsDict["epsFactDiffusion"] = self.coefficients.epsFactDiffusion
argsDict["phi_l2g"] = self.coefficients.lsModel.u[0].femSpace.dofMap.l2g
argsDict["elementDiameter"] = self.elementDiameter
argsDict["nodeDiametersArray"] = self.mesh.nodeDiametersArray
argsDict["phi_dof"] = self.coefficients.lsModel.u[0].dof
argsDict["q_phi"] = self.coefficients.q_u_ls
argsDict["q_normal_phi"] = self.coefficients.q_n_ls
argsDict["ebqe_phi"] = self.coefficients.ebqe_u_ls
argsDict["ebqe_normal_phi"] = self.coefficients.ebqe_n_ls
argsDict["q_H"] = self.coefficients.q_H_vof
argsDict["q_u"] = self.q[('u', 0)]
argsDict["q_n"] = self.q[('grad(u)', 0)]
argsDict["ebqe_u"] = self.ebqe[('u', 0)]
argsDict["ebqe_n"] = self.ebqe[('grad(u)', 0)]
argsDict["q_r"] = self.q[('r', 0)]
argsDict["q_porosity"] = self.coefficients.q_porosity
argsDict["offset_u"] = self.offset[0]
argsDict["stride_u"] = self.stride[0]
argsDict["globalResidual"] = self.u[0].dof
argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
argsDict["rhs_mass_correction"] = self.rhs_mass_correction
argsDict["lumped_L2p_vof_mass_correction"] = self.lumped_L2p_vof_mass_correction
argsDict["lumped_mass_matrix"] = self.LumpedMassMatrix
argsDict["numDOFs"] = self.lumped_L2p_vof_mass_correction.size
self.mcorr.setMassQuadratureEdgeBasedStabilizationMethods(argsDict,
self.coefficients.useExact)
def setMassQuadrature(self):
argsDict = cArgumentsDict.ArgumentsDict()
argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
argsDict["mesh_dof"] = self.mesh.nodeArray
argsDict["mesh_l2g"] = self.mesh.elementNodesArray
argsDict["x_ref"] = self.elementQuadraturePoints
argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
argsDict["u_trial_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
argsDict["u_test_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
argsDict["nElements_global"] = self.mesh.nElements_global
argsDict["useMetrics"] = self.coefficients.useMetrics
argsDict["epsFactHeaviside"] = self.coefficients.epsFactHeaviside
argsDict["epsFactDirac"] = self.coefficients.epsFactDirac
argsDict["epsFactDiffusion"] = self.coefficients.epsFactDiffusion
argsDict["phi_l2g"] = self.coefficients.lsModel.u[0].femSpace.dofMap.l2g
argsDict["elementDiameter"] = self.elementDiameter
argsDict["nodeDiametersArray"] = self.mesh.nodeDiametersArray
argsDict["phi_dof"] = self.coefficients.lsModel.u[0].dof
argsDict["q_phi"] = self.coefficients.q_u_ls
argsDict["q_normal_phi"] = self.coefficients.q_n_ls
argsDict["ebqe_phi"] = self.coefficients.ebqe_u_ls
argsDict["ebqe_normal_phi"] = self.coefficients.ebqe_n_ls
argsDict["q_H"] = self.coefficients.q_H_vof
argsDict["q_u"] = self.q[('u', 0)]
argsDict["q_n"] = self.q[('grad(u)', 0)]
argsDict["ebqe_u"] = self.ebqe[('u', 0)]
argsDict["ebqe_n"] = self.ebqe[('grad(u)', 0)]
argsDict["q_r"] = self.q[('r', 0)]
argsDict["q_porosity"] = self.coefficients.q_porosity
argsDict["offset_u"] = self.offset[0]
argsDict["stride_u"] = self.stride[0]
argsDict["globalResidual"] = self.u[0].dof
argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
argsDict["H_dof"] = self.coefficients.vofModel.u[0].dof
self.mcorr.setMassQuadrature(argsDict,
self.coefficients.useExact)
def calculateSolutionAtQuadrature(self):
pass
def updateAfterMeshMotion(self):
pass
class DummyNewton(proteus.NonlinearSolvers.NonlinearSolver):
def __init__(self,
linearSolver,
F, J=None, du=None, par_du=None,
rtol_r=1.0e-4,
atol_r=1.0e-16,
rtol_du=1.0e-4,
atol_du=1.0e-16,
maxIts=100,
norm=l2Norm,
convergenceTest='r',
computeRates=True,
printInfo=True,
fullNewton=True,
directSolver=False,
EWtol=True,
maxLSits=100):
import copy
self.par_du = par_du
if par_du is not None:
F.dim_proc = par_du.dim_proc
NonlinearSolver.__init__(self, F, J, du,
rtol_r,
atol_r,
rtol_du,
atol_du,
maxIts,
norm,
convergenceTest,
computeRates,
printInfo)
self.updateJacobian = True
self.fullNewton = fullNewton
self.linearSolver = linearSolver
self.directSolver = directSolver
self.lineSearch = True
        # mwf: turned line search back on (previously: self.lineSearch = False)
self.EWtol = EWtol
# mwf added
self.maxLSits = maxLSits
if self.linearSolver.computeEigenvalues:
self.JLast = copy.deepcopy(self.J)
self.J_t_J = copy.deepcopy(self.J)
self.dJ_t_dJ = copy.deepcopy(self.J)
self.JLsolver = LU(self.J_t_J, computeEigenvalues=True)
self.dJLsolver = LU(self.dJ_t_dJ, computeEigenvalues=True)
self.u0 = np.zeros(self.F.dim, 'd')
def info(self):
return "Not Implemented"
def solve(self, u, r=None, b=None, par_u=None, par_r=None):
self.F.q[('r', 0)].flat[:] = 0.0
self.F.q[('u', 0)].flat[:] = 0.0
self.failedFlag = False
return self.failedFlag
class ElementNewton(proteus.NonlinearSolvers.NonlinearSolver):
def __init__(self,
linearSolver,
F, J=None, du=None, par_du=None,
rtol_r=1.0e-4,
atol_r=1.0e-16,
rtol_du=1.0e-4,
atol_du=1.0e-16,
maxIts=100,
norm=l2Norm,
convergenceTest='r',
computeRates=True,
printInfo=True,
fullNewton=True,
directSolver=False,
EWtol=True,
maxLSits=100):
import copy
self.par_du = par_du
if par_du is not None:
F.dim_proc = par_du.dim_proc
NonlinearSolver.__init__(self, F, J, du,
rtol_r,
atol_r,
rtol_du,
atol_du,
maxIts,
norm,
convergenceTest,
computeRates,
printInfo)
self.updateJacobian = True
self.fullNewton = fullNewton
self.linearSolver = linearSolver
self.directSolver = directSolver
self.lineSearch = True
        # mwf: turned line search back on (previously: self.lineSearch = False)
self.EWtol = EWtol
# mwf added
self.maxLSits = maxLSits
if self.linearSolver.computeEigenvalues:
self.JLast = copy.deepcopy(self.J)
self.J_t_J = copy.deepcopy(self.J)
self.dJ_t_dJ = copy.deepcopy(self.J)
self.JLsolver = LU(self.J_t_J, computeEigenvalues=True)
self.dJLsolver = LU(self.dJ_t_dJ, computeEigenvalues=True)
self.u0 = np.zeros(self.F.dim, 'd')
def info(self):
return "Not Implemented"
def solve(self, u, r=None, b=None, par_u=None, par_r=None):
self.F.maxIts = self.maxIts
self.F.maxLSits = self.maxLSits
self.F.atol = self.atol_r
self.F.elementSolve(u, r)
self.failedFlag = False
return self.failedFlag
class ElementConstantNewton(proteus.NonlinearSolvers.NonlinearSolver):
def __init__(self,
linearSolver,
F, J=None, du=None, par_du=None,
rtol_r=1.0e-4,
atol_r=1.0e-16,
rtol_du=1.0e-4,
atol_du=1.0e-16,
maxIts=100,
norm=l2Norm,
convergenceTest='r',
computeRates=True,
printInfo=True,
fullNewton=True,
directSolver=False,
EWtol=True,
maxLSits=100):
import copy
self.par_du = par_du
if par_du is not None:
F.dim_proc = par_du.dim_proc
NonlinearSolver.__init__(self, F, J, du,
rtol_r,
atol_r,
rtol_du,
atol_du,
maxIts,
norm,
convergenceTest,
computeRates,
printInfo)
self.updateJacobian = True
self.fullNewton = fullNewton
self.linearSolver = linearSolver
self.directSolver = directSolver
self.lineSearch = True
        # mwf: turned line search back on (previously: self.lineSearch = False)
self.EWtol = EWtol
# mwf added
self.maxLSits = maxLSits
if self.linearSolver.computeEigenvalues:
self.JLast = copy.deepcopy(self.J)
self.J_t_J = copy.deepcopy(self.J)
self.dJ_t_dJ = copy.deepcopy(self.J)
self.JLsolver = LU(self.J_t_J, computeEigenvalues=True)
self.dJLsolver = LU(self.dJ_t_dJ, computeEigenvalues=True)
self.u0 = np.zeros(self.F.dim, 'd')
def info(self):
return "Not Implemented"
def solve(self, u, r=None, b=None, par_u=None, par_r=None):
self.F.maxIts = self.maxIts
self.F.maxLSits = self.maxLSits
self.F.atol = self.atol_r
self.F.elementConstantSolve(u, r)
self.failedFlag = False
return self.failedFlag
class GlobalConstantNewton(proteus.NonlinearSolvers.NonlinearSolver):
def __init__(self,
linearSolver,
F, J=None, du=None, par_du=None,
rtol_r=1.0e-4,
atol_r=1.0e-16,
rtol_du=1.0e-4,
atol_du=1.0e-16,
maxIts=100,
norm=l2Norm,
convergenceTest='r',
computeRates=True,
printInfo=True,
fullNewton=True,
directSolver=False,
EWtol=True,
maxLSits=100):
import copy
self.par_du = par_du
if par_du is not None:
F.dim_proc = par_du.dim_proc
NonlinearSolver.__init__(self, F, J, du,
rtol_r,
atol_r,
rtol_du,
atol_du,
maxIts,
norm,
convergenceTest,
computeRates,
printInfo)
self.updateJacobian = True
self.fullNewton = fullNewton
self.linearSolver = linearSolver
self.directSolver = directSolver
self.lineSearch = True
        # mwf: turned line search back on (previously: self.lineSearch = False)
self.EWtol = EWtol
# mwf added
self.maxLSits = maxLSits
if self.linearSolver.computeEigenvalues:
self.JLast = copy.deepcopy(self.J)
self.J_t_J = copy.deepcopy(self.J)
self.dJ_t_dJ = copy.deepcopy(self.J)
self.JLsolver = LU(self.J_t_J, computeEigenvalues=True)
self.dJLsolver = LU(self.dJ_t_dJ, computeEigenvalues=True)
self.u0 = np.zeros(self.F.dim, 'd')
def info(self):
return "Not Implemented"
def solve(self, u, r=None, b=None, par_u=None, par_r=None):
self.F.maxIts = self.maxIts
self.F.maxLSits = self.maxLSits
self.F.atol = self.atol_r
self.F.globalConstantSolve(u, r)
self.failedFlag = False
return self.failedFlag
def conservationNorm(x):
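    """Absolute value of the global (cross-process) sum of all entries of x."""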
return fabs(globalSum(sum(x.flat)))
class Newton_controller(proteus.StepControl.Newton_controller):
def __init__(self, model, nOptions):
proteus.StepControl.Newton_controller.__init__(self, model, nOptions)
def initializeTimeHistory(self):
proteus.StepControl.Newton_controller.initializeTimeHistory(self)
for m, u, r in zip(self.model.levelModelList,
self.model.uList,
self.model.rList):
u.flat[:] = 0.0
m.getResidual(u, r)
m.coefficients.postStep(self.t_model)
m.coefficients.vofModel.updateTimeHistory(self.t_model, resetFromDOF=False)
m.coefficients.vofModel.timeIntegration.updateTimeHistory(resetFromDOF=False)
| mit | 3,438,568,843,681,131,000 | 53.301286 | 245 | 0.592035 | false |
jasonish/suricata-update | suricata/update/commands/addsource.py | 1 | 2157 | # Copyright (C) 2017 Open Information Security Foundation
#
# You can copy, redistribute or modify this Program under the terms of
# the GNU General Public License version 2 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# version 2 along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from __future__ import print_function
import logging
from suricata.update import config
from suricata.update import sources
try:
input = raw_input
except NameError:
pass
logger = logging.getLogger()
def register(parser):
parser.add_argument("name", metavar="<name>", nargs="?",
help="Name of source")
parser.add_argument("url", metavar="<url>", nargs="?", help="Source URL")
parser.add_argument("--http-header", metavar="<http-header>",
help="Additional HTTP header to add to requests")
parser.add_argument("--no-checksum", action="store_false",
help="Skips downloading the checksum URL")
parser.set_defaults(func=add_source)
def add_source():
args = config.args()
if args.name:
name = args.name
else:
while True:
name = input("Name of source: ").strip()
if name:
break
if sources.source_name_exists(name):
logger.error("A source with name %s already exists.", name)
return 1
if args.url:
url = args.url
else:
while True:
url = input("URL: ").strip()
if url:
break
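    # argparse's store_false means checksum defaults to True and becomes
    # False only when --no-checksum is passed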
checksum = args.no_checksum
header = args.http_header if args.http_header else None
source_config = sources.SourceConfiguration(
name, header=header, url=url, checksum=checksum)
sources.save_source_config(source_config)
| gpl-2.0 | 276,462,553,250,655,870 | 28.958333 | 77 | 0.651831 | false |
SlugocM/bayesfit | bayesfit/calc_integral.py | 1 | 1738 | """
*******************************************************
*
* calc_integral - APPROXIMATE INTEGRAL OF POSTERIOR
*
* License: Apache 2.0
* Written by: Michael Slugocki
* Created on: September 17, 2018
* Last updated: September 17, 2018
*
*******************************************************
"""
#################################################################
# IMPORT MODULES
#################################################################
import numpy as np
#################################################################
# APPROXIMATE INTEGRAL USING NUMERICAL INTEGRATION
#################################################################
def calc_integral(posterior, options, metrics):
"""Compute integral of posterior surface using numerical
integration.
Keyword arguments:
    posterior -- array of posterior probabilities evaluated over the parameter grid
    options -- fit options; options['param_free'] flags which parameters are free
    metrics -- fit metrics; metrics['Marginals_X'] holds the grid of values per parameter
"""
# Generate list of free parameters to integrate
list_free = []
    for i in range(0, 4):
        if options['param_free'][i] is True:
            if i == 0:
                list_free.append('scale')
            elif i == 1:
                list_free.append('slope')
            elif i == 2:
                list_free.append('gamma')
            elif i == 3:
                list_free.append('lambda')
# Calculate step size and mass
list_step = []
for key in list_free:
if metrics['Marginals_X'][key].shape[0] > 1:
step = (metrics['Marginals_X'][key].max() - metrics['Marginals_X'][key].min()) / (len(metrics['Marginals_X'][key]) - 1)
list_step.append(step)
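    # the integral is approximated as sum(posterior) * product(grid spacings),
    # i.e. a Riemann-sum rule over the uniform parameter grid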
# Compute mass
mass = np.product(list_step)
    # Compute integral
integral = np.sum(posterior * mass)
return integral | apache-2.0 | 4,034,504,746,930,823,000 | 27.983333 | 131 | 0.447641 | false |
wail007/ml_playground | test_olivettifaces.py | 1 | 2438 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from linear_regression import *
from knn import *
def main():
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train, y_train = np.split(train, [int(0.5 * n_pixels)], axis=1)
X_test , y_test = np.split(test , [int(0.5 * n_pixels)], axis=1)
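    # the images are flattened row-major, so the first half of the pixels is
    # the upper half of each face: estimators learn to predict the lower half
    # (y_*) from the upper half (X_*)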
# Fit estimators
ESTIMATORS = {
"LeastSquareRegression": LeastSquareRegression(),
"RidgeRegression" : RidgeRegression(incr=0.3, min_change=0.1),
"knn" : KNN(k=5)
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
if __name__ == "__main__":
main() | apache-2.0 | 6,522,751,880,474,785,000 | 28.385542 | 75 | 0.558655 | false |
wettenhj/mytardis | docs/conf.py | 1 | 4933 | """
Sphinxdoc configuration. Defines how documentation is built on
readthedocs.org or manually
"""
import os
import sys
import django
import sphinx_rtd_theme
sys.path.append(os.path.abspath('..'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tardis.test_settings")
django.setup()
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MyTardis'
copyright = u'2016, MyTardis Development Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import tardis
version = tardis.__version__
# The full version, including alpha/beta/rc tags.
# TODO add support for dev flag
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Sphinx theme
html_theme = "sphinx_rtd_theme"
# API doc generation
# ------------------
execfile("generate-api-docs.py")
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'tardis'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'tardis.tex', ur'MyTardis Documentation',
ur'', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/dev': None,
'http://docs.djangoproject.com/en/1.11/': 'http://docs.djangoproject.com/en/1.11/_objects'}
| gpl-3.0 | -17,518,657,977,065,484 | 27.680233 | 114 | 0.713967 | false |
UASLab/ImageAnalysis | scripts/archive/3a-detect-features.py | 1 | 4175 | #!/usr/bin/python3
import argparse
import fnmatch
import numpy as np
import os.path
from props import getNode
from lib import project
# for all the images in the project image_dir, detect features using the
# specified method and parameters
#
# Suggests censure/star has good stability between images (highest
# likelihood of finding a match in the target features set:
# http://computer-vision-talks.com/articles/2011-01-04-comparison-of-the-opencv-feature-detection-algorithms/
#
# Suggests censure/star works better than sift in outdoor natural
# environments: http://www.ai.sri.com/~agrawal/isrr.pdf
#
# Basic description of censure/star algorithm: http://www.researchgate.net/publication/221304099_CenSurE_Center_Surround_Extremas_for_Realtime_Feature_Detection_and_Matching
parser = argparse.ArgumentParser(description='Detect features in the project images.')
parser.add_argument('--project', required=True, help='project directory')
parser.add_argument('--scale', type=float, default=0.4, help='scale images before detecting features, this acts much like a noise filter')
parser.add_argument('--detector', default='SIFT',
choices=['SIFT', 'SURF', 'ORB', 'Star'])
#parser.add_argument('--sift-max-features', default=30000,
# help='maximum SIFT features')
parser.add_argument('--surf-hessian-threshold', default=600,
help='hessian threshold for surf method')
parser.add_argument('--surf-noctaves', default=4,
help='use a bigger number to detect bigger features')
parser.add_argument('--orb-max-features', default=2000,
help='maximum ORB features')
parser.add_argument('--grid-detect', default=1,
help='run detect on gridded squares for (maybe) better feature distribution, 4 is a good starting value, only affects ORB method')
parser.add_argument('--star-max-size', default=16,
help='4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128')
parser.add_argument('--star-response-threshold', default=30)
parser.add_argument('--star-line-threshold-projected', default=10)
parser.add_argument('--star-line-threshold-binarized', default=8)
parser.add_argument('--star-suppress-nonmax-size', default=5)
parser.add_argument('--reject-margin', default=0, help='reject features within this distance of the image outer edge margin')
parser.add_argument('--show', action='store_true',
help='show features as we detect them')
args = parser.parse_args()
proj = project.ProjectMgr(args.project)
# load existing images info which could include things like camera pose
proj.load_images_info()
# setup project detector params
detector_node = getNode('/config/detector', True)
detector_node.setString('detector', args.detector)
detector_node.setString('scale', args.scale)
if args.detector == 'SIFT':
#detector_node.setInt('sift_max_features', args.sift_max_features)
pass
elif args.detector == 'SURF':
detector_node.setInt('surf_hessian_threshold', args.surf_hessian_threshold)
detector_node.setInt('surf_noctaves', args.surf_noctaves)
elif args.detector == 'ORB':
detector_node.setInt('grid_detect', args.grid_detect)
detector_node.setInt('orb_max_features', args.orb_max_features)
elif args.detector == 'Star':
detector_node.setInt('star_max_size', args.star_max_size)
detector_node.setInt('star_response_threshold',
args.star_response_threshold)
detector_node.setInt('star_line_threshold_projected',
args.star_response_threshold)
detector_node.setInt('star_line_threshold_binarized',
args.star_line_threshold_binarized)
detector_node.setInt('star_suppress_nonmax_size',
args.star_suppress_nonmax_size)
# find features in the full image set
proj.detect_features(scale=args.scale, force=True, show=args.show)
feature_count = 0
image_count = 0
for image in proj.image_list:
feature_count += len(image.kp_list)
image_count += 1
print("Average # of features per image found = %.0f" % (feature_count / image_count))
print("Saving project configuration")
proj.save()
| mit | -2,305,476,880,242,524,400 | 43.892473 | 173 | 0.707784 | false |
sindhus/lastuser | tests/test_model_client.py | 1 | 4176 | # -*- coding: utf-8 -*-
from lastuserapp import db
import lastuser_core.models as models
from .test_db import TestDatabaseFixture
class TestClient(TestDatabaseFixture):
def setUp(self):
super(TestClient, self).setUp()
self.user = models.User.query.filter_by(username=u"user1").first()
class TestUserClientPermissions(TestDatabaseFixture):
def setUp(self):
super(TestUserClientPermissions, self).setUp()
self.user = models.User.query.filter_by(username=u"user1").first()
self.create_fixtures()
def create_fixtures(self):
# Add permission to the client
client = models.Client.query.filter_by(user=self.user).first()
self.permission = models.UserClientPermissions(user=self.user, client=client)
self.permission.permissions = u"admin"
db.session.add(self.permission)
db.session.commit()
class TestTeamClientPermissions(TestDatabaseFixture):
def setUp(self):
super(TestTeamClientPermissions, self).setUp()
self.user = models.User.query.filter_by(username=u"user1").first()
self.client = models.Client.query.filter_by(user=self.user).first()
self.create_fixtures()
def create_fixtures(self):
self.org = models.Organization(title=u"test", name=u"Test")
self.org.owners.users.append(self.user)
db.session.add(self.org)
self.team = models.Team(userid=self.user.userid, title=u"developers", org=self.org)
db.session.add(self.team)
self.team_client_permission = models.TeamClientPermissions(team=self.team, client=self.client, access_permissions=u"admin")
db.session.add(self.team_client_permission)
db.session.commit()
class TestResource(TestDatabaseFixture):
def setUp(self):
super(TestResource, self).setUp()
self.user = models.User.query.filter_by(username=u"user1").first()
self.client = models.Client.query.filter_by(user=self.user).first()
self.create_fixtures()
def create_fixtures(self):
resource = models.Resource(name=u"resource", title=u"Resource", client=self.client)
db.session.add(resource)
db.session.commit()
def test_find_all(self):
resources = self.client.resources
self.assertEqual(len(resources), 2)
self.assertEqual(set([r.name for r in resources]), set([u'test_resource', u'resource']))
class TestClientTeamAccess(TestDatabaseFixture):
def setUp(self):
super(TestClientTeamAccess, self).setUp()
self.user = models.User.query.filter_by(username=u"user1").first()
self.client = models.Client.query.filter_by(user=self.user).first()
self.client.team_access = True
db.session.commit()
self.create_fixtures()
def create_fixtures(self):
self.org = models.Organization(title=u"test", name=u"Test")
self.org.owners.users.append(self.user)
db.session.add(self.org)
self.team = models.Team(userid=self.user.userid, title=u"developers", org=self.org)
db.session.add(self.team)
self.team_client_permission = models.TeamClientPermissions(team=self.team, client=self.client, access_permissions=u"admin")
db.session.add(self.team_client_permission)
self.client_team_access = models.ClientTeamAccess(org=self.org, client=self.client, access_level=models.CLIENT_TEAM_ACCESS.ALL)
db.session.add(self.client_team_access)
db.session.commit()
def test_find_all(self):
self.assertIs(self.client.org_team_access[0], self.client_team_access)
class TestPermission(TestDatabaseFixture):
def setUp(self):
super(TestPermission, self).setUp()
self.user = models.User.query.filter_by(username=u"user1").first()
self.create_fixtures()
def create_fixtures(self):
self.org = models.Organization(title=u"test", name=u"Test")
self.org.owners.users.append(self.user)
db.session.add(self.org)
self.permission = models.Permission(user=self.user, org=self.org, name=u"admin", title=u"admin", allusers=True)
db.session.add(self.permission)
db.session.commit()
| bsd-2-clause | -182,515,348,962,970,400 | 39.941176 | 135 | 0.680795 | false |
jelly/calibre | src/calibre/ebooks/metadata/book/serialize.py | 2 | 2011 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import absolute_import, division, print_function, unicode_literals
import base64
from calibre.constants import preferred_encoding
from calibre.ebooks.metadata.book import SERIALIZABLE_FIELDS
from calibre.ebooks.metadata.book.base import Metadata
from calibre.utils.imghdr import what
def ensure_unicode(obj, enc=preferred_encoding):
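    """Recursively decode bytes to unicode, descending into lists, tuples and dicts."""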
if isinstance(obj, unicode):
return obj
if isinstance(obj, bytes):
return obj.decode(enc, 'replace')
if isinstance(obj, (list, tuple)):
return [ensure_unicode(x) for x in obj]
if isinstance(obj, dict):
return {ensure_unicode(k): ensure_unicode(v) for k, v in obj.iteritems()}
return obj
def read_cover(mi):
if mi.cover_data and mi.cover_data[1]:
return mi
if mi.cover:
try:
with lopen(mi.cover, 'rb') as f:
cd = f.read()
mi.cover_data = what(None, cd), cd
except EnvironmentError:
pass
return mi
def metadata_as_dict(mi, encode_cover_data=False):
if hasattr(mi, 'to_book_metadata'):
mi = mi.to_book_metadata()
ans = {}
for field in SERIALIZABLE_FIELDS:
if field != 'cover' and not mi.is_null(field):
val = getattr(mi, field)
ans[field] = ensure_unicode(val)
if mi.cover_data and mi.cover_data[1]:
if encode_cover_data:
ans['cover_data'] = [mi.cover_data[0], base64.standard_b64encode(bytes(mi.cover_data[1]))]
else:
ans['cover_data'] = mi.cover_data
um = mi.get_all_user_metadata(False)
if um:
ans['user_metadata'] = um
return ans
def metadata_from_dict(src):
ans = Metadata('Unknown')
for key, value in src.iteritems():
if key == 'user_metadata':
ans.set_all_user_metadata(value)
else:
setattr(ans, key, value)
return ans
| gpl-3.0 | -1,328,223,682,139,849,500 | 29.469697 | 102 | 0.624565 | false |
avanov/solo | solo/server/runtime/dependencies.py | 1 | 1516 | import asyncio
from typing import Callable, Mapping, Any, get_type_hints, NamedTuple, Type, TypeVar, Awaitable, Tuple
from pyrsistent import pmap
from solo.configurator.registry import Registry
from solo.server.db.types import SQLEngine
from solo.server.request import Request
from solo.types import IO
from solo.vendor.old_session.old_session import Session, SessionStore
class Runtime(NamedTuple):
registry: Registry
dbengine: SQLEngine
memstore: Any
session_storage: SessionStore
def get_handler_deps(
runtime: Runtime,
handler: Callable,
request: Request,
) -> Tuple[Mapping[str, IO], Mapping[str, Any]]:
""" Returns a tuple of awaitable coroutine dependencies and rest dependencies.
"""
hints = get_type_hints(handler)
hints.pop('return', None)
iter_hints = (x for x in hints.items() if x[0] != 'return')
    rv = {True: {}, False: {}}
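    # bucket each resolved dependency by whether it is an awaitable coroutine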
for arg_name, dep_type in iter_hints:
dependency_getter = DEPENDENCIES[dep_type]
dep = dependency_getter(runtime)
if callable(dep):
dep = dep(request)
rv[asyncio.iscoroutine(dep)][arg_name] = dep
return pmap(rv[True]), pmap(rv[False])
T = TypeVar('T')
DEPENDENCIES: Mapping[Type[T], Callable[[Runtime], T]] = pmap({
Registry: lambda runtime: runtime.registry,
SQLEngine: lambda runtime: runtime.dbengine,
Session: lambda runtime: lambda request: runtime.session_storage.load_session(request),
SessionStore: lambda runtime: runtime.session_storage,
})
| mit | -574,166,168,978,581,570 | 31.255319 | 102 | 0.701847 | false |
mbrucher/ATK-plugins | Dynamics/ATKColoredExpander/update_version.py | 1 | 2105 | #!/usr/bin/python
# this script will update the versions in plist and installer files to match that in resource.h
import plistlib, os, datetime, fileinput, glob, sys, string
scriptpath = os.path.dirname(os.path.realpath(__file__))
def replacestrs(filename, s, r):
files = glob.glob(filename)
for line in fileinput.input(files,inplace=1):
line = line.replace(s, r)
sys.stdout.write(line)
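# Illustrative usage (a sketch): rewrite matching files in place, e.g.
#   replacestrs("Info.plist", "//Apple//", "//Apple Computer//")
# fileinput with inplace=1 redirects stdout into the file being processed,
# which is why the loop writes every (possibly modified) line back out.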
def update_plist(plistpath, CFBundleGetInfoString, CFBundleVersion):
plist = plistlib.readPlist(plistpath)
plist['CFBundleGetInfoString'] = CFBundleGetInfoString
plist['CFBundleVersion'] = CFBundleVersion
plist['CFBundleShortVersionString'] = CFBundleVersion
plistlib.writePlist(plist, plistpath)
replacestrs(plistpath, "//Apple//", "//Apple Computer//");
def main():
FullVersionStr = ""
for line in fileinput.input(scriptpath + "/JuceLibraryCode/AppConfig.h",inplace=0):
if "#define JucePlugin_Version " in line:
      # Use replace() rather than lstrip() here: lstrip() removes a *set* of
      # characters, not a prefix, and only works by luck for purely numeric
      # version strings.
      FullVersionStr = line.replace("#define JucePlugin_Version ", "").strip()
today = datetime.date.today()
CFBundleGetInfoString = FullVersionStr + ", Copyright MatthieuBrucher, " + str(today.year)
CFBundleVersion = FullVersionStr
print("update_version.py - setting version to " + FullVersionStr)
print("Updating plist version info...")
import glob
for plistpath in glob.glob(scriptpath + "/Builds/MacOSX/*.plist"):
update_plist(plistpath, CFBundleGetInfoString, CFBundleVersion)
print("Updating Mac Installer version info...")
plistpath = scriptpath + "/installer/ATKColoredExpander.pkgproj"
installer = plistlib.readPlist(plistpath)
for x in installer['PACKAGES']:
x['PACKAGE_SETTINGS']['VERSION'] = FullVersionStr
plistlib.writePlist(installer, plistpath)
replacestrs(plistpath, "//Apple//", "//Apple Computer//");
print("Updating Windows Installer version info...")
for line in fileinput.input(scriptpath + "/installer/ATKColoredExpander.iss",inplace=1):
if "AppVersion" in line:
line="AppVersion=" + FullVersionStr + "\n"
sys.stdout.write(line)
if __name__ == '__main__':
main()
| bsd-3-clause | -541,562,276,718,384,260 | 34.083333 | 95 | 0.71924 | false |
Azure/azure-sdk-for-python | sdk/edgegateway/azure-mgmt-edgegateway/azure/mgmt/edgegateway/models/periodic_timer_event_trigger.py | 1 | 2668 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .trigger import Trigger
class PeriodicTimerEventTrigger(Trigger):
"""Trigger details.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param kind: Required. Constant filled by server.
:type kind: str
:param source_info: Required. Periodic timer details.
:type source_info: ~azure.mgmt.edgegateway.models.PeriodicTimerSourceInfo
:param sink_info: Required. Role Sink information.
:type sink_info: ~azure.mgmt.edgegateway.models.RoleSinkInfo
:param custom_context_tag: A custom context tag typically used to
correlate the trigger against its usage. For example, if a periodic timer
trigger is intended for certain specific IoT modules in the device, the
tag can be the name or the image URL of the module.
:type custom_context_tag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
'source_info': {'required': True},
'sink_info': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'source_info': {'key': 'properties.sourceInfo', 'type': 'PeriodicTimerSourceInfo'},
'sink_info': {'key': 'properties.sinkInfo', 'type': 'RoleSinkInfo'},
'custom_context_tag': {'key': 'properties.customContextTag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(PeriodicTimerEventTrigger, self).__init__(**kwargs)
self.source_info = kwargs.get('source_info', None)
self.sink_info = kwargs.get('sink_info', None)
self.custom_context_tag = kwargs.get('custom_context_tag', None)
self.kind = 'PeriodicTimerEvent'
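# Illustrative construction (a sketch with hypothetical values; the referenced
# source/sink models live elsewhere in this package):
#
#   trigger = PeriodicTimerEventTrigger(
#       source_info=PeriodicTimerSourceInfo(...),
#       sink_info=RoleSinkInfo(...),
#       custom_context_tag='my-iot-module')
#
# As with other AutoRest-generated models, unset keyword arguments default to
# None and the discriminator 'kind' is filled in by __init__.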
| mit | -7,387,058,255,938,999,000 | 39.424242 | 91 | 0.610945 | false |
edx/credentials | credentials/apps/credentials/tests/test_views.py | 1 | 18099 | """
Tests for credentials rendering views.
"""
import uuid
from unittest.mock import PropertyMock, patch
import ddt
import responses
from django.template import Context, Template
from django.template.loader import select_template
from django.test import TestCase
from django.urls import reverse
from django.utils.text import slugify
from faker import Faker
from waffle.testutils import override_switch
from credentials.apps.catalog.data import OrganizationDetails, ProgramDetails
from credentials.apps.catalog.tests.factories import CourseFactory, CourseRunFactory, ProgramFactory
from credentials.apps.core.tests.factories import USER_PASSWORD, SiteConfigurationFactory, UserFactory
from credentials.apps.core.tests.mixins import SiteMixin
from credentials.apps.credentials.exceptions import MissingCertificateLogoError
from credentials.apps.credentials.models import ProgramCertificate, UserCredential
from credentials.apps.credentials.templatetags import i18n_assets
from credentials.apps.credentials.tests import factories
@ddt.ddt
class RenderCredentialViewTests(SiteMixin, TestCase):
faker = Faker()
MOCK_USER_DATA = {
"username": "test-user",
"name": "Test User",
"email": "[email protected]",
}
PROGRAM_NAME = "Fake PC"
PROGRAM_TYPE = "Professional Certificate"
CREDENTIAL_TITLE = "Fake Custom Credential Title"
def setUp(self):
super().setUp()
self.course = CourseFactory.create(site=self.site)
self.course_runs = CourseRunFactory.create_batch(2, course=self.course)
self.course_certificates = [
factories.CourseCertificateFactory.create(
course_id=course_run.key, site=self.site, certificate_available_date="1994-05-11T03:14:01Z"
)
for course_run in self.course_runs
]
self.program = ProgramFactory(title="TestProgram1", course_runs=self.course_runs, site=self.site)
self.program_certificate = factories.ProgramCertificateFactory(site=self.site, program_uuid=self.program.uuid)
self.program_certificate.program = self.program
self.program_certificate.save()
self.signatory_1 = factories.SignatoryFactory()
self.signatory_2 = factories.SignatoryFactory()
self.program_certificate.signatories.add(self.signatory_1, self.signatory_2)
self.user_credential = factories.UserCredentialFactory(
username=self.MOCK_USER_DATA["username"], credential=self.program_certificate
)
self.course_user_credentials = [
factories.UserCredentialFactory.create(
username=self.MOCK_USER_DATA["username"],
credential=course_cert,
)
for course_cert in self.course_certificates
]
self.visible_date_attr = factories.UserCredentialAttributeFactory(
user_credential=self.user_credential,
name="visible_date",
value="1970-01-01T01:01:01Z",
)
self.platform_name = self.site.siteconfiguration.platform_name
user = UserFactory(username=self.MOCK_USER_DATA["username"])
self.client.login(username=user.username, password=USER_PASSWORD)
def _render_user_credential(
self, use_proper_logo_url=True, user_credential=None, program_certificate=None, custom_orgs=None
):
"""Helper method to render a user certificate."""
user_credential = user_credential or self.user_credential
program_certificate = program_certificate or self.program_certificate
program_uuid = program_certificate.program_uuid
credential_title = program_certificate.title or self.PROGRAM_NAME
if custom_orgs:
organizations = custom_orgs
else:
organizations = [
self._create_organization_details(use_proper_logo_url),
self._create_organization_details(use_proper_logo_url),
]
mocked_program_data = ProgramDetails(
uuid=str(program_uuid),
title=self.PROGRAM_NAME,
type=self.PROGRAM_TYPE,
type_slug=slugify(self.PROGRAM_TYPE),
credential_title=credential_title,
course_count=2,
organizations=organizations,
hours_of_effort=self.faker.pyint(),
status="active",
)
with patch("credentials.apps.core.models.SiteConfiguration.get_user_api_data") as user_data, patch(
"credentials.apps.credentials.models.ProgramCertificate.program_details", new_callable=PropertyMock
) as mock_program_details:
user_data.return_value = self.MOCK_USER_DATA
mock_program_details.return_value = mocked_program_data
response = self.client.get(user_credential.get_absolute_url())
self.assertEqual(response.status_code, 200)
return response
def _create_organization_details(self, use_proper_logo_url=True):
"""Helper method to create organization details."""
return OrganizationDetails(
uuid=str(uuid.uuid4()),
key=self.faker.word(),
name=self.faker.word(),
display_name=self.faker.word(),
certificate_logo_image_url=self.faker.url() if use_proper_logo_url else None,
)
def assert_matching_template_origin(self, actual, expected_template_name):
expected = select_template([expected_template_name])
self.assertEqual(actual.origin, expected.origin)
@responses.activate
def test_sharing_bar_with_anonymous_user(self):
"""Verify that the view renders certificate without sharing bar."""
self.client.logout()
response = self._render_user_credential()
self.assertNotContains(response, "Print or share your certificate")
@responses.activate
def test_sharing_bar_with_staff_user(self):
"""Verify that the view renders certificate with sharing bar."""
self.client.logout()
staff_user = UserFactory(is_staff=True)
self.client.login(username=staff_user.username, password=USER_PASSWORD)
response = self._render_user_credential()
self.assertContains(response, "Print or share your certificate")
@responses.activate
def test_awarded_with_logged_in_user(self):
"""Verify that the view renders awarded certificates with sharing bar."""
response = self._render_user_credential()
response_context_data = response.context_data
self.assertContains(response, "Print or share your certificate")
self.assertContains(response=response, text=self.PROGRAM_NAME, count=2)
self.assertNotContains(response=response, text=self.CREDENTIAL_TITLE)
self.assertEqual(response_context_data["user_credential"], self.user_credential)
self.assertEqual(response_context_data["user_data"], self.MOCK_USER_DATA)
self.assertEqual(response_context_data["page_title"], self.PROGRAM_TYPE)
self.assertEqual(response_context_data["program_name"], self.PROGRAM_NAME)
actual_child_templates = response_context_data["child_templates"]
expected_credential_template = "openedx/credentials/programs/{}/certificate.html".format(
slugify(self.PROGRAM_TYPE)
)
self.assert_matching_template_origin(actual_child_templates["credential"], expected_credential_template)
self.assert_matching_template_origin(actual_child_templates["footer"], "_footer.html")
self.assert_matching_template_origin(actual_child_templates["header"], "_header.html")
@responses.activate
def test_awarded_with_custom_title(self):
"""Verify that the view renders a custom credential title if one is provided."""
self.program_certificate.title = self.CREDENTIAL_TITLE
self.program_certificate.save()
response = self._render_user_credential()
self.assertContains(response, "Print or share your certificate")
self.assertNotContains(response=response, text=self.PROGRAM_NAME)
self.assertContains(response=response, text=self.CREDENTIAL_TITLE, count=2)
def test_revoked(self):
"""Verify that the view returns 404 when the uuid is valid but certificate status
is 'revoked'.
"""
self.user_credential.status = UserCredential.REVOKED
self.user_credential.save()
response = self.client.get(self.user_credential.get_absolute_url())
self.assertEqual(response.status_code, 404)
def test_invalid_uuid(self):
"""Verify that view returns 404 with invalid uuid."""
path = reverse("credentials:render", kwargs={"uuid": uuid.uuid4().hex})
response = self.client.get(path)
self.assertEqual(response.status_code, 404)
@responses.activate
def test_invalid_site(self):
"""Verify that the view returns a 404 if user_credentials are displayed on a site
they are not associated with.
"""
domain = "unused.testsite"
site_configuration = SiteConfigurationFactory(
site__domain=domain,
)
test_site = site_configuration.site
test_program_certificate = factories.ProgramCertificateFactory(site=test_site)
test_signatory_1 = factories.SignatoryFactory()
test_signatory_2 = factories.SignatoryFactory()
test_program_certificate.signatories.add(test_signatory_1, test_signatory_2)
test_user_credential = factories.UserCredentialFactory(
username=self.MOCK_USER_DATA["username"], credential=test_program_certificate
)
response = self.client.get(test_user_credential.get_absolute_url())
self.assertEqual(response.status_code, 404)
# Change the program certificate site to the client's site and check that the
# response returns the user's certificate.
test_program_certificate.site = self.site
test_program_certificate.save()
response = self._render_user_credential(
user_credential=test_user_credential, program_certificate=test_program_certificate
)
self.assertEqual(response.status_code, 200)
def test_invalid_credential(self):
"""Verify the view returns 404 for attempts to render unsupported credentials."""
self.user_credential = factories.UserCredentialFactory(credential=factories.CourseCertificateFactory())
response = self.client.get(self.user_credential.get_absolute_url())
self.assertEqual(response.status_code, 404)
# These four tests should be removed in MICROBA-1198 in favor of the next
# three tests.
def test_future_visible_date(self):
"""Verify that the view returns 404 when the uuid is valid but certificate is not yet visible."""
self.visible_date_attr.value = "9999-01-01T01:01:01Z"
self.visible_date_attr.save()
response = self.client.get(self.user_credential.get_absolute_url())
self.assertEqual(response.status_code, 404)
    # (This test is not replicated below because the certificate_available_date
    # field has validation that prevents invalid date data.)
@responses.activate
def test_invalid_visible_date(self):
"""Verify that the view just returns normally when the valid_date attribute can't be understood."""
self.visible_date_attr.value = "hello"
self.visible_date_attr.save()
self._render_user_credential() # Will raise exception if not 200 status
@responses.activate
def test_no_visible_date(self):
"""Verify that the view just returns normally when there isn't a valid_date attribute."""
self.visible_date_attr.delete()
self._render_user_credential() # Will raise exception if not 200 status
@responses.activate
def test_visible_date_as_issue_date(self):
"""Verify that the view renders the visible_date as the issue date."""
response = self._render_user_credential()
self.assertContains(response, "Issued January 1970")
# The following three tests are the same as the previous four, but with the
# USE_CERTIFICATE_AVAILABLE_DATE waffle switch enabled. Clean up previous
# tests in MICROBA-1198.
@override_switch("credentials.use_certificate_available_date", True)
def test_future_certificate_available_date(self):
"""Verify that the view returns 404 when the uuid is valid but certificate is not yet visible."""
self.course_certificates[0].certificate_available_date = "9999-05-11T03:14:01Z"
self.course_certificates[0].save()
response = self.client.get(self.user_credential.get_absolute_url())
self.assertEqual(response.status_code, 404)
@override_switch("credentials.use_certificate_available_date", active=True)
@responses.activate
def test_no_certificate_available_date(self):
"""Verify that the view just returns normally when there isn't a valid_date attribute."""
self.course_certificates[0].certificate_available_date = None
self.course_certificates[0].save()
self._render_user_credential() # Will raise exception if not 200 status
@override_switch("credentials.use_certificate_available_date", active=True)
@responses.activate
def test_visible_certificate_available_date(self):
"""Verify that the view renders the visible_date as the issue date."""
response = self._render_user_credential()
self.assertContains(response, "Issued May 1994")
@responses.activate
def test_signatory_organization_name_override(self):
"""Verify that the view response contain signatory organization name if signatory have organization."""
self.signatory_1.organization_name_override = self.faker.word()
self.signatory_1.save()
response = self._render_user_credential()
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.signatory_1.organization_name_override)
self.assertNotContains(response, self.signatory_2.organization_name_override)
@responses.activate
def test_logo_missing_exception(self):
with self.assertRaisesMessage(MissingCertificateLogoError, "No certificate image logo defined for program"):
self._render_user_credential(use_proper_logo_url=False)
@ddt.data((True, 'lang="es-419"'), (False, 'lang="en"'))
@ddt.unpack
@responses.activate
def test_render_language(self, language_set, expected_text):
"""
Verify that the view renders certificates in the configured language when it has been set,
and in the default language (English) when content_language has not been set.
"""
if language_set:
ProgramCertificate.objects.update_or_create(
program_uuid=self.program_certificate.program_uuid, defaults={"language": "es_419"}
)
response = self._render_user_credential()
self.assertContains(response, expected_text)
@ddt.data(1, 2, 3)
@responses.activate
def test_render_multiple_orgs(self, number_of_orgs):
"""
Verify that the view renders certificates correctly with one, two, or
three organizations.
"""
orgs = [self._create_organization_details() for n in range(number_of_orgs)]
response = self._render_user_credential(custom_orgs=orgs)
if number_of_orgs == 1:
self.assertEqual(response.context_data["org_name_string"], orgs[0].display_name)
elif number_of_orgs == 2:
self.assertEqual(
response.context_data["org_name_string"], "{} and {}".format(orgs[0].display_name, orgs[1].display_name)
)
elif number_of_orgs == 3:
self.assertEqual(
response.context_data["org_name_string"],
"{}, {}, and {}".format(orgs[0].display_name, orgs[1].display_name, orgs[2].display_name),
)
@ddt.ddt
class ExampleCredentialTests(SiteMixin, TestCase):
def test_get(self):
"""Verify the view renders a credential."""
response = self.client.get(reverse("credentials:example"))
self.assertEqual(response.status_code, 200)
response = self.client.get("{}?program_type=professional-certificate".format(reverse("credentials:example")))
self.assertEqual(response.status_code, 200)
class I18nAssetsTemplateTagTest(TestCase):
def test_construct_file_language_names(self):
"""Verify that the method for constructing file paths properly creates the set"""
filepath = "some/test/path.svg"
# Verify that for two different, full language codes all paths are generated, including the 2 characters ones
language = "es-419"
default = "en-US"
paths = i18n_assets.construct_file_language_names(filepath, language, default)
self.assertEqual(
paths,
[
"some/test/path-es-419.svg",
"some/test/path-es.svg",
"some/test/path-en-US.svg",
"some/test/path-en.svg",
"some/test/path.svg",
],
)
# Verify that for two identical, 2 character language codes, only that path and the default is generated
language = "en"
default = "en"
paths = i18n_assets.construct_file_language_names(filepath, language, default)
self.assertEqual(
paths,
[
"some/test/path-en.svg",
"some/test/path.svg",
],
)
def test_translate_file_path_filter(self):
"""Verify that the filter correctly filters an image"""
context = Context({})
template_to_render = Template(
"{% load i18n_assets %}" '{{ "openedx/images/example-logo.svg" | translate_file_path}}'
)
rendered_template = template_to_render.render(context)
# Make sure the translated string occurs in the template
self.assertEqual(rendered_template.find("openedx/images/example-logo-en.svg"), 0)
| agpl-3.0 | 3,346,619,357,067,773,000 | 45.170918 | 120 | 0.674623 | false |
beagles/sosreport-neutron | sos/sosreport.py | 1 | 40990 | """
Gather information about a system and report it using plugins
supplied for application-specific information
"""
## sosreport.py
## gather information about a system and report it
## Copyright (C) 2006 Steve Conklin <[email protected]>
### This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# pylint: disable-msg = W0611
# pylint: disable-msg = W0702
# pylint: disable-msg = R0912
# pylint: disable-msg = R0914
# pylint: disable-msg = R0915
# pylint: disable-msg = R0913
# pylint: disable-msg = E0611
# pylint: disable-msg = E1101
# pylint: disable-msg = R0904
# pylint: disable-msg = R0903
import sys
import traceback
import os
import logging
from optparse import OptionParser, Option
import ConfigParser
from sos.plugins import import_plugin
from sos.utilities import ImporterHelper
from stat import ST_UID, ST_GID, ST_MODE, ST_CTIME, ST_ATIME, ST_MTIME, S_IMODE
from time import strftime, localtime
from collections import deque
import textwrap
import tempfile
from sos import _sos as _
from sos import __version__
import sos.policies
from sos.archive import TarFileArchive, ZipFileArchive
from sos.reporting import Report, Section, Command, CopiedFile, CreatedFile, Alert, Note, PlainTextReport
class TempFileUtil(object):
def __init__(self, tmp_dir):
self.tmp_dir = tmp_dir
self.files = []
def new(self):
fd, fname = tempfile.mkstemp(dir=self.tmp_dir)
fobj = open(fname, 'w')
self.files.append((fname, fobj))
return fobj
def clean(self):
for fname, f in self.files:
try:
f.flush()
f.close()
except Exception, e:
pass
try:
os.unlink(fname)
except Exception, e:
pass
self.files = []
class OptionParserExtended(OptionParser):
""" Show examples """
def print_help(self, out=sys.stdout):
""" Prints help content including examples """
OptionParser.print_help(self, out)
print
print "Some examples:"
print
print " enable cluster plugin only and collect dlm lockdumps:"
print " # sosreport -o cluster -k cluster.lockdump"
print
print " disable memory and samba plugins, turn off rpm -Va collection:"
print " # sosreport -n memory,samba -k rpm.rpmva=off"
print
class SosOption(Option):
"""Allow to specify comma delimited list of plugins"""
ACTIONS = Option.ACTIONS + ("extend",)
STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
def take_action(self, action, dest, opt, value, values, parser):
""" Performs list extension on plugins """
if action == "extend":
try:
lvalue = value.split(",")
except:
pass
else:
values.ensure_value(dest, deque()).extend(lvalue)
else:
Option.take_action(self, action, dest, opt, value, values, parser)
class XmlReport(object):
""" Report build class """
def __init__(self):
try:
import libxml2
except ImportError:
self.enabled = False
return
else:
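            # NOTE: XML report generation stays disabled even when libxml2
            # imports successfully; the construction code below is
            # intentionally unreachable and kept only for future re-enablement.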
self.enabled = False
return
self.doc = libxml2.newDoc("1.0")
self.root = self.doc.newChild(None, "sos", None)
self.commands = self.root.newChild(None, "commands", None)
self.files = self.root.newChild(None, "files", None)
def add_command(self, cmdline, exitcode, stdout = None, stderr = None,
f_stdout=None, f_stderr=None, runtime=None):
""" Appends command run into report """
if not self.enabled:
return
cmd = self.commands.newChild(None, "cmd", None)
cmd.setNsProp(None, "cmdline", cmdline)
cmdchild = cmd.newChild(None, "exitcode", str(exitcode))
if runtime:
cmd.newChild(None, "runtime", str(runtime))
if stdout or f_stdout:
cmdchild = cmd.newChild(None, "stdout", stdout)
if f_stdout:
cmdchild.setNsProp(None, "file", f_stdout)
if stderr or f_stderr:
cmdchild = cmd.newChild(None, "stderr", stderr)
if f_stderr:
cmdchild.setNsProp(None, "file", f_stderr)
def add_file(self, fname, stats):
""" Appends file(s) added to report """
if not self.enabled:
return
cfile = self.files.newChild(None, "file", None)
cfile.setNsProp(None, "fname", fname)
cchild = cfile.newChild(None, "uid", str(stats[ST_UID]))
cchild = cfile.newChild(None, "gid", str(stats[ST_GID]))
cfile.newChild(None, "mode", str(oct(S_IMODE(stats[ST_MODE]))))
cchild = cfile.newChild(None, "ctime", strftime('%a %b %d %H:%M:%S %Y',
localtime(stats[ST_CTIME])))
cchild.setNsProp(None, "tstamp", str(stats[ST_CTIME]))
cchild = cfile.newChild(None, "atime", strftime('%a %b %d %H:%M:%S %Y',
localtime(stats[ST_ATIME])))
cchild.setNsProp(None, "tstamp", str(stats[ST_ATIME]))
cchild = cfile.newChild(None, "mtime", strftime('%a %b %d %H:%M:%S %Y',
localtime(stats[ST_MTIME])))
cchild.setNsProp(None, "tstamp", str(stats[ST_MTIME]))
def serialize(self):
""" Serializes xml """
if not self.enabled:
return
self.ui_log.info(self.doc.serialize(None, 1))
def serialize_to_file(self, fname):
""" Serializes to file """
if not self.enabled:
return
outf = tempfile.NamedTemporaryFile()
outf.write(self.doc.serialize(None, 1))
outf.flush()
self.archive.add_file(outf.name, dest=fname)
outf.close()
class SoSOptions(object):
_list_plugins = False
_noplugins = []
_enableplugins = []
_onlyplugins = []
_plugopts = []
_usealloptions = False
_upload = False
_batch = False
_verbosity = 0
_quiet = False
_debug = False
_ticket_number = ""
_customer_name = ""
_config_file = ""
_tmp_dir = ""
_report = False
_profiler = False
_compression_type = 'auto'
_options = None
def __init__(self, args=None):
if args:
self._options = self._parse_args(args)
else:
self._options = None
def _check_options_initialized(self):
if self._options != None:
raise ValueError("SoSOptions object already initialized "
+ "from command line")
@property
def list_plugins(self):
if self._options != None:
return self._options.list_plugins
return self._list_plugins
@list_plugins.setter
def list_plugins(self, value):
self._check_options_initialized()
if not isinstance(value, bool):
raise TypeError("SoSOptions.list_plugins expects a boolean")
self._list_plugins = value
@property
def noplugins(self):
if self._options != None:
return self._options.noplugins
return self._noplugins
@noplugins.setter
def noplugins(self, value):
self._check_options_initialized()
self._noplugins = value
@property
def enableplugins(self):
if self._options != None:
return self._options.enableplugins
return self._enableplugins
@enableplugins.setter
    def enableplugins(self, value):
self._check_options_initialized()
self._enableplugins = value
@property
def onlyplugins(self):
if self._options != None:
return self._options.onlyplugins
return self._onlyplugins
@onlyplugins.setter
def onlyplugins(self, value):
self._check_options_initialized()
self._onlyplugins = value
@property
def plugopts(self):
if self._options != None:
return self._options.plugopts
return self._plugopts
@plugopts.setter
def plugopts(self, value):
# If we check for anything it should be itterability.
#if not isinstance(value, list):
# raise TypeError("SoSOptions.plugopts expects a list")
self._plugopts = value
@property
def usealloptions(self):
if self._options != None:
            return self._options.usealloptions
return self._usealloptions
@usealloptions.setter
def usealloptions(self, value):
self._check_options_initialized()
        if not isinstance(value, bool):
raise TypeError("SoSOptions.usealloptions expects a boolean")
self._usealloptions = value
@property
def upload(self):
if self._options != None:
return self._options.upload
return self._upload
@upload.setter
def upload(self, value):
self._check_options_initialized()
if not isinstance(value, bool):
raise TypeError("SoSOptions.upload expects a boolean")
self._upload = value
@property
def batch(self):
if self._options != None:
return self._options.batch
return self._batch
@batch.setter
def batch(self, value):
self._check_options_initialized()
if not isinstance(value, bool):
raise TypeError("SoSOptions.batch expects a boolean")
self._batch = value
@property
def verbosity(self):
if self._options != None:
return self._options.verbosity
return self._verbosity
@verbosity.setter
def verbosity(self, value):
self._check_options_initialized()
if value < 0 or value > 3:
raise ValueError("SoSOptions.verbosity expects a value [0..3]")
self._verbosity = value
@property
def quiet(self):
if self._options != None:
return self._options.quiet
return self._quiet
@quiet.setter
def quiet(self, value):
self._check_options_initialized()
if not isinstance(value, bool):
raise TypeError("SoSOptions.quiet expects a boolean")
self._quiet = value
@property
def debug(self):
if self._options != None:
return self._options.debug
return self._debug
@debug.setter
def debug(self, value):
self._check_options_initialized()
if not isinstance(value, bool):
raise TypeError("SoSOptions.debug expects a boolean")
self._debug = value
@property
def ticket_number(self):
if self._options != None:
return self._options.ticket_number
return self._ticket_number
@ticket_number.setter
def ticket_number(self, value):
self._check_options_initialized()
self._ticket_number = value
@property
def customer_name(self):
if self._options != None:
return self._options.customer_name
return self._customer_name
@customer_name.setter
def customer_name(self, value):
self._check_options_initialized()
self._customer_name = value
@property
def config_file(self):
if self._options != None:
return self._options.config_file
return self._config_file
@config_file.setter
def config_file(self, value):
self._check_options_initialized()
self._config_file = value
@property
def tmp_dir(self):
if self._options != None:
return self._options.tmp_dir
return self._tmp_dir
@tmp_dir.setter
def tmp_dir(self, value):
self._check_options_initialized()
self._tmp_dir = value
@property
def report(self):
if self._options != None:
return self._options.report
return self._report
@report.setter
def report(self, value):
self._check_options_initialized()
if not isinstance(value, bool):
raise TypeError("SoSOptions.report expects a boolean")
self._report = value
@property
def profiler(self):
if self._options != None:
return self._options.profiler
return self._profiler
@profiler.setter
def profiler(self, value):
self._check_options_initialized()
if not isinstance(value, bool):
raise TypeError("SoSOptions.profiler expects a boolean")
self._profiler = value
@property
def compression_type(self):
if self._options != None:
return self._options.compression_type
return self._compression_type
@compression_type.setter
def compression_type(self, value):
self._check_options_initialized()
self._compression_type = value
def _parse_args(self, args):
""" Parse command line options and arguments"""
self.parser = parser = OptionParserExtended(option_class=SosOption)
parser.add_option("-l", "--list-plugins", action="store_true",
dest="list_plugins", default=False,
help="list plugins and available plugin options")
parser.add_option("-n", "--skip-plugins", action="extend",
dest="noplugins", type="string",
help="disable these plugins", default = deque())
parser.add_option("-e", "--enable-plugins", action="extend",
dest="enableplugins", type="string",
help="enable these plugins", default = deque())
parser.add_option("-o", "--only-plugins", action="extend",
dest="onlyplugins", type="string",
help="enable these plugins only", default = deque())
parser.add_option("-k", "--plugin-option", action="append",
dest="plugopts", type="string",
help="plugin options in plugname.option=value format (see -l)")
parser.add_option("-a", "--alloptions", action="store_true",
dest="usealloptions", default=False,
help="enable all options for loaded plugins")
parser.add_option("-u", "--upload", action="store",
dest="upload", default=False,
help="upload the report to an ftp server")
parser.add_option("--batch", action="store_true",
dest="batch", default=False,
help="batch mode - do not prompt interactively")
parser.add_option("-v", "--verbose", action="count",
dest="verbosity",
help="increase verbosity")
parser.add_option("", "--quiet", action="store_true",
dest="quiet", default=False,
help="only print fatal errors")
parser.add_option("--debug", action="count",
dest="debug",
help="enable interactive debugging using the python debugger")
parser.add_option("--ticket-number", action="store",
dest="ticket_number",
help="specify ticket number")
parser.add_option("--name", action="store",
dest="customer_name",
help="specify report name")
parser.add_option("--config-file", action="store",
dest="config_file",
help="specify alternate configuration file")
parser.add_option("--tmp-dir", action="store",
dest="tmp_dir",
help="specify alternate temporary directory", default=None)
parser.add_option("--report", action="store_true",
dest="report",
help="Enable HTML/XML reporting", default=False)
parser.add_option("--profile", action="store_true",
dest="profiler",
help="turn on profiling", default=False)
parser.add_option("-z", "--compression-type", dest="compression_type",
help="compression technology to use [auto, zip, gzip, bzip2, xz] (default=auto)",
default="auto")
return parser.parse_args(args)[0]
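# Illustrative sketch (not part of the original module): SoSOptions can be
# driven from argv-style strings or left empty and configured via its
# validating setters.
def _demo_sos_options():
    parsed = SoSOptions(["-o", "general,kernel", "-k", "general.syslogsize=10"])
    # parsed.onlyplugins == deque(['general', 'kernel']) via the "extend"
    # action of SosOption; parsed.plugopts == ['general.syslogsize=10'].
    manual = SoSOptions()
    manual.report = True   # setters type-check before assignment
    manual.batch = True
    return parsed, manual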
class SoSReport(object):
def __init__(self, args):
self.loaded_plugins = deque()
self.skipped_plugins = deque()
self.all_options = deque()
self.xml_report = XmlReport()
self.global_plugin_options = {}
self.archive = None
self.tempfile_util = None
try:
import signal
signal.signal(signal.SIGTERM, self.get_exit_handler())
except Exception:
pass # not available in java, but we don't care
#self.opts = self.parse_options(args)[0]
self.opts = SoSOptions(args)
self._set_debug()
self._read_config()
self.policy = sos.policies.load()
self._is_root = self.policy.is_root()
self.tmpdir = self.policy.get_tmp_dir(self.opts.tmp_dir)
self.tempfile_util = TempFileUtil(self.tmpdir)
self._set_directories()
def print_header(self):
self.ui_log.info("\n%s\n" % _("sosreport (version %s)" % (__version__,)))
def get_commons(self):
return {
'cmddir': self.cmddir,
'logdir': self.logdir,
'rptdir': self.rptdir,
'tmpdir': self.tmpdir,
'soslog': self.soslog,
'proflog' : self.proflog,
'policy': self.policy,
'verbosity': self.opts.verbosity,
'xmlreport': self.xml_report,
'cmdlineopts': self.opts,
'config': self.config,
'global_plugin_options': self.global_plugin_options,
}
def get_temp_file(self):
return self.tempfile_util.new()
def _set_archive(self):
if self.opts.compression_type not in ('auto', 'zip', 'bzip2', 'gzip', 'xz'):
raise Exception("Invalid compression type specified. Options are:" +
"auto, zip, bzip2, gzip and xz")
archive_name = os.path.join(self.tmpdir,self.policy.get_archive_name())
if self.opts.compression_type == 'auto':
auto_archive = self.policy.preferred_archive_name()
self.archive = auto_archive(archive_name, self.tmpdir)
elif self.opts.compression_type == 'zip':
self.archive = ZipFileArchive(archive_name, self.tmpdir)
else:
self.archive = TarFileArchive(archive_name, self.tmpdir)
def _make_archive_paths(self):
self.archive.makedirs(self.cmddir, 0755)
self.archive.makedirs(self.logdir, 0755)
self.archive.makedirs(self.rptdir, 0755)
def _set_directories(self):
self.cmddir = 'sos_commands'
self.logdir = 'sos_logs'
self.rptdir = 'sos_reports'
def _set_debug(self):
if self.opts.debug:
sys.excepthook = self._exception
self.raise_plugins = True
else:
self.raise_plugins = False
@staticmethod
def _exception(etype, eval_, etrace):
""" Wrap exception in debugger if not in tty """
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
# we are in interactive mode or we don't have a tty-like
# device, so we call the default hook
sys.__excepthook__(etype, eval_, etrace)
else:
import traceback, pdb
# we are NOT in interactive mode, print the exception...
traceback.print_exception(etype, eval_, etrace, limit=2, file=sys.stdout)
print
# ...then start the debugger in post-mortem mode.
pdb.pm()
def _exit(self, error=0):
raise SystemExit()
# sys.exit(error)
def get_exit_handler(self):
def exit_handler(signum, frame):
self._exit()
return exit_handler
def _read_config(self):
self.config = ConfigParser.ConfigParser()
if self.opts.config_file:
config_file = self.opts.config_file
else:
config_file = '/etc/sos.conf'
try:
self.config.readfp(open(config_file))
except IOError:
pass
def _setup_logging(self):
if not sys.stdin.isatty():
self.opts.batch = True
# main soslog
self.soslog = logging.getLogger('sos')
self.soslog.setLevel(logging.DEBUG)
self.sos_log_file = self.get_temp_file()
self.sos_log_file.close()
flog = logging.FileHandler(self.sos_log_file.name)
flog.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
flog.setLevel(logging.INFO)
self.soslog.addHandler(flog)
if not self.opts.quiet:
console = logging.StreamHandler(sys.stderr)
console.setFormatter(logging.Formatter('%(message)s'))
if self.opts.verbosity > 1:
console.setLevel(logging.DEBUG)
flog.setLevel(logging.DEBUG)
elif self.opts.verbosity > 0:
console.setLevel(logging.INFO)
else:
console.setLevel(logging.ERROR)
self.soslog.addHandler(console)
# ui log
self.ui_log = logging.getLogger('sos_ui')
self.ui_log.setLevel(logging.INFO)
self.sos_ui_log_file = self.get_temp_file()
self.sos_ui_log_file.close()
ui_fhandler = logging.FileHandler(self.sos_ui_log_file.name)
ui_fhandler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
self.ui_log.addHandler(ui_fhandler)
if not self.opts.quiet:
ui_console = logging.StreamHandler(sys.stdout)
ui_console.setFormatter(logging.Formatter('%(message)s'))
ui_console.setLevel(logging.INFO)
self.ui_log.addHandler(ui_console)
# profile logging
if self.opts.profiler:
self.proflog = logging.getLogger('sosprofile')
self.proflog.setLevel(logging.DEBUG)
self.sos_profile_log_file = self.get_temp_file()
plog = logging.FileHandler(self.sos_profile_log_file.name)
plog.setFormatter(logging.Formatter('%(message)s'))
plog.setLevel(logging.DEBUG)
self.proflog.addHandler(plog)
else:
self.proflog = logging.getLogger('sosprofile')
self.proflog.setLevel(logging.FATAL)
def _finish_logging(self):
logging.shutdown()
# the logging module seems to persist in the jython/jboss/eap world
# so the handlers need to be removed
for logger in [logging.getLogger(x) for x in ('sos', 'sosprofile', 'sos_ui')]:
for h in logger.handlers:
logger.removeHandler(h)
if getattr(self, "sos_log_file", None):
self.archive.add_file(self.sos_log_file.name, dest=os.path.join('sos_logs', 'sos.log'))
if getattr(self, "sos_profile_log_file", None):
self.archive.add_file(self.sos_profile_log_file.name, dest=os.path.join('sos_logs', 'profile.log'))
if getattr(self, "sos_ui_log_file", None):
self.archive.add_file(self.sos_ui_log_file.name, dest=os.path.join('sos_logs', 'ui.log'))
def _get_disabled_plugins(self):
disabled = []
if self.config.has_option("plugins", "disable"):
disabled = [plugin.strip() for plugin in
self.config.get("plugins", "disable").split(',')]
return disabled
def _is_skipped(self, plugin_name):
return (plugin_name in self.opts.noplugins or
plugin_name in self._get_disabled_plugins())
def _is_inactive(self, plugin_name, pluginClass):
return (not pluginClass(self.get_commons()).check_enabled() and
not plugin_name in self.opts.enableplugins and
not plugin_name in self.opts.onlyplugins)
def _is_not_default(self, plugin_name, pluginClass):
return (not pluginClass(self.get_commons()).default_enabled() and
not plugin_name in self.opts.enableplugins and
not plugin_name in self.opts.onlyplugins)
def _is_not_specified(self, plugin_name):
return (self.opts.onlyplugins and
not plugin_name in self.opts.onlyplugins)
def _skip(self, plugin_class, reason="unknown"):
self.skipped_plugins.append((
plugin_class.name(),
plugin_class(self.get_commons()),
reason
))
def _load(self, plugin_class):
self.loaded_plugins.append((
plugin_class.name(),
plugin_class(self.get_commons())
))
def load_plugins(self):
import sos.plugins
helper = ImporterHelper(sos.plugins)
plugins = helper.get_modules()
self.plugin_names = deque()
# validate and load plugins
for plug in plugins:
plugbase, ext = os.path.splitext(plug)
try:
plugin_classes = import_plugin(plugbase,
tuple(self.policy.valid_subclasses))
for plugin_class in plugin_classes:
if not self.policy.validatePlugin(plugin_class):
self.soslog.debug(_("plugin %s does not validate, skipping") % plug)
if self.opts.verbosity > 0:
self._skip(plugin_class, _("does not validate"))
continue
if plugin_class.requires_root and not self._is_root:
self.soslog.debug(_("plugin %s requires root permissions to execute, skipping") % plug)
self._skip(plugin_class, _("requires root"))
continue
# plug-in is valid, let's decide whether run it or not
self.plugin_names.append(plugbase)
if self._is_skipped(plugbase):
self._skip(plugin_class, _("skipped"))
continue
if self._is_inactive(plugbase, plugin_class):
self._skip(plugin_class, _("inactive"))
continue
if self._is_not_default(plugbase, plugin_class):
self._skip(plugin_class, _("not default"))
continue
if self._is_not_specified(plugbase):
self._skip(plugin_class, _("not specified"))
continue
self._load(plugin_class)
except Exception, e:
self.soslog.warning(_("plugin %s does not install, skipping: %s") % (plug, e))
if self.raise_plugins:
raise
def _set_all_options(self):
if self.opts.usealloptions:
for plugname, plug in self.loaded_plugins:
for name, parms in zip(plug.opt_names, plug.opt_parms):
if type(parms["enabled"])==bool:
parms["enabled"] = True
def _set_tunables(self):
if self.config.has_section("tunables"):
if not self.opts.plugopts:
self.opts.plugopts = deque()
for opt, val in self.config.items("tunables"):
if not opt.split('.')[0] in self.disabled:
self.opts.plugopts.append(opt + "=" + val)
if self.opts.plugopts:
opts = {}
for opt in self.opts.plugopts:
# split up "general.syslogsize=5"
try:
opt, val = opt.split("=")
except:
val = True
else:
if val.lower() in ["off", "disable", "disabled", "false"]:
val = False
else:
# try to convert string "val" to int()
try:
val = int(val)
except:
pass
# split up "general.syslogsize"
try:
plug, opt = opt.split(".")
except:
plug = opt
opt = True
try:
opts[plug]
except KeyError:
opts[plug] = deque()
opts[plug].append( (opt, val) )
for plugname, plug in self.loaded_plugins:
if plugname in opts:
for opt, val in opts[plugname]:
if not plug.set_option(opt, val):
self.soslog.error('no such option "%s" for plugin '
'(%s)' % (opt,plugname))
self._exit(1)
del opts[plugname]
for plugname in opts.keys():
self.soslog.error('unable to set option for disabled or non-existing '
'plugin (%s)' % (plugname))
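    # Accepted "-k" plugin option syntax handled above (illustrative):
    #   -k plugin.option=value   set 'option' on 'plugin'; the strings "off",
    #                            "disable", "disabled" and "false" become
    #                            False, decimal strings become int
    #   -k plugin.option         shorthand for plugin.option=True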
def _check_for_unknown_plugins(self):
import itertools
for plugin in itertools.chain(self.opts.onlyplugins,
self.opts.noplugins,
self.opts.enableplugins):
plugin_name = plugin.split(".")[0]
if not plugin_name in self.plugin_names:
self.soslog.fatal('a non-existing plugin (%s) was specified in the '
'command line' % (plugin_name))
self._exit(1)
def _set_plugin_options(self):
for plugin_name, plugin in self.loaded_plugins:
names, parms = plugin.get_all_options()
for optname, optparm in zip(names, parms):
self.all_options.append((plugin, plugin_name, optname, optparm))
def list_plugins(self):
if not self.loaded_plugins and not self.skipped_plugins:
self.soslog.fatal(_("no valid plugins found"))
return
if self.loaded_plugins:
self.ui_log.info(_("The following plugins are currently enabled:"))
self.ui_log.info("")
for (plugname, plug) in self.loaded_plugins:
self.ui_log.info(" %-15s %s" % (plugname, plug.get_description()))
else:
self.ui_log.info(_("No plugin enabled."))
self.ui_log.info("")
if self.skipped_plugins:
self.ui_log.info(_("The following plugins are currently disabled:"))
self.ui_log.info("")
for (plugname, plugclass, reason) in self.skipped_plugins:
self.ui_log.info(" %-15s %-14s %s" % (plugname,
reason,
plugclass.get_description()))
self.ui_log.info("")
if self.all_options:
self.ui_log.info(_("The following plugin options are available:"))
self.ui_log.info("")
for (plug, plugname, optname, optparm) in self.all_options:
# format option value based on its type (int or bool)
if type(optparm["enabled"]) == bool:
if optparm["enabled"] == True:
tmpopt = "on"
else:
tmpopt = "off"
else:
tmpopt = optparm["enabled"]
self.ui_log.info(" %-25s %-15s %s" % (
plugname + "." + optname, tmpopt, optparm["desc"]))
else:
self.ui_log.info(_("No plugin options available."))
self.ui_log.info("")
def batch(self):
if self.opts.batch:
self.ui_log.info(self.policy.get_msg())
else:
msg = self.policy.get_msg()
msg += _("Press ENTER to continue, or CTRL-C to quit.\n")
try:
raw_input(msg)
except:
self.ui_log.info("")
self._exit()
def _log_plugin_exception(self, plugin_name):
self.soslog.error("%s\n%s" % (plugin_name, traceback.format_exc()))
def prework(self):
try:
self.policy.pre_work()
self._set_archive()
self._make_archive_paths()
except Exception, e:
            traceback.print_exc()
self.ui_log.info(e)
self._exit(0)
def setup(self):
for plugname, plug in self.loaded_plugins:
try:
plug.archive = self.archive
plug.setup()
except KeyboardInterrupt:
raise
except:
if self.raise_plugins:
raise
else:
self._log_plugin_exception(plugname)
def version(self):
"""Fetch version information from all plugins and store in the report
version file"""
versions = []
versions.append("sosreport: %s" % __version__)
for plugname, plug in self.loaded_plugins:
versions.append("%s: %s" % (plugname, plug.version))
self.archive.add_string(content="\n".join(versions), dest='version.txt')
def collect(self):
self.ui_log.info(_(" Running plugins. Please wait ..."))
self.ui_log.info("")
plugruncount = 0
        for plugname, plug in self.loaded_plugins:
            plugruncount += 1
if not self.opts.quiet:
sys.stdout.write("\r Running %d/%d: %s... " % (plugruncount, len(self.loaded_plugins), plugname))
sys.stdout.flush()
try:
plug.collect()
except KeyboardInterrupt:
raise
except:
if self.raise_plugins:
raise
else:
self._log_plugin_exception(plugname)
self.ui_log.info("")
def report(self):
for plugname, plug in self.loaded_plugins:
for oneFile in plug.copied_files:
try:
self.xml_report.add_file(oneFile["srcpath"], os.stat(oneFile["srcpath"]))
except:
pass
self.xml_report.serialize_to_file(
os.path.join(self.rptdir, "sosreport.xml"))
def plain_report(self):
report = Report()
for plugname, plug in self.loaded_plugins:
section = Section(name=plugname)
for alert in plug.alerts:
section.add(Alert(alert))
if plug.custom_text:
section.add(Note(plug.custom_text))
for f in plug.copied_files:
section.add(CopiedFile(name=f['srcpath'],
href= ".." + f['dstpath']))
for cmd in plug.executed_commands:
section.add(Command(name=cmd['exe'], return_code=0,
href="../" + cmd['file']))
for content, f in plug.copy_strings:
section.add(CreatedFile(name=f))
report.add(section)
fd = self.get_temp_file()
fd.write(str(PlainTextReport(report)))
fd.flush()
self.archive.add_file(fd.name, dest=os.path.join('sos_reports', 'sos.txt'))
def html_report(self):
# Generate the header for the html output file
rfd = self.get_temp_file()
rfd.write("""
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<link rel="stylesheet" type="text/css" media="screen" href="donot.css" />
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Sos System Report</title>
</head>
<body>
""")
# Make a pass to gather Alerts and a list of module names
allAlerts = deque()
plugNames = deque()
for plugname, plug in self.loaded_plugins:
for alert in plug.alerts:
allAlerts.append('<a href="#%s">%s</a>: %s' % (plugname, plugname,
alert))
plugNames.append(plugname)
# Create a table of links to the module info
rfd.write("<hr/><h3>Loaded Plugins:</h3>")
rfd.write("<table><tr>\n")
rr = 0
for i in range(len(plugNames)):
rfd.write('<td><a href="#%s">%s</a></td>\n' % (plugNames[i],
plugNames[i]))
rr = divmod(i, 4)[1]
if (rr == 3):
rfd.write('</tr>')
if not (rr == 3):
rfd.write('</tr>')
rfd.write('</table>\n')
rfd.write('<hr/><h3>Alerts:</h3>')
rfd.write('<ul>')
for alert in allAlerts:
rfd.write('<li>%s</li>' % alert)
rfd.write('</ul>')
# Call the report method for each plugin
for plugname, plug in self.loaded_plugins:
try:
html = plug.report()
except:
if self.raise_plugins:
raise
else:
rfd.write(html)
rfd.write("</body></html>")
rfd.flush()
self.archive.add_file(rfd.name, dest=os.path.join('sos_reports', 'sos.html'))
def postproc(self):
for plugname, plug in self.loaded_plugins:
try:
plug.postproc()
except:
if self.raise_plugins:
raise
def final_work(self):
# package up the results for the support organization
self.policy.package_results(self.archive.name())
self._finish_logging()
# compression could fail for a number of reasons
try:
final_filename = self.archive.finalize(self.opts.compression_type)
except:
if self.opts.debug:
raise
else:
return False
# automated submission will go here
if not self.opts.upload:
self.policy.display_results(final_filename)
else:
self.policy.upload_results(final_filename)
self.tempfile_util.clean()
return True
def verify_plugins(self):
if not self.loaded_plugins:
self.soslog.error(_("no valid plugins were enabled"))
return False
return True
def set_global_plugin_option(self, key, value):
        self.global_plugin_options[key] = value
def execute(self):
try:
self._setup_logging()
self.policy.set_commons(self.get_commons())
self.print_header()
self.load_plugins()
self._set_tunables()
self._check_for_unknown_plugins()
self._set_plugin_options()
if self.opts.list_plugins:
self.list_plugins()
return True
# verify that at least one plug-in is enabled
if not self.verify_plugins():
return False
self.batch()
self.prework()
self.setup()
self.collect()
if self.opts.report:
self.report()
self.html_report()
self.plain_report()
self.postproc()
self.version()
return self.final_work()
except (SystemExit, KeyboardInterrupt):
if self.archive:
self.archive.cleanup()
if self.tempfile_util:
self.tempfile_util.clean()
return False
def main(args):
"""The main entry point"""
sos = SoSReport(args)
sos.execute()
| gpl-2.0 | 3,668,356,251,538,745,300 | 34.00427 | 121 | 0.542645 | false |
zbigniewwojna/text-rcnn | core/preprocessor.py | 1 | 77352 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocess images and bounding boxes for detection.
We perform two sets of operations in preprocessing stage:
(a) operations that are applied to both training and testing data,
(b) operations that are applied only to training data for the purpose of
data augmentation.
A preprocessing function receives a set of inputs,
e.g. an image and bounding boxes,
performs an operation on them, and returns them.
Some examples are: randomly cropping the image, randomly mirroring the image,
randomly changing the brightness, contrast, hue and
randomly jittering the bounding boxes.
The preprocess function receives a tensor_dict which is a dictionary that maps
different field names to their tensors. For example,
tensor_dict[fields.InputDataFields.image] holds the image tensor.
The image is a rank 4 tensor: [1, height, width, channels] with
dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where
in each row there is a box with [ymin xmin ymax xmax].
Boxes are in normalized coordinates meaning
their coordinate values range in [0, 1].
Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing
functions receive a rank 3 tensor for processing the image. Thus, inside the
preprocess function we squeeze the image to become a rank 3 tensor and then
we pass it to the functions. At the end of the preprocess we expand the image
back to rank 4.
"""
import sys
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
def _apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
rand_sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([func(
control_flow_ops.switch(x, tf.equal(rand_sel, case))[1], case)
for case in range(num_cases)])[0]
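# Illustrative sketch (not part of the original module): randomizing the
# order of two photometric ops with _apply_with_random_selector, the pattern
# color-distortion functions typically follow.
def _demo_random_selector(image):
  def _distort(img, ordering):
    if ordering == 0:
      img = tf.image.random_brightness(img, max_delta=32. / 255.)
      img = tf.image.random_contrast(img, lower=0.5, upper=1.5)
    else:
      img = tf.image.random_contrast(img, lower=0.5, upper=1.5)
      img = tf.image.random_brightness(img, max_delta=32. / 255.)
    return img
  return _apply_with_random_selector(image, _distort, num_cases=2)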
def _apply_with_random_selector_tuples(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: A tuple of input tensors.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
num_inputs = len(x)
rand_sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
tuples = [list() for t in x]
for case in range(num_cases):
new_x = [control_flow_ops.switch(t, tf.equal(rand_sel, case))[1] for t in x]
output = func(tuple(new_x), case)
for j in range(num_inputs):
tuples[j].append(output[j])
for i in range(num_inputs):
tuples[i] = control_flow_ops.merge(tuples[i])[0]
return tuple(tuples)
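# Illustrative sketch: the tuple variant threads several tensors (e.g. an
# image and its boxes) through the same randomly selected case, keeping the
# geometric transforms consistent across inputs.
def _demo_random_selector_tuples(image, boxes):
  def _identity_or_flip(inputs, case):
    img, bxs = inputs
    if case == 0:
      return img, bxs
    return tf.image.flip_left_right(img), flip_boxes(bxs)
  return _apply_with_random_selector_tuples(
      (image, boxes), _identity_or_flip, num_cases=2)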
def _random_integer(minval, maxval, seed):
"""Returns a random 0-D tensor between minval and maxval.
Args:
minval: minimum value of the random tensor.
maxval: maximum value of the random tensor.
seed: random seed.
Returns:
A random 0-D tensor between minval and maxval.
"""
return tf.random_uniform(
[], minval=minval, maxval=maxval, dtype=tf.int32, seed=seed)
def normalize_image(image, original_minval, original_maxval, target_minval,
target_maxval):
"""Normalizes pixel values in the image.
Moves the pixel values from the current [original_minval, original_maxval]
range to a the [target_minval, target_maxval] range.
Args:
image: rank 3 float32 tensor containing 1
image -> [height, width, channels].
original_minval: current image minimum value.
original_maxval: current image maximum value.
target_minval: target image minimum value.
target_maxval: target image maximum value.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('NormalizeImage', values=[image]):
original_minval = float(original_minval)
original_maxval = float(original_maxval)
target_minval = float(target_minval)
target_maxval = float(target_maxval)
image = tf.to_float(image)
image = tf.subtract(image, original_minval)
image = tf.multiply(image, (target_maxval - target_minval) /
(original_maxval - original_minval))
image = tf.add(image, target_minval)
return image
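# Illustrative sketch: mapping a uint8-range image from [0, 255] into [-1, 1],
# a common way this helper is used before feeding a network.
def _demo_normalize_image(image_uint8):
  image = normalize_image(image_uint8, original_minval=0, original_maxval=255,
                          target_minval=-1.0, target_maxval=1.0)
  # 0 -> -1.0, 255 -> 1.0, 127.5 -> 0.0 (linear rescale).
  return image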
def flip_boxes(boxes):
"""Left-right flip the boxes.
Args:
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
Returns:
Flipped boxes.
"""
# Flip boxes.
ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
flipped_xmin = tf.subtract(1.0, xmax)
flipped_xmax = tf.subtract(1.0, xmin)
flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
return flipped_boxes
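# Illustrative worked example: with normalized coordinates the horizontal
# mirror of [ymin, xmin, ymax, xmax] keeps y and maps x to 1 - x, so
# [0.1, 0.2, 0.5, 0.6] becomes [0.1, 0.4, 0.5, 0.8].
def _demo_flip_boxes():
  boxes = tf.constant([[0.1, 0.2, 0.5, 0.6]])
  return flip_boxes(boxes)  # evaluates to [[0.1, 0.4, 0.5, 0.8]]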
def retain_boxes_above_threshold(
boxes, labels, label_scores, masks=None, keypoints=None, threshold=0.0):
"""Retains boxes whose label score is above a given threshold.
If the label score for a box is missing (represented by NaN), the box is
retained. The boxes that don't pass the threshold will not appear in the
returned tensor.
Args:
boxes: float32 tensor of shape [num_instance, 4] representing boxes
location in normalized coordinates.
labels: rank 1 int32 tensor of shape [num_instance] containing the object
classes.
label_scores: float32 tensor of shape [num_instance] representing the
score for each box.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks are of
the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
threshold: scalar python float.
Returns:
retained_boxes: [num_retained_instance, 4]
    retained_labels: [num_retained_instance]
retained_label_scores: [num_retained_instance]
If masks, or keypoints are not None, the function also returns:
retained_masks: [num_retained_instance, height, width]
retained_keypoints: [num_retained_instance, num_keypoints, 2]
"""
with tf.name_scope('RetainBoxesAboveThreshold',
values=[boxes, labels, label_scores]):
indices = tf.where(
tf.logical_or(label_scores > threshold, tf.is_nan(label_scores)))
indices = tf.squeeze(indices, axis=1)
retained_boxes = tf.gather(boxes, indices)
retained_labels = tf.gather(labels, indices)
retained_label_scores = tf.gather(label_scores, indices)
result = [retained_boxes, retained_labels, retained_label_scores]
if masks is not None:
retained_masks = tf.gather(masks, indices)
result.append(retained_masks)
if keypoints is not None:
retained_keypoints = tf.gather(keypoints, indices)
result.append(retained_keypoints)
return result
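# Example usage (sketch): with scores [0.9, 0.2, NaN] and threshold 0.5, the
# first and third boxes are kept (NaN means "score missing", so the box is
# retained); `boxes` and `labels` are hypothetical tensors of matching size.
#
#   scores = tf.constant([0.9, 0.2, float('nan')])
#   kept_boxes, kept_labels, kept_scores = retain_boxes_above_threshold(
#       boxes, labels, scores, threshold=0.5)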
def _flip_masks(masks):
"""Left-right flips masks.
Args:
masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
Returns:
flipped masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
return masks[:, :, ::-1]
def random_horizontal_flip(
image,
boxes=None,
masks=None,
keypoints=None,
keypoint_flip_permutation=None,
seed=None):
"""Randomly decides whether to mirror the image and detections or not.
The probability of flipping the image is 50%.
Args:
image: rank 3 float32 tensor with shape [height, width, channels].
boxes: (optional) rank 2 float32 tensor with shape [N, 4]
containing the bounding boxes.
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_flip_permutation: rank 1 int32 tensor containing keypoint flip
permutation.
seed: random seed
Returns:
image: image which is the same shape as input image.
If boxes, masks, keypoints, and keypoint_flip_permutation is not None,
the function also returns the following tensors.
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
Raises:
ValueError: if keypoints are provided but keypoint_flip_permutation is not.
"""
def _flip_image(image):
# flip image
image_flipped = tf.image.flip_left_right(image)
return image_flipped
if keypoints is not None and keypoint_flip_permutation is None:
raise ValueError(
        'keypoints are provided but keypoint_flip_permutation is not provided')
with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]):
result = []
# random variable defining whether to do flip or not
do_a_flip_random = tf.random_uniform([], seed=seed)
# flip only if there are bounding boxes in image!
do_a_flip_random = tf.logical_and(
tf.greater(tf.size(boxes), 0), tf.greater(do_a_flip_random, 0.5))
# flip image
image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image)
result.append(image)
# flip boxes
if boxes is not None:
boxes = tf.cond(
do_a_flip_random, lambda: flip_boxes(boxes), lambda: boxes)
result.append(boxes)
# flip masks
if masks is not None:
masks = tf.cond(
do_a_flip_random, lambda: _flip_masks(masks), lambda: masks)
result.append(masks)
# flip keypoints
if keypoints is not None and keypoint_flip_permutation is not None:
permutation = keypoint_flip_permutation
keypoints = tf.cond(
do_a_flip_random,
lambda: keypoint_ops.flip_horizontal(keypoints, 0.5, permutation),
lambda: keypoints)
result.append(keypoints)
return tuple(result)
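# Example usage (sketch): flipping with keypoints requires a permutation that
# re-labels left/right parts under mirroring. Assuming three hypothetical
# keypoints ordered [left_eye, right_eye, nose], the swap would be:
#
#   flip_permutation = (1, 0, 2)
#   image, boxes, keypoints = random_horizontal_flip(
#       image, boxes=boxes, keypoints=keypoints,
#       keypoint_flip_permutation=flip_permutation)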
def random_pixel_value_scale(image, minval=0.9, maxval=1.1, seed=None):
"""Scales each value in the pixels of the image.
This function scales each pixel independent of the other ones.
For each value in image tensor, draws a random number between
  minval and maxval and multiplies the values by them.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
minval: lower ratio of scaling pixel values.
maxval: upper ratio of scaling pixel values.
seed: random seed.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('RandomPixelValueScale', values=[image]):
color_coef = tf.random_uniform(
tf.shape(image),
minval=minval,
maxval=maxval,
dtype=tf.float32,
seed=seed)
image = tf.multiply(image, color_coef)
image = tf.clip_by_value(image, 0.0, 1.0)
return image
def random_image_scale(image,
masks=None,
min_scale_ratio=0.5,
max_scale_ratio=2.0,
seed=None):
"""Scales the image size.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels].
masks: (optional) rank 3 float32 tensor containing masks with
size [height, width, num_masks]. The value is set to None if there are no
masks.
min_scale_ratio: minimum scaling ratio.
max_scale_ratio: maximum scaling ratio.
seed: random seed.
Returns:
image: image which is the same rank as input image.
    masks: If masks is not None, resized masks which are the same rank as input
      masks will be returned.
"""
with tf.name_scope('RandomImageScale', values=[image]):
result = []
image_shape = tf.shape(image)
image_height = image_shape[0]
image_width = image_shape[1]
size_coef = tf.random_uniform([],
minval=min_scale_ratio,
maxval=max_scale_ratio,
dtype=tf.float32, seed=seed)
image_newysize = tf.to_int32(
tf.multiply(tf.to_float(image_height), size_coef))
image_newxsize = tf.to_int32(
tf.multiply(tf.to_float(image_width), size_coef))
image = tf.image.resize_images(
image, [image_newysize, image_newxsize], align_corners=True)
result.append(image)
    if masks is not None:
masks = tf.image.resize_nearest_neighbor(
masks, [image_newysize, image_newxsize], align_corners=True)
result.append(masks)
return tuple(result)
def random_rgb_to_gray(image, probability=0.1, seed=None):
"""Changes the image from RGB to Grayscale with the given probability.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
probability: the probability of returning a grayscale image.
The probability should be a number between [0, 1].
seed: random seed.
Returns:
image: image which is the same shape as input image.
"""
def _image_to_gray(image):
image_gray1 = tf.image.rgb_to_grayscale(image)
image_gray3 = tf.image.grayscale_to_rgb(image_gray1)
return image_gray3
with tf.name_scope('RandomRGBtoGray', values=[image]):
    # random variable defining whether to convert to grayscale or not
do_gray_random = tf.random_uniform([], seed=seed)
image = tf.cond(
tf.greater(do_gray_random, probability), lambda: image,
lambda: _image_to_gray(image))
return image
def random_adjust_brightness(image, max_delta=0.2):
"""Randomly adjusts brightness.
Makes sure the output image is still between 0 and 1.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
max_delta: how much to change the brightness. A value between [0, 1).
  Returns:
    image: image which is the same shape as input image.
"""
with tf.name_scope('RandomAdjustBrightness', values=[image]):
image = tf.image.random_brightness(image, max_delta)
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
return image
def random_adjust_contrast(image, min_delta=0.8, max_delta=1.25):
"""Randomly adjusts contrast.
Makes sure the output image is still between 0 and 1.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
min_delta: see max_delta.
max_delta: how much to change the contrast. Contrast will change with a
value between min_delta and max_delta. This value will be
multiplied to the current contrast of the image.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('RandomAdjustContrast', values=[image]):
image = tf.image.random_contrast(image, min_delta, max_delta)
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
return image
def random_adjust_hue(image, max_delta=0.02):
"""Randomly adjusts hue.
Makes sure the output image is still between 0 and 1.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
max_delta: change hue randomly with a value between 0 and max_delta.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('RandomAdjustHue', values=[image]):
image = tf.image.random_hue(image, max_delta)
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
return image
def random_adjust_saturation(image, min_delta=0.8, max_delta=1.25):
"""Randomly adjusts saturation.
Makes sure the output image is still between 0 and 1.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
min_delta: see max_delta.
max_delta: how much to change the saturation. Saturation will change with a
value between min_delta and max_delta. This value will be
multiplied to the current saturation of the image.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('RandomAdjustSaturation', values=[image]):
image = tf.image.random_saturation(image, min_delta, max_delta)
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
return image
def random_distort_color(image, color_ordering=0):
"""Randomly distorts color.
Randomly distorts color using a combination of brightness, hue, contrast
and saturation changes. Makes sure the output image is still between 0 and 1.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0, 1).
Returns:
image: image which is the same shape as input image.
Raises:
ValueError: if color_ordering is not in {0, 1}.
"""
with tf.name_scope('RandomDistortColor', values=[image]):
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
else:
raise ValueError('color_ordering must be in {0, 1}')
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image
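# Example usage (sketch): randomizing the distortion ordering itself via the
# selector helper defined near the top of this module, so each example draws
# one of the two orderings at graph-execution time.
#
#   distorted = _apply_with_random_selector(
#       image,
#       lambda x, ordering: random_distort_color(x, color_ordering=ordering),
#       num_cases=2)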
def random_jitter_boxes(boxes, ratio=0.05, seed=None):
"""Randomly jitter boxes in image.
Args:
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
ratio: The ratio of the box width and height that the corners can jitter.
For example if the width is 100 pixels and ratio is 0.05,
the corners can jitter up to 5 pixels in the x direction.
seed: random seed.
Returns:
boxes: boxes which is the same shape as input boxes.
"""
def random_jitter_box(box, ratio, seed):
"""Randomly jitter box.
Args:
box: bounding box [1, 1, 4].
ratio: max ratio between jittered box and original box,
a number between [0, 0.5].
seed: random seed.
Returns:
jittered_box: jittered box.
"""
rand_numbers = tf.random_uniform(
[1, 1, 4], minval=-ratio, maxval=ratio, dtype=tf.float32, seed=seed)
box_width = tf.subtract(box[0, 0, 3], box[0, 0, 1])
box_height = tf.subtract(box[0, 0, 2], box[0, 0, 0])
hw_coefs = tf.stack([box_height, box_width, box_height, box_width])
hw_rand_coefs = tf.multiply(hw_coefs, rand_numbers)
jittered_box = tf.add(box, hw_rand_coefs)
jittered_box = tf.clip_by_value(jittered_box, 0.0, 1.0)
return jittered_box
with tf.name_scope('RandomJitterBoxes', values=[boxes]):
# boxes are [N, 4]. Lets first make them [N, 1, 1, 4]
boxes_shape = tf.shape(boxes)
boxes = tf.expand_dims(boxes, 1)
boxes = tf.expand_dims(boxes, 2)
distorted_boxes = tf.map_fn(
lambda x: random_jitter_box(x, ratio, seed), boxes, dtype=tf.float32)
distorted_boxes = tf.reshape(distorted_boxes, boxes_shape)
return distorted_boxes
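# Example usage (sketch): each box corner moves independently by up to 5% of
# the box's width (x direction) or height (y direction); results stay clipped
# to [0, 1].
#
#   jittered_boxes = random_jitter_boxes(boxes, ratio=0.05)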
def _strict_random_crop_image(image,
boxes,
labels,
masks=None,
keypoints=None,
transcriptions=None,
min_object_covered=1.0,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.1, 1.0),
overlap_thresh=0.3):
"""Performs random crop.
Note: boxes will be clipped to the crop. Keypoint coordinates that are
outside the crop will be set to NaN, which is consistent with the original
keypoint encoding for non-existing keypoints. This function always crops
the image and is supposed to be used by `random_crop_image` function which
sometimes returns image unchanged.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes with shape
[num_instances, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
    keypoints: (optional) rank 3 float32 tensor with shape
               [num_instances, num_keypoints, 2]. The keypoints are in y-x
               normalized coordinates.
    transcriptions: (optional) rank 1 tensor of shape [num_instances]
               containing a transcription (e.g. a text string) for each box;
               entries are gathered alongside the surviving boxes.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
If masks, or keypoints is not None, the function also returns:
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
"""
with tf.name_scope('RandomCropImage', values=[image, boxes]):
image_shape = tf.shape(image)
# boxes are [N, 4]. Lets first make them [N, 1, 4].
boxes_expanded = tf.expand_dims(
tf.clip_by_value(
boxes, clip_value_min=0.0, clip_value_max=1.0), 1)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
image_shape,
bounding_boxes=boxes_expanded,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=100,
use_image_if_no_bounding_boxes=True)
im_box_begin, im_box_size, im_box = sample_distorted_bounding_box
new_image = tf.slice(image, im_box_begin, im_box_size)
new_image.set_shape([None, None, image.get_shape()[2]])
# [1, 4]
im_box_rank2 = tf.squeeze(im_box, squeeze_dims=[0])
# [4]
im_box_rank1 = tf.squeeze(im_box)
boxlist = box_list.BoxList(boxes)
boxlist.add_field('labels', labels)
im_boxlist = box_list.BoxList(im_box_rank2)
# remove boxes that are outside cropped image
boxlist, inside_window_ids = box_list_ops.prune_completely_outside_window(
boxlist, im_box_rank1)
# remove boxes that are outside image
overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(
boxlist, im_boxlist, overlap_thresh)
# change the coordinate of the remaining boxes
new_labels = overlapping_boxlist.get_field('labels')
new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,
im_box_rank1)
new_boxes = new_boxlist.get()
new_boxes = tf.clip_by_value(
new_boxes, clip_value_min=0.0, clip_value_max=1.0)
result = [new_image, new_boxes, new_labels]
if masks is not None:
masks_of_boxes_inside_window = tf.gather(masks, inside_window_ids)
masks_of_boxes_completely_inside_window = tf.gather(
masks_of_boxes_inside_window, keep_ids)
masks_box_begin = [0, im_box_begin[0], im_box_begin[1]]
masks_box_size = [-1, im_box_size[0], im_box_size[1]]
new_masks = tf.slice(
masks_of_boxes_completely_inside_window,
masks_box_begin, masks_box_size)
result.append(new_masks)
if keypoints is not None:
keypoints_of_boxes_inside_window = tf.gather(keypoints, inside_window_ids)
keypoints_of_boxes_completely_inside_window = tf.gather(
keypoints_of_boxes_inside_window, keep_ids)
new_keypoints = keypoint_ops.change_coordinate_frame(
keypoints_of_boxes_completely_inside_window, im_box_rank1)
new_keypoints = keypoint_ops.prune_outside_window(new_keypoints,
[0.0, 0.0, 1.0, 1.0])
result.append(new_keypoints)
if transcriptions is not None:
transcriptions_inside_window = tf.gather(transcriptions, inside_window_ids)
transcriptions_completely_inside_window = tf.gather(
transcriptions_inside_window, keep_ids)
result.append(transcriptions_completely_inside_window)
return tuple(result)
def random_crop_image(image,
boxes,
labels,
masks=None,
keypoints=None,
                      transcriptions=None,
min_object_covered=1.0,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.1, 1.0),
overlap_thresh=0.3,
random_coef=0.0,
seed=None):
"""Randomly crops the image.
Given the input image and its bounding boxes, this op randomly
crops a subimage. Given a user-provided set of input constraints,
the crop window is resampled until it satisfies these constraints.
If within 100 trials it is unable to find a valid crop, the original
image is returned. See the Args section for a description of the input
constraints. Both input boxes and returned Boxes are in normalized
form (e.g., lie in the unit square [0, 1]).
This function will return the original image with probability random_coef.
Note: boxes will be clipped to the crop. Keypoint coordinates that are
outside the crop will be set to NaN, which is consistent with the original
keypoint encoding for non-existing keypoints.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes with shape
[num_instances, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
    keypoints: (optional) rank 3 float32 tensor with shape
               [num_instances, num_keypoints, 2]. The keypoints are in y-x
               normalized coordinates.
    transcriptions: (optional) rank 1 tensor of shape [num_instances]
               containing a transcription for each box; cropped outputs keep
               only the transcriptions of the surviving boxes.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
seed: random seed.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
labels: new labels.
If masks, or keypoints are not None, the function also returns:
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
"""
def strict_random_crop_image_fn():
return _strict_random_crop_image(
image,
boxes,
labels,
masks=masks,
keypoints=keypoints,
transcriptions=transcriptions,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
overlap_thresh=overlap_thresh)
# avoids tf.cond to make faster RCNN training on borg. See b/140057645.
if random_coef < sys.float_info.min:
result = strict_random_crop_image_fn()
else:
do_a_crop_random = tf.random_uniform([], seed=seed)
do_a_crop_random = tf.greater(do_a_crop_random, random_coef)
outputs = [image, boxes, labels]
if masks is not None:
outputs.append(masks)
if keypoints is not None:
outputs.append(keypoints)
if transcriptions is not None:
outputs.append(transcriptions)
result = tf.cond(do_a_crop_random,
strict_random_crop_image_fn,
lambda: tuple(outputs))
return result
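# Example usage (sketch): a crop that must keep at least half of some box,
# keeps only boxes overlapping the crop by >= 0.3, and falls back to the
# original image about 15% of the time; `image`, `boxes`, `labels` are
# hypothetical tensors.
#
#   new_image, new_boxes, new_labels = random_crop_image(
#       image, boxes, labels,
#       min_object_covered=0.5,
#       area_range=(0.5, 1.0),
#       overlap_thresh=0.3,
#       random_coef=0.15)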
def random_pad_image(image,
boxes,
min_image_size=None,
max_image_size=None,
pad_color=None,
seed=None):
"""Randomly pads the image.
This function randomly pads the image with zeros. The final size of the
padded image will be between min_image_size and max_image_size.
  If min_image_size is smaller than the input image size, min_image_size will
be set to the input image size. The same for max_image_size. The input image
will be located at a uniformly random location inside the padded image.
The relative location of the boxes to the original image will remain the same.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
min_image_size: a tensor of size [min_height, min_width], type tf.int32.
If passed as None, will be set to image size
[height, width].
max_image_size: a tensor of size [max_height, max_width], type tf.int32.
If passed as None, will be set to twice the
image [height * 2, width * 2].
pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.
if set as None, it will be set to average color of the input
image.
seed: random seed.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
"""
if pad_color is None:
pad_color = tf.reduce_mean(image, reduction_indices=[0, 1])
image_shape = tf.shape(image)
image_height = image_shape[0]
image_width = image_shape[1]
if max_image_size is None:
max_image_size = tf.stack([image_height * 2, image_width * 2])
max_image_size = tf.maximum(max_image_size,
tf.stack([image_height, image_width]))
if min_image_size is None:
min_image_size = tf.stack([image_height, image_width])
min_image_size = tf.maximum(min_image_size,
tf.stack([image_height, image_width]))
target_height = tf.cond(
max_image_size[0] > min_image_size[0],
lambda: _random_integer(min_image_size[0], max_image_size[0], seed),
lambda: max_image_size[0])
target_width = tf.cond(
max_image_size[1] > min_image_size[1],
lambda: _random_integer(min_image_size[1], max_image_size[1], seed),
lambda: max_image_size[1])
offset_height = tf.cond(
target_height > image_height,
lambda: _random_integer(0, target_height - image_height, seed),
lambda: tf.constant(0, dtype=tf.int32))
offset_width = tf.cond(
target_width > image_width,
lambda: _random_integer(0, target_width - image_width, seed),
lambda: tf.constant(0, dtype=tf.int32))
new_image = tf.image.pad_to_bounding_box(
image, offset_height=offset_height, offset_width=offset_width,
target_height=target_height, target_width=target_width)
# Setting color of the padded pixels
image_ones = tf.ones_like(image)
image_ones_padded = tf.image.pad_to_bounding_box(
image_ones, offset_height=offset_height, offset_width=offset_width,
target_height=target_height, target_width=target_width)
  image_color_padded = (1.0 - image_ones_padded) * pad_color
  new_image += image_color_padded
# setting boxes
new_window = tf.to_float(
tf.stack([
-offset_height, -offset_width, target_height - offset_height,
target_width - offset_width
]))
new_window /= tf.to_float(
tf.stack([image_height, image_width, image_height, image_width]))
boxlist = box_list.BoxList(boxes)
new_boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window)
new_boxes = new_boxlist.get()
return new_image, new_boxes
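# Example usage (sketch): padding to a random size between the input size and
# 1200x1200 (sizes are int32 tensors, as documented above); since pad_color is
# left as None, the padding takes the mean color of the input image.
#
#   padded_image, padded_boxes = random_pad_image(
#       image, boxes,
#       max_image_size=tf.constant([1200, 1200], dtype=tf.int32))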
def random_crop_pad_image(image,
boxes,
labels,
min_object_covered=1.0,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.1, 1.0),
overlap_thresh=0.3,
random_coef=0.0,
min_padded_size_ratio=None,
max_padded_size_ratio=None,
pad_color=None,
seed=None):
"""Randomly crops and pads the image.
Given an input image and its bounding boxes, this op first randomly crops
the image and then randomly pads the image with background values. Parameters
min_padded_size_ratio and max_padded_size_ratio, determine the range of the
final output image size. Specifically, the final image size will have a size
in the range of min_padded_size_ratio * tf.shape(image) and
max_padded_size_ratio * tf.shape(image). Note that these ratios are with
respect to the size of the original image, so we can't capture the same
effect easily by independently applying RandomCropImage
followed by RandomPadImage.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
min_padded_size_ratio: min ratio of padded image height and width to the
input image's height and width. If None, it will
be set to [0.0, 0.0].
max_padded_size_ratio: max ratio of padded image height and width to the
input image's height and width. If None, it will
be set to [2.0, 2.0].
pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.
if set as None, it will be set to average color of the randomly
cropped image.
seed: random seed.
Returns:
padded_image: padded image.
padded_boxes: boxes which is the same rank as input boxes. Boxes are in
normalized form.
cropped_labels: cropped labels.
"""
image_size = tf.shape(image)
image_height = image_size[0]
image_width = image_size[1]
if min_padded_size_ratio is None:
min_padded_size_ratio = tf.constant([0.0, 0.0], tf.float32)
if max_padded_size_ratio is None:
max_padded_size_ratio = tf.constant([2.0, 2.0], tf.float32)
cropped_image, cropped_boxes, cropped_labels = random_crop_image(
image=image,
boxes=boxes,
labels=labels,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
overlap_thresh=overlap_thresh,
random_coef=random_coef,
seed=seed)
min_image_size = tf.to_int32(
tf.to_float(tf.stack([image_height, image_width])) *
min_padded_size_ratio)
max_image_size = tf.to_int32(
tf.to_float(tf.stack([image_height, image_width])) *
max_padded_size_ratio)
padded_image, padded_boxes = random_pad_image(
cropped_image,
cropped_boxes,
min_image_size=min_image_size,
max_image_size=max_image_size,
pad_color=pad_color,
seed=seed)
return padded_image, padded_boxes, cropped_labels
def random_crop_to_aspect_ratio(image,
boxes,
labels,
masks=None,
keypoints=None,
aspect_ratio=1.0,
overlap_thresh=0.3,
seed=None):
"""Randomly crops an image to the specified aspect ratio.
  Randomly crops a portion of the image such that the crop is of the
specified aspect ratio, and the crop is as large as possible. If the specified
aspect ratio is larger than the aspect ratio of the image, this op will
randomly remove rows from the top and bottom of the image. If the specified
aspect ratio is less than the aspect ratio of the image, this op will randomly
remove cols from the left and right of the image. If the specified aspect
ratio is the same as the aspect ratio of the image, this op will return the
image.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
aspect_ratio: the aspect ratio of cropped image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
seed: random seed.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
If masks, or keypoints is not None, the function also returns:
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
Raises:
ValueError: If image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('RandomCropToAspectRatio', values=[image]):
image_shape = tf.shape(image)
orig_height = image_shape[0]
orig_width = image_shape[1]
orig_aspect_ratio = tf.to_float(orig_width) / tf.to_float(orig_height)
new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32)
def target_height_fn():
return tf.to_int32(
tf.round(
tf.to_float(orig_height) * orig_aspect_ratio / new_aspect_ratio))
target_height = tf.cond(
orig_aspect_ratio >= new_aspect_ratio,
lambda: orig_height,
target_height_fn)
def target_width_fn():
return tf.to_int32(
tf.round(
tf.to_float(orig_width) * new_aspect_ratio / orig_aspect_ratio))
target_width = tf.cond(
orig_aspect_ratio <= new_aspect_ratio,
lambda: orig_width,
target_width_fn)
    # either offset_height = 0 and offset_width is randomly chosen from
    # [0, orig_width - target_width], or else offset_width = 0 and
    # offset_height is randomly chosen from [0, orig_height - target_height]
offset_height = _random_integer(0, orig_height - target_height + 1, seed)
offset_width = _random_integer(0, orig_width - target_width + 1, seed)
new_image = tf.image.crop_to_bounding_box(
image, offset_height, offset_width, target_height, target_width)
im_box = tf.stack([
tf.to_float(offset_height) / tf.to_float(orig_height),
tf.to_float(offset_width) / tf.to_float(orig_width),
tf.to_float(offset_height + target_height) / tf.to_float(orig_height),
tf.to_float(offset_width + target_width) / tf.to_float(orig_width)
])
boxlist = box_list.BoxList(boxes)
boxlist.add_field('labels', labels)
im_boxlist = box_list.BoxList(tf.expand_dims(im_box, 0))
# remove boxes whose overlap with the image is less than overlap_thresh
overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(
boxlist, im_boxlist, overlap_thresh)
# change the coordinate of the remaining boxes
new_labels = overlapping_boxlist.get_field('labels')
new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,
im_box)
new_boxlist = box_list_ops.clip_to_window(new_boxlist,
tf.constant(
[0.0, 0.0, 1.0, 1.0],
tf.float32))
new_boxes = new_boxlist.get()
result = [new_image, new_boxes, new_labels]
if masks is not None:
masks_inside_window = tf.gather(masks, keep_ids)
masks_box_begin = tf.stack([0, offset_height, offset_width])
masks_box_size = tf.stack([-1, target_height, target_width])
new_masks = tf.slice(masks_inside_window, masks_box_begin, masks_box_size)
result.append(new_masks)
if keypoints is not None:
keypoints_inside_window = tf.gather(keypoints, keep_ids)
new_keypoints = keypoint_ops.change_coordinate_frame(
keypoints_inside_window, im_box)
new_keypoints = keypoint_ops.prune_outside_window(new_keypoints,
[0.0, 0.0, 1.0, 1.0])
result.append(new_keypoints)
return tuple(result)
def random_black_patches(image,
max_black_patches=10,
probability=0.5,
size_to_image_ratio=0.1,
random_seed=None):
"""Randomly adds some black patches to the image.
This op adds up to max_black_patches square black patches of a fixed size
to the image where size is specified via the size_to_image_ratio parameter.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
max_black_patches: number of times that the function tries to add a
black box to the image.
probability: at each try, what is the chance of adding a box.
size_to_image_ratio: Determines the ratio of the size of the black patches
to the size of the image.
box_size = size_to_image_ratio *
min(image_width, image_height)
random_seed: random seed.
Returns:
image
"""
def add_black_patch_to_image(image):
"""Function for adding one patch to the image.
Args:
image: image
Returns:
image with a randomly added black box
"""
image_shape = tf.shape(image)
image_height = image_shape[0]
image_width = image_shape[1]
box_size = tf.to_int32(
tf.multiply(
tf.minimum(tf.to_float(image_height), tf.to_float(image_width)),
size_to_image_ratio))
normalized_y_min = tf.random_uniform(
[], minval=0.0, maxval=(1.0 - size_to_image_ratio), seed=random_seed)
normalized_x_min = tf.random_uniform(
[], minval=0.0, maxval=(1.0 - size_to_image_ratio), seed=random_seed)
y_min = tf.to_int32(normalized_y_min * tf.to_float(image_height))
x_min = tf.to_int32(normalized_x_min * tf.to_float(image_width))
black_box = tf.ones([box_size, box_size, 3], dtype=tf.float32)
mask = 1.0 - tf.image.pad_to_bounding_box(black_box, y_min, x_min,
image_height, image_width)
image = tf.multiply(image, mask)
return image
with tf.name_scope('RandomBlackPatchInImage', values=[image]):
for _ in range(max_black_patches):
random_prob = tf.random_uniform([], minval=0.0, maxval=1.0,
dtype=tf.float32, seed=random_seed)
image = tf.cond(
tf.greater(random_prob, probability), lambda: image,
lambda: add_black_patch_to_image(image))
return image
def image_to_float(image):
"""Used in Faster R-CNN. Casts image pixel values to float.
Args:
    image: input image, which might be in tf.uint8 or another dtype.
Returns:
image: image in tf.float32 format.
"""
with tf.name_scope('ImageToFloat', values=[image]):
image = tf.to_float(image)
return image
def random_resize_method(image, target_size):
"""Uses a random resize method to resize the image to target size.
Args:
image: a rank 3 tensor.
target_size: a list of [target_height, target_width]
Returns:
resized image.
"""
resized_image = _apply_with_random_selector(
image,
lambda x, method: tf.image.resize_images(x, target_size, method),
num_cases=4)
return resized_image
def resize_to_range(image,
masks=None,
min_dimension=None,
max_dimension=None,
align_corners=False):
"""Resizes an image so its dimensions are within the provided value.
The output size can be described by two cases:
1. If the image can be rescaled so its minimum dimension is equal to the
provided value without the other dimension exceeding max_dimension,
then do so.
2. Otherwise, resize so the largest dimension is equal to max_dimension.
Args:
image: A 3D tensor of shape [height, width, channels]
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks.
min_dimension: (optional) (scalar) desired size of the smaller image
dimension.
max_dimension: (optional) (scalar) maximum allowed size
of the larger image dimension.
align_corners: bool. If true, exactly align all 4 corners of the input
and output. Defaults to False.
Returns:
A 3D tensor of shape [new_height, new_width, channels],
where the image has been resized (with bilinear interpolation) so that
min(new_height, new_width) == min_dimension or
max(new_height, new_width) == max_dimension.
If masks is not None, also outputs masks:
A 3D tensor of shape [num_instances, new_height, new_width]
Raises:
ValueError: if the image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('ResizeToRange', values=[image, min_dimension]):
image_shape = tf.shape(image)
orig_height = tf.to_float(image_shape[0])
orig_width = tf.to_float(image_shape[1])
orig_min_dim = tf.minimum(orig_height, orig_width)
# Calculates the larger of the possible sizes
min_dimension = tf.constant(min_dimension, dtype=tf.float32)
large_scale_factor = min_dimension / orig_min_dim
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = tf.to_int32(tf.round(orig_height * large_scale_factor))
large_width = tf.to_int32(tf.round(orig_width * large_scale_factor))
large_size = tf.stack([large_height, large_width])
if max_dimension:
# Calculates the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_dim = tf.maximum(orig_height, orig_width)
max_dimension = tf.constant(max_dimension, dtype=tf.float32)
small_scale_factor = max_dimension / orig_max_dim
# Scaling orig_(height|width) by small_scale_factor will make the larger
# dimension equal to max_dimension, save for floating point rounding
# errors. For reasonably-sized images, taking the nearest integer will
# reliably eliminate this error.
small_height = tf.to_int32(tf.round(orig_height * small_scale_factor))
small_width = tf.to_int32(tf.round(orig_width * small_scale_factor))
small_size = tf.stack([small_height, small_width])
new_size = tf.cond(
tf.to_float(tf.reduce_max(large_size)) > max_dimension,
lambda: small_size, lambda: large_size)
else:
new_size = large_size
new_image = tf.image.resize_images(image, new_size,
align_corners=align_corners)
result = new_image
if masks is not None:
num_instances = tf.shape(masks)[0]
def resize_masks_branch():
new_masks = tf.expand_dims(masks, 3)
new_masks = tf.image.resize_nearest_neighbor(
new_masks, new_size, align_corners=align_corners)
new_masks = tf.squeeze(new_masks, axis=3)
return new_masks
def reshape_masks_branch():
new_masks = tf.reshape(masks, [0, new_size[0], new_size[1]])
return new_masks
masks = tf.cond(num_instances > 0,
resize_masks_branch,
reshape_masks_branch)
result = [new_image, masks]
return result
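# Example usage (sketch): for a hypothetical 800x1200 input with
# min_dimension=600 and max_dimension=1024, scaling by 600/800 yields 600x900;
# since 900 <= 1024, case 1 applies and the output is 600x900.
#
#   resized = resize_to_range(image, min_dimension=600, max_dimension=1024)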
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
"""Scales boxes from normalized to pixel coordinates.
Args:
image: A 3D float32 tensor of shape [height, width, channels].
boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
boxes in normalized coordinates. Each row is of the form
[ymin, xmin, ymax, xmax].
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
Returns:
image: unchanged input image.
scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
bounding boxes in pixel coordinates.
scaled_keypoints: a 3D float32 tensor with shape
[num_instances, num_keypoints, 2] containing the keypoints in pixel
coordinates.
"""
boxlist = box_list.BoxList(boxes)
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
result = [image, scaled_boxes]
if keypoints is not None:
scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
result.append(scaled_keypoints)
return tuple(result)
# pylint: disable=g-doc-return-or-yield
def resize_image(image,
masks=None,
new_height=600,
new_width=1024,
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False):
"""See `tf.image.resize_images` for detailed doc."""
with tf.name_scope(
'ResizeImage',
values=[image, new_height, new_width, method, align_corners]):
new_image = tf.image.resize_images(image, [new_height, new_width],
method=method,
align_corners=align_corners)
result = new_image
if masks is not None:
num_instances = tf.shape(masks)[0]
new_size = tf.constant([new_height, new_width], dtype=tf.int32)
def resize_masks_branch():
new_masks = tf.expand_dims(masks, 3)
new_masks = tf.image.resize_nearest_neighbor(
new_masks, new_size, align_corners=align_corners)
new_masks = tf.squeeze(new_masks, axis=3)
return new_masks
def reshape_masks_branch():
new_masks = tf.reshape(masks, [0, new_size[0], new_size[1]])
return new_masks
masks = tf.cond(num_instances > 0,
resize_masks_branch,
reshape_masks_branch)
result = [new_image, masks]
return result
def subtract_channel_mean(image, means=None):
"""Normalizes an image by subtracting a mean from each channel.
Args:
image: A 3D tensor of shape [height, width, channels]
means: float list containing a mean for each channel
Returns:
normalized_images: a tensor of shape [height, width, channels]
Raises:
    ValueError: if image is not a 3D tensor or if the number of means is not
      equal to the number of channels.
"""
with tf.name_scope('SubtractChannelMean', values=[image, means]):
if len(image.get_shape()) != 3:
raise ValueError('Input must be of size [height, width, channels]')
if len(means) != image.get_shape()[-1]:
raise ValueError('len(means) must match the number of channels')
return image - [[means]]
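# Example usage (sketch): subtracting per-channel RGB means; the values below
# are the widely cited VGG means, given only as an illustration.
#
#   centered = subtract_channel_mean(image, means=[123.68, 116.78, 103.94])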
def one_hot_encoding(labels, num_classes=None):
"""One-hot encodes the multiclass labels.
Example usage:
labels = tf.constant([1, 4], dtype=tf.int32)
    one_hot = one_hot_encoding(labels, num_classes=5)
one_hot.eval() # evaluates to [0, 1, 0, 0, 1]
Args:
labels: A tensor of shape [None] corresponding to the labels.
num_classes: Number of classes in the dataset.
Returns:
onehot_labels: a tensor of shape [num_classes] corresponding to the one hot
encoding of the labels.
Raises:
ValueError: if num_classes is not specified.
"""
with tf.name_scope('OneHotEncoding', values=[labels]):
if num_classes is None:
raise ValueError('num_classes must be specified')
labels = tf.one_hot(labels, num_classes, 1, 0)
return tf.reduce_max(labels, 0)
def rgb_to_gray(image):
"""Converts a 3 channel RGB image to a 1 channel grayscale image.
Args:
image: Rank 3 float32 tensor containing 1 image -> [height, width, 3]
with pixel values varying between [0, 1].
Returns:
    image: A single channel grayscale image -> [height, width, 1].
"""
return tf.image.rgb_to_grayscale(image)
def ssd_random_crop(image,
boxes,
labels,
masks=None,
keypoints=None,
min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
aspect_ratio_range=((0.5, 2.0),) * 7,
area_range=((0.1, 1.0),) * 7,
overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
random_coef=(0.15,) * 7,
seed=None):
"""Random crop preprocessing with default parameters as in SSD paper.
Liu et al., SSD: Single shot multibox detector.
For further information on random crop preprocessing refer to RandomCrop
function above.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
seed: random seed.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
If masks, or keypoints is not None, the function also returns:
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
"""
def random_crop_selector(selected_result, index):
"""Applies random_crop_image to selected result.
Args:
      selected_result: A tuple containing image, boxes, labels, masks (if not
        None), and keypoints (if not None).
      index: The index that was randomly selected.
    Returns: A tuple containing image, boxes, labels, masks (if not None),
      and keypoints (if not None).
"""
i = 3
image, boxes, labels = selected_result[:i]
selected_masks = None
selected_keypoints = None
if masks is not None:
selected_masks = selected_result[i]
i += 1
if keypoints is not None:
selected_keypoints = selected_result[i]
return random_crop_image(
image=image,
boxes=boxes,
labels=labels,
masks=selected_masks,
keypoints=selected_keypoints,
min_object_covered=min_object_covered[index],
aspect_ratio_range=aspect_ratio_range[index],
area_range=area_range[index],
overlap_thresh=overlap_thresh[index],
random_coef=random_coef[index],
seed=seed)
result = _apply_with_random_selector_tuples(
tuple(
t for t in (image, boxes, labels, masks, keypoints) if t is not None),
random_crop_selector,
num_cases=len(min_object_covered))
return result
def ssd_random_crop_pad(image,
boxes,
labels,
min_object_covered=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
aspect_ratio_range=((0.5, 2.0),) * 6,
area_range=((0.1, 1.0),) * 6,
overlap_thresh=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
random_coef=(0.15,) * 6,
min_padded_size_ratio=(None,) * 6,
max_padded_size_ratio=(None,) * 6,
pad_color=(None,) * 6,
seed=None):
"""Random crop preprocessing with default parameters as in SSD paper.
Liu et al., SSD: Single shot multibox detector.
For further information on random crop preprocessing refer to RandomCrop
function above.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
min_padded_size_ratio: min ratio of padded image height and width to the
input image's height and width. If None, it will
be set to [0.0, 0.0].
max_padded_size_ratio: max ratio of padded image height and width to the
input image's height and width. If None, it will
be set to [2.0, 2.0].
pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.
if set as None, it will be set to average color of the randomly
cropped image.
seed: random seed.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
new_labels: new labels.
"""
def random_crop_pad_selector(image_boxes_labels, index):
image, boxes, labels = image_boxes_labels
return random_crop_pad_image(
image,
boxes,
labels,
min_object_covered=min_object_covered[index],
aspect_ratio_range=aspect_ratio_range[index],
area_range=area_range[index],
overlap_thresh=overlap_thresh[index],
random_coef=random_coef[index],
min_padded_size_ratio=min_padded_size_ratio[index],
max_padded_size_ratio=max_padded_size_ratio[index],
pad_color=pad_color[index],
seed=seed)
new_image, new_boxes, new_labels = _apply_with_random_selector_tuples(
(image, boxes, labels),
random_crop_pad_selector,
num_cases=len(min_object_covered))
return new_image, new_boxes, new_labels
def ssd_random_crop_fixed_aspect_ratio(
image,
boxes,
labels,
masks=None,
keypoints=None,
min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
aspect_ratio=1.0,
area_range=((0.1, 1.0),) * 7,
overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
random_coef=(0.15,) * 7,
seed=None):
"""Random crop preprocessing with default parameters as in SSD paper.
Liu et al., SSD: Single shot multibox detector.
For further information on random crop preprocessing refer to RandomCrop
function above.
  The only difference is that the aspect ratio of the crops is fixed.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio: aspect ratio of the cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
seed: random seed.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
If masks, or keypoints is not None, the function also returns:
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
"""
aspect_ratio_range = ((aspect_ratio, aspect_ratio),) * len(area_range)
crop_result = ssd_random_crop(image, boxes, labels, masks, keypoints,
min_object_covered, aspect_ratio_range,
area_range, overlap_thresh, random_coef, seed)
i = 3
new_image, new_boxes, new_labels = crop_result[:i]
new_masks = None
new_keypoints = None
if masks is not None:
new_masks = crop_result[i]
i += 1
if keypoints is not None:
new_keypoints = crop_result[i]
result = random_crop_to_aspect_ratio(
new_image,
new_boxes,
new_labels,
new_masks,
new_keypoints,
aspect_ratio=aspect_ratio,
seed=seed)
return result
def get_default_func_arg_map(include_instance_masks=False,
include_keypoints=False,
include_transcriptions=False):
"""Returns the default mapping from a preprocessor function to its args.
Args:
include_instance_masks: If True, preprocessing functions will modify the
instance masks, too.
    include_keypoints: If True, preprocessing functions will modify the
      keypoints, too.
    include_transcriptions: If True, preprocessing functions are expected to
      handle groundtruth text transcriptions, too. (The default map below
      currently always passes groundtruth_texts to random_crop_image,
      regardless of this flag.)
Returns:
A map from preprocessing functions to the arguments they receive.
"""
groundtruth_instance_masks = None
if include_instance_masks:
groundtruth_instance_masks = (
fields.InputDataFields.groundtruth_instance_masks)
groundtruth_keypoints = None
if include_keypoints:
groundtruth_keypoints = fields.InputDataFields.groundtruth_keypoints
prep_func_arg_map = {
normalize_image: (fields.InputDataFields.image,),
random_horizontal_flip: (fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
groundtruth_instance_masks,
groundtruth_keypoints,),
random_pixel_value_scale: (fields.InputDataFields.image,),
random_image_scale: (fields.InputDataFields.image,
groundtruth_instance_masks,),
random_rgb_to_gray: (fields.InputDataFields.image,),
random_adjust_brightness: (fields.InputDataFields.image,),
random_adjust_contrast: (fields.InputDataFields.image,),
random_adjust_hue: (fields.InputDataFields.image,),
random_adjust_saturation: (fields.InputDataFields.image,),
random_distort_color: (fields.InputDataFields.image,),
random_jitter_boxes: (fields.InputDataFields.groundtruth_boxes,),
random_crop_image: (fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_instance_masks,
groundtruth_keypoints,
fields.InputDataFields.groundtruth_texts),
random_pad_image: (fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes),
random_crop_pad_image: (fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes),
random_crop_to_aspect_ratio: (fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_instance_masks,
groundtruth_keypoints,),
random_black_patches: (fields.InputDataFields.image,),
retain_boxes_above_threshold: (
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_label_scores,
groundtruth_instance_masks,
groundtruth_keypoints,),
image_to_float: (fields.InputDataFields.image,),
random_resize_method: (fields.InputDataFields.image,),
resize_to_range: (fields.InputDataFields.image,
groundtruth_instance_masks,),
scale_boxes_to_pixel_coordinates: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
groundtruth_keypoints,),
flip_boxes: (fields.InputDataFields.groundtruth_boxes,),
resize_image: (fields.InputDataFields.image,
groundtruth_instance_masks,),
subtract_channel_mean: (fields.InputDataFields.image,),
one_hot_encoding: (fields.InputDataFields.groundtruth_image_classes,),
rgb_to_gray: (fields.InputDataFields.image,),
ssd_random_crop: (fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_instance_masks,
groundtruth_keypoints,),
ssd_random_crop_pad: (fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes),
ssd_random_crop_fixed_aspect_ratio: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_instance_masks,
groundtruth_keypoints,),
}
return prep_func_arg_map
def preprocess(tensor_dict, preprocess_options, func_arg_map=None):
"""Preprocess images and bounding boxes.
  Various types of preprocessing based on the preprocess_options list, e.g.
  "crop image" (affects image and possibly boxes), "white balance image"
  (affects only image), etc. If preprocess_options is empty, no
  preprocessing is done.
Args:
tensor_dict: dictionary that contains images, boxes, and can contain other
things as well.
images-> rank 4 float32 tensor contains
1 image -> [1, height, width, 3].
with pixel values varying between [0, 1]
boxes-> rank 2 float32 tensor containing
the bounding boxes -> [N, 4].
Boxes are in normalized form meaning
their coordinates vary between [0, 1].
Each row is in the form
of [ymin, xmin, ymax, xmax].
preprocess_options: It is a list of tuples, where each tuple contains a
function and a dictionary that contains arguments and
their values.
func_arg_map: mapping from preprocessing functions to arguments that they
expect to receive and return.
Returns:
tensor_dict: which contains the preprocessed images, bounding boxes, etc.
Raises:
ValueError: (a) If the functions passed to Preprocess
are not in func_arg_map.
(b) If the arguments that a function needs
do not exist in tensor_dict.
(c) If image in tensor_dict is not rank 4
"""
if func_arg_map is None:
func_arg_map = get_default_func_arg_map()
# changes the images to image (rank 4 to rank 3) since the functions
# receive rank 3 tensor for image
if fields.InputDataFields.image in tensor_dict:
images = tensor_dict[fields.InputDataFields.image]
if len(images.get_shape()) != 4:
raise ValueError('images in tensor_dict should be rank 4')
image = tf.squeeze(images, squeeze_dims=[0])
tensor_dict[fields.InputDataFields.image] = image
# Preprocess inputs based on preprocess_options
for option in preprocess_options:
func, params = option
if func not in func_arg_map:
raise ValueError('The function %s does not exist in func_arg_map' %
(func.__name__))
arg_names = func_arg_map[func]
for a in arg_names:
if a is not None and a not in tensor_dict:
raise ValueError('The function %s requires argument %s' %
(func.__name__, a))
def get_arg(key):
return tensor_dict[key] if key is not None else None
args = [get_arg(a) for a in arg_names]
results = func(*args, **params)
if not isinstance(results, (list, tuple)):
results = (results,)
# Removes None args since the return values will not contain those.
arg_names = [arg_name for arg_name in arg_names if arg_name is not None]
for res, arg_name in zip(results, arg_names):
tensor_dict[arg_name] = res
  # changes the image to images (rank 3 to rank 4) to be compatible with what
  # we received in the first place
if fields.InputDataFields.image in tensor_dict:
image = tensor_dict[fields.InputDataFields.image]
images = tf.expand_dims(image, 0)
tensor_dict[fields.InputDataFields.image] = images
return tensor_dict
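def _example_preprocess_usage():
  # Illustrative sketch (an addition, not part of the original module).
  # It assumes `image_to_float` and `random_adjust_brightness`, two of the
  # preprocessing functions mapped in get_default_func_arg_map() above, and
  # the rank-4 image layout required by preprocess().
  tensor_dict = {
      fields.InputDataFields.image:
          tf.zeros([1, 480, 640, 3], dtype=tf.float32),
      fields.InputDataFields.groundtruth_boxes:
          tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32),
  }
  preprocess_options = [(image_to_float, {}),
                        (random_adjust_brightness, {})]
  return preprocess(tensor_dict, preprocess_options)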
| apache-2.0 | -9,116,560,995,851,932,000 | 38.995863 | 81 | 0.640526 | false |
li-ch/pykka | tests/namespace_test.py | 1 | 2073 | import unittest
class NamespaceTest(unittest.TestCase):
def test_actor_dead_error_import(self):
from pykka import ActorDeadError as ActorDeadError1
from pykka.exceptions import ActorDeadError as ActorDeadError2
self.assertEqual(ActorDeadError1, ActorDeadError2)
def test_timeout_import(self):
from pykka import Timeout as Timeout1
from pykka.exceptions import Timeout as Timeout2
self.assertEqual(Timeout1, Timeout2)
def test_actor_import(self):
from pykka import Actor as Actor1
from pykka.actor import Actor as Actor2
self.assertEqual(Actor1, Actor2)
def test_actor_ref_import(self):
from pykka import ActorRef as ActorRef1
from pykka.actor import ActorRef as ActorRef2
self.assertEqual(ActorRef1, ActorRef2)
def test_threading_actor_import(self):
from pykka import ThreadingActor as ThreadingActor1
from pykka.actor import ThreadingActor as ThreadingActor2
self.assertEqual(ThreadingActor1, ThreadingActor2)
def test_future_import(self):
from pykka import Future as Future1
from pykka.future import Future as Future2
self.assertEqual(Future1, Future2)
def test_get_all_import(self):
from pykka import get_all as get_all1
from pykka.future import get_all as get_all2
self.assertEqual(get_all1, get_all2)
def test_threading_future_import(self):
from pykka import ThreadingFuture as ThreadingFuture1
from pykka.future import ThreadingFuture as ThreadingFuture2
self.assertEqual(ThreadingFuture1, ThreadingFuture2)
def test_actor_proxy_import(self):
from pykka import ActorProxy as ActorProxy1
from pykka.proxy import ActorProxy as ActorProxy2
self.assertEqual(ActorProxy1, ActorProxy2)
def test_actor_registry_import(self):
from pykka import ActorRegistry as ActorRegistry1
from pykka.registry import ActorRegistry as ActorRegistry2
self.assertEqual(ActorRegistry1, ActorRegistry2)
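# Convenience runner (an addition, not in the original file) so the module
# can be executed directly with `python namespace_test.py`.
if __name__ == '__main__':
    unittest.main()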
| apache-2.0 | 2,045,727,020,564,520,700 | 38.113208 | 70 | 0.723589 | false |
Alidron/demo-nao | alidron-env/lib/python2.7/site-packages/netcall/threading/client.py | 1 | 9962 | # vim: fileencoding=utf-8 et ts=4 sts=4 sw=4 tw=0
"""
An RPC client class using ZeroMQ as a transport and
the standard Python threading API for concurrency.
Authors
-------
* Alexander Glyzov
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012-2014. Brian Granger, Min Ragan-Kelley, Alexander Glyzov
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
try:
from Queue import Queue
except ImportError: # For python 3
from queue import Queue
from random import randint
from weakref import WeakValueDictionary
import zmq
from ..base_client import RPCClientBase
from ..concurrency import get_tools
from ..utils import get_zmq_classes
#-----------------------------------------------------------------------------
# RPC Service Proxy
#-----------------------------------------------------------------------------
class ThreadingRPCClient(RPCClientBase):
""" An asynchronous RPC client whose requests will not block.
Uses the standard Python threading API for concurrency.
"""
CONCURRENCY = 128
def __init__(self, context=None, executor=None, **kwargs):
"""
Parameters
==========
context : optional ZMQ <Context>
executor : optional task <Executor>
serializer : optional <Serializer> that will be used to serialize
and deserialize args, kwargs and results
"""
Context, _ = get_zmq_classes() # auto detect green env
if context is None:
self.context = Context.instance()
else:
assert isinstance(context, Context), repr(context)
self.context = context
self._tools = get_tools(env=None) # force threading API
self._executor = executor or self._tools.Executor(limit=self.CONCURRENCY)
self._ext_exec = bool(executor)
super(ThreadingRPCClient, self).__init__(**kwargs)
Event = self._tools.Event
self._ready_ev = Event()
self._exit_ev = Event()
self._futures = {} # {<req_id> : <Future>}
self._gen_queues = WeakValueDictionary() # {<req_id> : <Queue>}
# request drainage
self._sync_ev = Event()
self.req_queue = Queue(maxsize=getattr(self._executor, '_limit', self.CONCURRENCY))
self.req_pub = self.context.socket(zmq.PUB)
self.req_addr = 'inproc://%s-%s' % (
self.__class__.__name__,
'%08x' % randint(0, 0xFFFFFFFF)
)
self.req_pub.bind(self.req_addr)
# maintaining threads
self.io_thread = self._executor.submit(self._io_thread)
self.req_thread = self._executor.submit(self._req_thread)
def bind(self, *args, **kwargs):
result = super(ThreadingRPCClient, self).bind(*args, **kwargs)
self._ready_ev.set() # wake up the io_thread
return result
def bind_ports(self, *args, **kwargs):
result = super(ThreadingRPCClient, self).bind_ports(*args, **kwargs)
self._ready_ev.set() # wake up the io_thread
return result
def connect(self, *args, **kwargs):
result = super(ThreadingRPCClient, self).connect(*args, **kwargs)
        self._ready_ev.set() # wake up the io_thread
return result
def _send_request(self, request):
""" Send a multipart request to a service.
Here we send the request down the internal req_pub socket
so that an io_thread could send it back to the service.
Notice: request is a list produced by self._build_request()
"""
self.req_queue.put(request)
def _req_thread(self):
""" Forwards results from req_queue to the req_pub socket
so that an I/O thread could send them forth to a service
"""
logger = self.logger
rcv_request = self.req_queue.get
fwd_request = self.req_pub.send_multipart
try:
# synchronizing with the I/O thread
sync = self._sync_ev
while not sync.is_set():
fwd_request([b'SYNC'])
sync.wait(0.05)
logger.debug('REQ thread is synchronized')
while True:
request = rcv_request()
logger.debug('received %r', request)
if request is None:
logger.debug('req_thread received an EXIT signal')
fwd_request([b'']) # pass the EXIT signal to the io_thread
break # and exit
fwd_request(request)
except Exception as e:
logger.error(e, exc_info=True)
logger.debug('req_thread exited')
def _io_thread(self):
""" I/O thread
Waits for a ZMQ socket to become ready (._ready_ev), then processes incoming requests/replies
filling result futures thus passing control to waiting threads (see .call)
"""
logger = self.logger
ready_ev = self._ready_ev
futures = self._futures
g_queues = self._gen_queues
srv_sock = self.socket
req_sub = self.context.socket(zmq.SUB)
req_sub.connect(self.req_addr)
        req_sub.setsockopt(zmq.SUBSCRIBE, b'')  # bytes topic, matching the b'' frames sent above
_, Poller = get_zmq_classes() # auto detect green env
poller = Poller()
poller.register(srv_sock, zmq.POLLIN)
poller.register(req_sub, zmq.POLLIN)
poll = poller.poll
try:
# synchronizing with the req_thread
sync = req_sub.recv_multipart()
            assert sync[0] == b'SYNC'
logger.debug('I/O thread is synchronized')
self._sync_ev.set()
running = True
except Exception as e:
running = False
logger.error(e, exc_info=True)
while running:
ready_ev.wait() # block until socket is bound/connected
self._ready_ev.clear()
#if not self._ready:
# break # shutdown was called before connect/bind
while self._ready:
try:
reply_list = None
for socket, _ in poll():
if socket is srv_sock:
reply_list = srv_sock.recv_multipart()
elif socket is req_sub:
request = req_sub.recv_multipart()
if not request[0]:
logger.debug('io_thread received an EXIT signal')
running = False
break
srv_sock.send_multipart(request)
if reply_list is None:
continue
except Exception as e:
# the socket must have been closed
logger.warning(e)
break
logger.debug('received %r', reply_list)
reply = self._parse_reply(reply_list)
if reply is None:
#logger.debug('skipping invalid reply')
continue
req_id = reply['req_id']
msg_type = reply['type']
result = reply['result']
if msg_type == b'ACK':
#logger.debug('skipping ACK, req_id=%r', req_id)
continue
future = futures.pop(req_id, None)
if future is None:
queue = g_queues.get(req_id, None)
if queue is not None:
# existing generator
if msg_type == b'YIELD':
queue.put((result, None))
elif msg_type == b'FAIL':
queue.put((None, result))
del queue # IMPORTANT: clean up references so that
# self._gen_queues empties properly
continue
else:
if msg_type == b'OK':
# normal result
future.set_result(result)
elif msg_type == b'FAIL':
# exception
future.set_exception(result)
elif msg_type == b'YIELD':
# new generator
queue = self._tools.Queue(1)
g_queues[req_id] = queue
future.set_result(self._generator(req_id, queue.get))
if self._exit_ev.is_set():
logger.debug('io_thread received an EXIT signal')
break
# -- cleanup --
req_sub.close(0)
logger.debug('io_thread exited')
def shutdown(self):
"""Close the socket and signal the io_thread to exit"""
self._ready = False
self._exit_ev.set()
self._ready_ev.set()
self.logger.debug('signaling the threads to exit')
self.req_queue.put(None) # signal the req and io threads to exit
self._sync_ev.set()
if self.io_thread:
self.io_thread.exception()
if self.req_thread:
self.req_thread.exception()
self._ready_ev.clear()
self._exit_ev.clear()
self.logger.debug('closing the sockets')
self.socket.close(0)
self.req_pub.close(0)
if not self._ext_exec:
self.logger.debug('shutting down the executor')
self._executor.shutdown(cancel=True)
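# --- Usage sketch (illustrative addition, not part of the original module) ---
# Wiring a client up against a hypothetical service endpoint. connect() and
# shutdown() are defined above; `call` comes from RPCClientBase (referenced in
# the io_thread docstring), so its exact signature here is an assumption.
#
#   client = ThreadingRPCClient()
#   client.connect('tcp://127.0.0.1:5555')
#   try:
#       result = client.call('echo', 'hello')  # hypothetical method/service
#   finally:
#       client.shutdown()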
| mpl-2.0 | 7,783,787,698,816,321,000 | 34.201413 | 105 | 0.502108 | false |
liyatanggithub/futures | python/six2.py | 1 | 3160 | #!/usr/bin/python
#coding=utf-8
#from urllib import urlopen
import urllib2
import socket
#from numpy import *
import string
import time
import matplotlib.pyplot as plt
TIMEVALUE=6.0
URL="http://hq.sinajs.cn/list=AG1512"
xMax=100
yMin=10000
yMax=0
socket.setdefaulttimeout(4)
NowTime=float(int(time.time()))
LastTime=NowTime
FirstTime=NowTime
#FileName=str(NowTime)
dataMat=[[0],[0]]
Aaa=0
Bbb=0
Ccc=0
#m00=time.mktime(time.strptime('2000-01-01 00:00:00',"%Y-%m-%d %H:%M:%S"))
m23=time.mktime(time.strptime('2000-01-01 02:30:00',"%Y-%m-%d %H:%M:%S"))
m90=time.mktime(time.strptime('2000-01-01 09:00:00',"%Y-%m-%d %H:%M:%S"))
m113=time.mktime(time.strptime('2000-01-01 11:30:00',"%Y-%m-%d %H:%M:%S"))
m133=time.mktime(time.strptime('2000-01-01 13:30:00',"%Y-%m-%d %H:%M:%S"))
m150=time.mktime(time.strptime('2000-01-01 15:00:00',"%Y-%m-%d %H:%M:%S"))
m210=time.mktime(time.strptime('2000-01-01 21:00:00',"%Y-%m-%d %H:%M:%S"))
#m235=time.mktime(time.strptime('2000-01-01 23:59:59',"%Y-%m-%d %H:%M:%S"))
while True:
plt.pause(0.00001)
cpStrNowTime=time.strftime("%Y-%m-%d %H:%M:%S")
cpListNowTime=list(cpStrNowTime)
cpListNowTime[:10]=['2','0','0','0','-','0','1','-','0','1']
cpStr2000Time=''.join(cpListNowTime)
cp2000Time=time.mktime(time.strptime(cpStr2000Time,"%Y-%m-%d %H:%M:%S"))
if (cp2000Time>=m23 and cp2000Time<=m90) or (cp2000Time>=m113 and cp2000Time<=m133) or (cp2000Time>=m150 and cp2000Time<=m210):
#print "ÐÝÊÐʱ¼ä\t".decode('gbk')+cpStrNowTime
time.sleep(10)
continue
time.sleep(1)
NowTime=time.time()
if (NowTime-LastTime)>=TIMEVALUE:
LastTime=NowTime
try:
GetStr=urllib2.urlopen(URL).read()
except :
print "Get URL ERROR"
else:
NowPrice = string.atoi(GetStr[65:69])
if NowPrice <= Aaa :
print "*****************Low*****************"
if NowPrice >= Ccc :
print "*****************High*****************"
if NowPrice >= Aaa and NowPrice <= Ccc :
continue
if Bbb == 0 :
Bbb = NowPrice
Ccc = Bbb+3
Aaa = Bbb-3
if Ccc < NowPrice :
Ccc = NowPrice
Bbb = Ccc-3
Aaa = Bbb-3
if Aaa > NowPrice :
Aaa = NowPrice
Bbb = Aaa+3
Ccc = Bbb+3
TimeStyle=time.strftime("%Y-%m-%d %H:%M:%S")
print TimeStyle+"\t%f"%Bbb
dataMat[1].append(Bbb)
dataMat[0].append(dataMat[0][-1]+1)
if dataMat[0][-1]>=xMax :
xMax = xMax +100
plt.axis([0, xMax, yMin, yMax])
if dataMat[1][-1]<=yMin :
yMin = dataMat[1][-1]-10
plt.axis([0, xMax, yMin, yMax])
if dataMat[1][-1]>=yMax :
yMax = dataMat[1][-1]+10
plt.axis([0, xMax, yMin, yMax])
plt.axis([0, xMax, yMin, yMax])
plt.plot(dataMat[0], dataMat[1],color="blue", linewidth=1.0, linestyle="-")
plt.pause(0.00001)
| gpl-2.0 | 7,798,025,000,891,313,000 | 32.978495 | 131 | 0.526899 | false |
hammerhorn/hammerhorn-jive | twelvetone/draw_row.py | 1 | 1911 | #!/usr/bin/env python
"""
Draw tonerow
Generate an ASCII diagram of a 12-tone tonerow (musical serialism).
use: tonerow.py | draw_row.py [-h] [-s SHELL]
"""
__author__ = 'Chris Horn <[email protected]>'
import argparse
from cjh.cli import Cli
from cjh.config import Config
################
# PROCEDURES #
################
def _parse_args():
"""
Parse arguments: -h (help), -s (bash, Tk, etc.)
"""
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--shell', type=str)
return parser.parse_args()
##########
# DATA #
##########
if __name__ == '__main__':
ARGS = _parse_args()
else:
ARGS = None
CONFIG = Config()
if ARGS is not None and ARGS.shell is not None:
SHELL = CONFIG.launch_selected_shell(ARGS.shell)
else:
SHELL = CONFIG.start_user_profile()
## Set up Tk window ##
if SHELL.interface in ['Tk']:
SHELL.center_window(width_=400, height_=300)
SHELL.msg.config(font=('mono', 9, 'bold'))
#if SHELL.interface in ['dialog']:
# w, h = 46, 24
##########
# MAIN #
##########
def main():
"""
Takes a space-delimited int list (e.g., '1 2 3 4 5 6 7 8 9 10 11
12') as input; generates and ouputs an ASCII diagram.
* It might be good to define more functions.
"""
# SHELL.welcome('Draw Tonerow', 'draw a diagram of a 12-tone row')
in_str = Cli().input(prompt='')
str_list = in_str.split()
int_list = [int(s) for s in str_list]
out_str = '\n'
out_str += '\n ' + str(int_list)
out_str += '\n' + '=' * 41 + '\n'
out_str += '\n'
for row in range(12):
str_row = ' {:>2} '.format(12 - row)
for index in range(12):
if int_list[index] == 12 - row:
str_row += '[X]'
else:
str_row += '. .'
out_str += str_row + '\n'
out_str += '\n'
SHELL.output(out_str)
if __name__ == '__main__':
main()
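# Example invocation (following the pipeline in the module docstring):
#   echo "1 2 3 4 5 6 7 8 9 10 11 12" | ./draw_row.py
# prints a 12x12 grid with an [X] marking each pitch of the tone row.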
| gpl-2.0 | -2,764,221,121,992,858,600 | 21.482353 | 69 | 0.535322 | false |
hagne/atm-py | atmPy/aerosols/instruments/UHSAS/UHSAS.py | 1 | 8187 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 10 11:43:10 2014
@author: htelg
"""
import datetime
import warnings
from io import StringIO as io
import numpy as np
import pandas as pd
import pylab as plt
from scipy.interpolate import UnivariateSpline
from atmPy.general import timeseries
from atmPy.aerosols.size_distribution import sizedistribution
def read_csv(fname, norm2time = True, norm2flow = True):
uhsas_file_types = ['.xls']
first = True
if type(fname).__name__ == 'list':
for file in fname:
for i in uhsas_file_types:
if i in file:
right_file_format = True
else:
right_file_format = False
if right_file_format:
sdt, hkt= _read_csv(file, norm2time = norm2time, norm2flow = norm2flow)
if first:
sd = sdt.copy()
hk = hkt.copy()
first = False
else:
if not np.array_equal(sd.bincenters, sdt.bincenters):
txt = 'the bincenters changed between files! No good!'
raise ValueError(txt)
sd.data = pd.concat((sd.data,sdt.data))
hk.data = pd.concat((hk.data,hkt.data))
if first:
txt = """Either the prvided list of names is empty, the files are empty, or none of the file names end on
the required ending (*.xls)"""
raise ValueError(txt)
else:
sd, hk= _read_csv(fname, norm2time = norm2time, norm2flow = norm2flow)
return sd, hk
def _read_csv(fname, norm2time = True, norm2flow = True):
uhsas = _readFromFakeXLS(fname)
# return uhsas
sd,hk = _separate_sizedist_and_housekeep(uhsas, norm2time = norm2time, norm2flow = norm2flow)
hk = timeseries.TimeSeries(hk)
# return size_distribution,hk
bins = _get_bins(sd)
# return bins
dist = sizedistribution.SizeDist_TS(sd, bins, "numberConcentration")
return dist, hk
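# --- Usage sketch (illustrative addition, not part of the original module) ---
# The *.xls exports are tab-separated text despite the extension (see
# _readFromFakeXLS below); the file names here are hypothetical.
#
#   dist, hk = read_csv('uhsas_20141110.xls')
#   dist_all, hk_all = read_csv(['run1.xls', 'run2.xls'])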
def _readFromFakeXLS(fname):
"""reads and shapes a XLS file produced by the uhsas instruments"""
fr = pd.read_csv(fname, sep='\t')
newcolname = [fr.columns[e] + ' ' + str(fr.values[0][e]) for e, i in enumerate(fr.columns)]
fr.columns = newcolname
fr = fr.drop(fr.index[0])
bla = pd.Series(fr['Date -'].values + ' ' + fr['Time -'].values)
# return bla
try:
fr.index = bla.map(lambda x: datetime.datetime.strptime(x, '%m/%d/%Y %H:%M:%S.%f'))
except ValueError:
fr.index = bla.map(lambda x: datetime.datetime.strptime(x, '%m/%d/%Y %I:%M:%S.%f %p'))
fr = fr.drop(['Date -', 'Time -'], axis=1)
return fr
def _separate_sizedist_and_housekeep(uhsas, norm2time = True, norm2flow = True):
"""Beside separating size distribution and housekeeping this
function also converts the data to a numberconcentration (#/cc)
Parameters
----------
uhsas: pandas.DataFrame"""
# size_distribution = uhsas.copy()
# hk = uhsas.copy()
# # return size_distribution,hk
    first = None  # index of the first size-distribution column (None until found)
for e,col in enumerate(uhsas.columns):
cola = col.split(' ')[0]
try:
float(cola)
float(col.split(' ')[1])
except ValueError:
continue
else:
last = e
            if first is None:
                first = e
# k = size_distribution.keys()
# where = np.argwhere(k == 'Valve 0=bypass') + 1
hk = uhsas.iloc[:,:first]
sd = uhsas.iloc[:,first:last+1]
# khk = k[: first]
# size_distribution = size_distribution.drop(khk, axis=1)
# hsd = k[where:]
# hk = hk.drop(hsd, axis=1)
# return size_distribution,hk
hk['Sample sccm'] = hk['Sample sccm'].astype(float)
hk['Accum. Secs'] = hk['Accum. Secs'].astype(float)
# normalize to time and flow
if norm2time:
sd = sd.mul(1 / hk['Accum. Secs'], axis = 0 )
if norm2flow:
sd = sd.mul(60./hk['Sample sccm'], axis = 0 )
return sd,hk
def _get_bins(frame, log=False):
"""
get the bins from the column labels of the size distribution DataFrame.
"""
frame = frame.copy()
bins = np.zeros(frame.keys().shape[0]+1)
for e, i in enumerate(frame.keys()):
bin_s, bin_e = i.split(' ')
bin_s = float(bin_s)
bin_e = float(bin_e)
bins[e] = bin_s
bins[e+1] = bin_e
return bins #binCenters
def _string2dataframe(data):
sb = io(data)
dataFrame = pd.read_csv(sb,
# sep=' ',
names=('d', 'bin_no')
                            ).sort_values('d')  # .sort() was removed in newer pandas
return dataFrame
def read_calibration_fromString(data):
'''
unit of diameter must be nm
e.g.:
data = """120., 19.5
130., 22.5
140., 25
150., 27.6
173., 33.
200., 38.
233., 43.4
270., 47.5
315., 53.
365., 58.
420., 62.5
490., 67.
570., 71.
660., 75.
770., 78.
890., 79.
1040., 84."""
'''
dataFrame = _string2dataframe(data)
# return dataFrame
calibrationInstance = calibration(dataFrame)
return calibrationInstance
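# Example (using the `data` table shown in the docstring above):
#
#   cal = read_calibration_fromString(data)
#   fig, ax, cal_data, cal_func = cal.plot_calibration()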
class calibration:
def __init__(self,dataTabel):
self.data = dataTabel
self.calibrationFunction = self.get_calibrationFunctionSpline()
def save_csv(self,fname):
# save_Calibration(self,test_data_folder)
self.data.to_csv(fname, index = False)
return
def get_calibrationFunctionSpline(self, fitOrder=1):
"""
        Performs a spline fit/smoothing (scipy.interpolate.UnivariateSpline) of d over amp (yes, this way, not the other way around).
        Returns (generates): creates a function self.spline which can later be used to calculate d from amp
        Optional Parameters:
        \t fitOrder: int - smoothing factor (s) passed to the spline fit
"""
# The following two step method is necessary to get a smooth curve.
        # When I only do the second step on the cal_curve I get some weird wiggles
##### First Step
if (self.data.bin_no.values[1:]-self.data.bin_no.values[:-1]).min() < 0:
            warnings.warn('The data represent a non-injective function! This will not work. Plot the calibration to see what I mean.')
sf = UnivariateSpline(self.data.d.values, self.data.bin_no.values, s=fitOrder)
d = np.logspace(np.log10(self.data.d.values.min()), np.log10(self.data.d.values.max()), 500)
bin_no = sf(d)
# second step
cal_function = UnivariateSpline(bin_no, d, s=fitOrder)
return cal_function
def plot_calibration(self):
"""Plots the calibration function and data
Arguments
------------
cal: calibration instance
Returns
------------
figure
axes
calibration data graph
calibration function graph
"""
cal_function = self.calibrationFunction
bin_no = np.logspace(np.log10(self.data.bin_no.min()), np.log10(self.data.bin_no.max()), 500)
d = cal_function(bin_no)
f, a = plt.subplots()
cal_data, = a.plot(self.data.d, self.data.bin_no, 'o', label='data',)
cal_func, = a.plot(d, bin_no, label='function')
a.loglog()
a.set_xlim(0.9*self.data.d.min(), 1.1*self.data.d.max())
a.set_xlabel('Diameter (nm)')
a.set_ylim(0.9*self.data.bin_no.min(), 1.1*self.data.bin_no.max())
a.set_ylabel('bin number')
a.set_title('Calibration curve')
a.legend(loc = 2)
return f, a, cal_data, cal_func
def apply_on(self, dist, limit_to_cal_range = True):
dist_t = dist.copy()
bins_no = np.arange(dist_t.bins.shape[0])
cal_f = self.get_calibrationFunctionSpline()
new_d = cal_f(bins_no)
df = pd.DataFrame(np.array([bins_no, new_d]).transpose(), columns = ['bin_no','d'])
dist_t.bins = new_d
start_d = self.data.d.iloc[0]
end_d = self.data.d.iloc[-1]
if limit_to_cal_range:
dist_t = dist_t.zoom_diameter(start = start_d, end=end_d)
        return dist_t
| mit | 7,834,313,386,625,078,000 | 29.552239 | 134 | 0.577379 | false |
Togethere-com/togethere | articles/migrations/0009_auto_20160816_1201.py | 1 | 1035 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-16 10:01
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('articles', '0008_auto_20160730_1113'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='article',
name='author_profile',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='articles.Profile'),
preserve_default=False,
),
]
| gpl-3.0 | 1,260,396,241,466,786,300 | 32.387097 | 121 | 0.624155 | false |
duaneloh/Dragonfly | utils/py_src/detector.py | 1 | 12507 | '''Module containing detector class'''
import sys
import os
import numpy as np
from numpy import ma
import pandas
try:
import h5py
HDF5_MODE = True
except ImportError:
HDF5_MODE = False
class Detector(object):
"""Dragonfly detector
The detector file format is specified in github.com/duaneloh/Dragonfly/wiki
This class reads the file and provides numpy arrays which can be used for
further processing.
__init__ arguments (optional):
det_fname (string) - Path to detector file to populate attributes
detd_pix (float) - Detector distance in pixels (detd/pixsize)
ewald_rad (float) - Ewald sphere radius in voxels. If in doubt, = detd_pix
mask_flag (bool) - Whether to read the mask column for each pixel
keep_mask_1 (bool) - Whether to consider mask=1 pixels as good
For the new ASCII format, detd_pix and ewald_rad numbers are read from the file \
but for the old file, they must be provided.
Methods:
parse(fname, mask_flag=False, keep_mask_1=True)
write(fname)
assemble_frame(data, zoomed=False, sym=False)
calc_from_coords()
On parsing, it produces the following numpy arrays (each of length num_pix)
Attributes:
self.qx, self.qy, self.qz - Voxel space coordinates (origin at (0,0,0))
self.cx, self.cy - Floating point 2D coordinates (origin at (0,0))
self.x, self.y - Integer and shifted 2D coordinates (corner at (0,0))
self.mask - Assembled mask
self.raw_mask - Unassembled mask as stored in detector file
self.unassembled_mask - Unassembled mask (1=good, 0=bad)
"""
def __init__(self, det_fname=None, detd_pix=None,
ewald_rad=None, mask_flag=False, keep_mask_1=True):
self.detd = detd_pix
self.ewald_rad = ewald_rad
self.background = None
self._sym_shape = None
if det_fname is not None:
self.parse(det_fname, mask_flag, keep_mask_1)
def parse(self, fname, mask_flag=False, keep_mask_1=True):
""" Parse Dragonfly detector from file
File can either be in the HDF5 or ASCII format
"""
self.det_fname = fname
if HDF5_MODE and h5py.is_hdf5(self.det_fname):
self._parse_h5det(mask_flag, keep_mask_1)
elif os.path.splitext(self.det_fname)[1] == '.h5':
fheader = np.fromfile(self.det_fname, '=c', count=8)
            if fheader.tostring() == b'\x89HDF\r\n\x1a\n':  # compare the bytes, not array == str
if not HDF5_MODE:
raise IOError('Unable to parse HDF5 detector')
else:
self._parse_h5det(mask_flag, keep_mask_1)
else:
self._parse_asciidet(mask_flag, keep_mask_1)
else:
self._parse_asciidet(mask_flag, keep_mask_1)
def write(self, fname):
""" Write Dragonfly detector to file
If h5py is available and the file name as a '.h5' extension,
an HDF5 detector will be written, otherwise an ASCII file will be generated.
Note that the background array can only be stored in an HDF5 detector
"""
try:
val = self.qx + self.qy + self.qz + self.corr + self.raw_mask
val = self.detd + self.ewald_rad
except AttributeError:
print('Detector attributes not populated. Cannot write to file')
print('Need qx, qy, qz, corr, raw_mask, detd and ewald_rad')
return
if os.path.splitext(fname)[1] == '.h5':
if HDF5_MODE:
self._write_h5det(fname)
else:
raise IOError('Unable to write HDF5 detector without h5py')
else:
print('Writing ASCII detector file')
self._write_asciidet(fname)
def assemble_frame(self, data, zoomed=False, sym=False):
''' Assemble given raw image
Arguments:
data - array of num_pix values
zoomed (bool) - Restrict assembled image to non-masked pixels
sym (bool) - Centro-symmetrize image
Returns:
Numpy masked array representing assembled image
'''
if sym:
self._init_sym()
img = ma.masked_array(np.zeros(self._sym_shape, dtype='f8'), mask=1-self._sym_mask)
np.add.at(img, (self._sym_x, self._sym_y), data*self.unassembled_mask)
np.add.at(img, (self._sym_fx, self._sym_fy), data*self.unassembled_mask)
img.data[self._sym_bothgood] /= 2.
if zoomed:
b = self._sym_zoom_bounds
return img[b[0]:b[1], b[2]:b[3]]
else:
img = ma.masked_array(np.zeros(self.frame_shape, dtype='f8'), mask=1-self.mask)
np.add.at(img, (self.x, self.y), data*self.unassembled_mask)
if zoomed:
b = self.zoom_bounds
return img[b[0]:b[1], b[2]:b[3]]
return img
def calc_from_coords(self):
''' Calculate essential detector attributes from pixel coordinates
Needs:
cx, cy, detd, ewald_rad
Calculates:
qx, qy, qz and corr
'''
try:
val = self.cx + self.cy
val = self.detd + self.ewald_rad
except AttributeError:
print('Need cx, cy, detd and ewald_rad to be defined')
print('detd must have same units as cx and cy')
print('ewald_rad should be in voxel units')
return
fac = np.sqrt(self.cx**2 + self.cy**2 + self.detd**2)
self.qx = self.cx * self.ewald_rad / fac
self.qy = self.cy * self.ewald_rad / fac
self.qz = self.ewald_rad * (self.detd/fac - 1.)
self.corr = self.detd / fac**3 * (1. - self.cx**2 / fac**2)
def _parse_asciidet(self, mask_flag, keep_mask_1):
""" (Internal) Detector file parser
Arguments:
mask_flag (bool, optional) - Whether to read the mask column
keep_mask_1 (bool, optional) - Whether to keep mask=1 within the boolean mask
"""
print('Parsing ASCII detector file')
self._check_header()
sys.stderr.write('Reading %s...'%self.det_fname)
if mask_flag:
sys.stderr.write('with mask...')
dframe = pandas.read_csv(
self.det_fname,
delim_whitespace=True, skiprows=1, engine='c', header=None,
names=['qx', 'qy', 'qz', 'corr', 'mask'],
dtype={'qx':'f8', 'qy':'f8', 'qz':'f8', 'corr':'f8', 'mask':'u1'})
self.qx, self.qy, self.qz, self.corr = tuple([np.array(dframe[key]) # pylint: disable=C0103
for key in ['qx', 'qy', 'qz', 'corr']])
self.raw_mask = np.array(dframe['mask']).astype('u1')
sys.stderr.write('done\n')
self._process_det(mask_flag, keep_mask_1)
def _parse_h5det(self, mask_flag, keep_mask_1):
print('Parsing HDF5 detector file')
sys.stderr.write('Reading %s...'%self.det_fname)
if mask_flag:
sys.stderr.write('with mask...')
with h5py.File(self.det_fname, 'r') as fptr:
self.qx = fptr['qx'][:]
self.qy = fptr['qy'][:]
self.qz = fptr['qz'][:]
self.corr = fptr['corr'][:]
self.raw_mask = fptr['mask'][:].astype('u1')
self.detd = fptr['detd'][()]
self.ewald_rad = fptr['ewald_rad'][()]
if 'background' in fptr:
self.background = fptr['background'][:]
sys.stderr.write('done\n')
self._process_det(mask_flag, keep_mask_1)
def _write_asciidet(self, fname):
print('Writing ASCII detector file')
qx = self.qx.ravel()
qy = self.qy.ravel()
qz = self.qz.ravel()
corr = self.corr.ravel()
mask = self.raw_mask.ravel().astype('u1')
with open(fname, "w") as fptr:
fptr.write("%d %.6f %.6f\n" % (qx.size, self.detd, self.ewald_rad))
for par0, par1, par2, par3, par4 in zip(qx, qy, qz, corr, mask):
txt = "%21.15e %21.15e %21.15e %21.15e %d\n" % (par0, par1, par2, par3, par4)
fptr.write(txt)
def _write_h5det(self, fname):
print('Writing HDF5 detector file')
with h5py.File(fname, "w") as fptr:
fptr['qx'] = self.qx.ravel().astype('f8')
fptr['qy'] = self.qy.ravel().astype('f8')
fptr['qz'] = self.qz.ravel().astype('f8')
fptr['corr'] = self.corr.ravel().astype('f8')
fptr['mask'] = self.raw_mask.ravel().astype('u1')
fptr['detd'] = float(self.detd)
fptr['ewald_rad'] = float(self.ewald_rad)
if self.background is not None:
fptr['background'] = self.background.ravel().astype('f8')
def _check_header(self):
with open(self.det_fname, 'r') as fptr:
line = fptr.readline().rstrip().split()
if len(line) > 1:
self.detd = float(line[1])
self.ewald_rad = float(line[2])
else:
if self.detd is None:
raise TypeError('Old type detector file. Need detd_pix')
if self.ewald_rad is None:
raise TypeError('Old type detector file. Need ewald_rad')
def _process_det(self, mask_flag, keep_mask_1):
if mask_flag:
mask = np.copy(self.raw_mask)
if keep_mask_1:
mask[mask == 1] = 0 # To keep both 0 and 1
mask = mask // 2 # To keep both 0 and 1
else:
mask[mask == 2] = 1 # To keep only mask==0
mask = 1 - mask
else:
self.raw_mask = np.zeros(self.qx.shape, dtype='u1')
mask = np.ones(self.qx.shape, dtype='u1')
if self.qz.mean() > 0:
self.cx = self.qx * self.detd / (self.ewald_rad - self.qz) # pylint: disable=C0103
self.cy = self.qy * self.detd / (self.ewald_rad - self.qz) # pylint: disable=C0103
else:
self.cx = self.qx * self.detd / (self.ewald_rad + self.qz) # pylint: disable=C0103
self.cy = self.qy * self.detd / (self.ewald_rad + self.qz) # pylint: disable=C0103
self.x = np.round(self.cx - self.cx.min()).astype('i4')
self.y = np.round(self.cy - self.cy.min()).astype('i4')
self.unassembled_mask = mask.ravel()
self._init_assem()
def _init_assem(self):
# Calculate attributes given self.x and self.y
mask = self.unassembled_mask
self.frame_shape = (self.x.max()+1, self.y.max()+1)
self.mask = np.zeros(self.frame_shape, dtype='u1')
self.mask[self.x, self.y] = mask
self.mask = np.sign(self.mask)
        xsel = self.x[mask.astype(bool)]
        ysel = self.y[mask.astype(bool)]
self.zoom_bounds = (xsel.min(), xsel.max()+1, ysel.min(), ysel.max()+1)
def _init_sym(self, force=False):
if self._sym_shape is not None and not force:
return
self._sym_shape = (2*int(np.ceil(np.abs(self.cx).max()))+1,
2*int(np.ceil(np.abs(self.cy).max()))+1)
self._sym_x = np.round(self.cx + self._sym_shape[0]//2).astype('i4')
self._sym_y = np.round(self.cy + self._sym_shape[1]//2).astype('i4')
self._sym_fx = self._sym_shape[0] - 1 - self._sym_x
self._sym_fy = self._sym_shape[1] - 1 - self._sym_y
self._sym_mask = np.zeros(self._sym_shape, dtype='u1')
np.add.at(self._sym_mask, (self._sym_x, self._sym_y), self.unassembled_mask)
np.add.at(self._sym_mask, (self._sym_fx, self._sym_fy), self.unassembled_mask)
self._sym_bothgood = (self._sym_mask == 2)
self._sym_mask = np.sign(self._sym_mask)
mask = self.unassembled_mask
xsel = np.concatenate((self._sym_x[mask.astype('bool')], self._sym_fx[mask.astype('bool')]))
ysel = np.concatenate((self._sym_y[mask.astype('bool')], self._sym_fy[mask.astype('bool')]))
self._sym_zoom_bounds = (xsel.min(), xsel.max()+1, ysel.min(), ysel.max()+1)
@property
def coords_xy(self):
'''Return 2D pixel coordinates'''
return self.cx, self.cy
@property
def qvals_xyz(self):
'''Return 3D voxel values'''
return self.qx, self.qy, self.qz
@property
def indices_xy(self):
'''Return 2D integer coordinates (for assembly)
Corner of the detector at (0,0)'''
return self.x, self.y
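# --- Usage sketch (illustrative addition, not part of the original module) ---
# Parse a detector file, assemble a (here all-zero) frame, and write a copy.
# The file names are hypothetical.
#
#   det = Detector('det_lowq.h5')              # HDF5 or ASCII format
#   photons = np.zeros(det.qx.size)            # one value per pixel
#   img = det.assemble_frame(photons, zoomed=True)
#   det.write('det_copy.h5')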
| gpl-3.0 | -6,666,683,929,173,246,000 | 40.006557 | 100 | 0.557368 | false |
henrysher/opslib | opslib/icsr53.py | 1 | 12439 | """
IcsR53: Library for Route53
---------------------------
+--------------------+------------+--+
| This is the IcsR53 common library. |
+--------------------+------------+--+
"""
import time
import string
from boto.route53 import Route53Connection
#from boto.route53.zone import Zone
from boto.route53.record import ResourceRecordSets
from boto.route53 import exception
from opslib.icsexception import IcsR53Exception
from opslib.zone import Zone
import logging
log = logging.getLogger(__name__)
class IcsR53(object):
"""
ICS Library for R53
"""
def __init__(self, dns_name=None, **kwargs):
self.r53 = Route53Connection(**kwargs)
if dns_name is not None:
self.zone = self.get_zone(dns_name)
if not self.zone:
raise IcsR53Exception(
"Can't find DNS Zone for '%s'" % (dns_name))
@staticmethod
def parse_dns_name(name):
"""
Parse the value of Tag "DnsName"
:type name: string
:param name: the value of Instance Tag "DnsName"
for example, "test.example.com:A:Public:1"
:rtype: tuple
:return: a tuple containing (DnsName, DnsType, Public/Private, Weight)
for example, ("test.example.com", "A", True, "10")
"""
if name is None or not isinstance(name, basestring):
raise IcsR53Exception(
"DnsName should be a 'str' not %s" % type(name))
name = name.lstrip(" ").rstrip(" ").split(":", 3)
if len(name) < 3 or len(name) > 4:
raise IcsR53Exception(
"Invalid number of sub-strings: '%s'" % len(name))
if name[1].upper() not in ("A", "CNAME"):
raise IcsR53Exception(
"Invalid DNS type: 'A' or 'CNAME', not '%s'" % name[1])
if name[2].lower() not in ("public", "private"):
raise IcsR53Exception(
"Invalid DNS value: 'public' or 'private', not '%s'" % name[2])
if len(name) == 4:
return(name[0].lower(), name[1].upper(),
name[2].lower() == "public",
string.atoi(name[3]).__str__())
return(name[0].lower(), name[1].upper(),
name[2].lower() == "public",
None)
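    # Example (matching the docstring above):
    #   IcsR53.parse_dns_name("test.example.com:A:Public:10")
    #   -> ("test.example.com", "A", True, "10")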
def get_zone_id(self):
"""
Get the hosted zone ID for the specified domain name
:rtype: string
:return: a string containing the ID of the specified hosted zone
"""
return self.zone.id
def get_zone(self, name):
"""
Get the hosted zone for the specified domain name
:type name: string
:param name: the specified domain name
:rtype: class
:return: a class containing the specified hosted zone
"""
zone_dict = self.get_zone_dict(name)
return Zone(self.r53, zone_dict)
def set_zone(self, name):
"""
Set the hosted zone for the specified domain name
:type name: string
:param name: the specified domain name
"""
self.zone = self.get_zone(name)
def get_zone_dict(self, name):
"""
Get the hosted zone info for the specified domain name
:type name: string
:param name: the specified domain name
:rtype: dict
:return: a dict containing the specified hosted zone info
"""
if name is None or not isinstance(name, basestring):
raise IcsR53Exception(
"DnsName should be a 'str' not %s" % type(name))
name = name.lower()
name = self.r53._make_qualified(name)
results = self.r53.get_all_hosted_zones()
zones = results['ListHostedZonesResponse']['HostedZones']
zones.sort(key=len, reverse=True)
zones_matched = {}
for zone in zones:
zname = zone['Name'].lower()
if len(zname) > len(name):
continue
if len(zname) < len(name) and name[-1 - len(zname)] != '.':
continue
if zname == name[-len(zname):]:
zones_matched[zname] = zone
if zones_matched:
znames = zones_matched.keys()
znames.sort(key=len, reverse=True)
return zones_matched[znames[0]]
return None
def get_records(self):
"""
Return a ResourceRecordsSets for all of the records in this zone.
"""
return self.zone.get_records()
def find_all_records(self):
"""
Search all records in this zone.
"""
return self.zone.find_all_records()
def find_records(self, name, type, desired=1, all=False, identifier=None):
"""
Search this Zone for records that match given parameters.
Returns None if no results, a ResourceRecord if one result, or
a ResourceRecordSets if more than one result.
:type name: str
:param name: The name of the records should match this parameter
:type type: str
:param type: The type of the records should match this parameter
:type desired: int
:param desired: The number of desired results. If the number of
matching records in the Zone exceeds the value of this parameter,
throw TooManyRecordsException
:type all: Boolean
:param all: If true return all records that match name, type, and
identifier parameters
:type identifier: Tuple
:param identifier: A tuple specifying WRR or LBR attributes. Valid
forms are:
* (str, str): WRR record [e.g. ('foo','10')]
            * (str, str): LBR record [e.g. ('foo','us-east-1')]
"""
return self.zone.find_records(name, type, desired,
all, identifier)
def wait_to_complete(self, status=None, timeout=120):
"""
Wait for the Route53 commit change to complete
:type status: class
:param status: the instance initializing ``boto.route53.status.Status``
"""
for i in xrange(timeout / 5):
result = status.update()
if result == 'INSYNC':
return True
elif result != 'PENDING':
raise IcsR53Exception("Unexpected status found: %s" % result)
time.sleep(5)
result = status.update()
if result == 'INSYNC':
return True
else:
raise IcsR53Exception("Wait until timeout: %ss" % timeout)
def add_record(self, resource_type, name, value, ttl=60,
identifier=None):
"""
Add a new record to this Zone. See _new_record for parameter
documentation. Returns a Status object.
"""
        return self.zone.add_record(resource_type,
                                    name, value, ttl,
                                    identifier)
def update_record(self, old_record, new_value, new_ttl=None,
new_identifier=None):
"""
Update an existing record in this Zone. Returns a Status object.
:type old_record: ResourceRecord
:param old_record: A ResourceRecord (e.g. returned by find_records)
See _new_record for additional parameter documentation.
"""
return self.zone.update_record(old_record, new_value,
new_ttl, new_identifier)
def delete_record(self, record):
"""
Delete one or more records from this Zone. Returns a Status object.
:param record: A ResourceRecord (e.g. returned by
find_records) or list, tuple, or set of ResourceRecords.
"""
return self.zone.delete_record(record)
def add_cname(self, name, value, ttl=None, identifier=None):
"""
Add a new CNAME record to this Zone. See _new_record for
parameter documentation. Returns a Status object.
"""
return self.zone.add_cname(name, value, ttl, identifier)
def add_a(self, name, value, ttl=None, identifier=None):
"""
Add a new A record to this Zone. See _new_record for
parameter documentation. Returns a Status object.
"""
return self.zone.add_a(name, value, ttl, identifier)
def add_alias(self, name, type, alias_hosted_zone_id,
alias_dns_name, identifier=None):
"""
Add a new alias record to this Zone. See _new_alias_record for
parameter documentation. Returns a Status object.
"""
return self.zone.add_alias(name, type, alias_hosted_zone_id,
alias_dns_name, identifier)
def get_cname(self, name, identifier=None, all=False):
"""
Search this Zone for CNAME records that match name.
Returns a ResourceRecord.
If there is more than one match return all as a
ResourceRecordSets if all is True, otherwise throws
TooManyRecordsException.
"""
return self.zone.find_records(name, 'CNAME',
identifier=identifier,
all=all)
def get_a(self, name, identifier=None, all=False):
"""
Search this Zone for A records that match name.
Returns a ResourceRecord.
If there is more than one match return all as a
ResourceRecordSets if all is True, otherwise throws
TooManyRecordsException.
"""
return self.zone.find_records(name, 'A',
identifier=identifier,
all=all)
def update_cname(self, name, value, ttl=None, identifier=None):
"""
Update the given CNAME record in this Zone to a new value, ttl,
and identifier. Returns a Status object.
        Will throw TooManyRecordsException if name, value does not match
a single record.
"""
name = self.r53._make_qualified(name)
value = self.r53._make_qualified(value)
old_record = self.get_cname(name, identifier)
if old_record is None:
return None
else:
ttl = ttl or old_record.ttl
return self.update_record(old_record,
new_value=value,
new_ttl=ttl,
new_identifier=identifier)
def update_a(self, name, value, ttl=None, identifier=None):
"""
Update the given A record in this Zone to a new value, ttl,
and identifier. Returns a Status object.
        Will throw TooManyRecordsException if name, value does not match
a single record.
"""
name = self.r53._make_qualified(name)
old_record = self.get_a(name, identifier)
if old_record is None:
return None
else:
ttl = ttl or old_record.ttl
return self.update_record(old_record,
new_value=value,
new_ttl=ttl,
new_identifier=identifier)
def update_alias(self, name, type, identifier=None, alias_dns_name=None):
"""
Update the given alias record in this Zone to a new routing policy
Returns a Status object.
        Will throw TooManyRecordsException if name, value does not match
a single record.
"""
return self.zone.update_alias(name, type, identifier, alias_dns_name)
def delete_cname(self, name, identifier=None, all=False):
"""
Delete a CNAME record matching name and identifier from
this Zone. Returns a Status object.
If there is more than one match delete all matching records if
all is True, otherwise throws TooManyRecordsException.
"""
return self.zone.delete_cname(name, identifier)
def delete_a(self, name, identifier=None, all=False):
"""
Delete an A record matching name and identifier from this
Zone. Returns a Status object.
If there is more than one match delete all matching records if
all is True, otherwise throws TooManyRecordsException.
"""
return self.zone.delete_a(name, identifier, all)
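# --- Usage sketch (illustrative addition, not part of the original module) ---
# A typical round trip; the zone and record values are hypothetical.
#
#   r53 = IcsR53("test.example.com")
#   status = r53.add_a("test.example.com", "10.0.0.1", ttl=60)
#   r53.wait_to_complete(status)
#   r53.delete_a("test.example.com")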
# vim: tabstop=4 shiftwidth=4 softtabstop=4
| apache-2.0 | 7,279,076,845,178,610,000 | 33.649025 | 79 | 0.561942 | false |
SebWouters/PyDMET | oldstuff/GroundState1D.py | 1 | 1418 | '''
PyDMET: a python implementation of density matrix embedding theory
Copyright (C) 2014, 2015 Sebastian Wouters
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
import numpy as np
import HubbardDMET
lattice_size = np.array( [ 4 ], dtype=int )
cluster_size = np.array( [ 1 ], dtype=int )
Nelectrons = np.prod( lattice_size ) # Half-filling
antiPeriodic = True
Uvalues = []
Energies = []
for HubbardU in np.arange( -8.0, 8.1, 1.0 ):
theDMET = HubbardDMET.HubbardDMET( lattice_size, cluster_size, HubbardU, antiPeriodic )
EnergyPerSite, umatrix = theDMET.SolveGroundState( Nelectrons )
Uvalues.append( HubbardU )
Energies.append( EnergyPerSite )
print np.column_stack((Uvalues, Energies))
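# Optional visualization (an addition, not part of the original script):
#   import matplotlib.pyplot as plt
#   plt.plot(Uvalues, Energies, 'o-')
#   plt.xlabel('Hubbard U')
#   plt.ylabel('Energy per site')
#   plt.show()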
| gpl-2.0 | -3,204,541,395,736,918,500 | 35.358974 | 91 | 0.720028 | false |
nassar/yamz | ice.py | 1 | 31923 | #!/usr/bin/python
#
# ice - web frontend for SeaIce, based on the Python-Flask framework.
#
# Copyright (c) 2013, Christopher Patton, all rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The names of contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import seaice
import ConfigParser
from flask import Markup
from flask import render_template, render_template_string
from flask import url_for, redirect, flash
from flask import request, session, g
from flask.ext import login as l
from urllib2 import Request, urlopen, URLError
import os, sys, optparse, re
import json, psycopg2 as pgdb
## Parse command line options. ##
parser = optparse.OptionParser()
parser.description="""\
This program is a Python/Flask-based web frontend for the SeaIce metadictionary.
SeaIce is a database comprised of a set of user-defined, crowd-sourced terms and
relations. The goal of SeaIce is to develop a succinct and complete set of
metadata terms to register just about any type of file or data set. 'ice' is
distributed under the terms of the BSD license with the hope that it will be
useful, but without warranty. You should have received a copy of the BSD
license with this program; otherwise, visit
http://opensource.org/licenses/BSD-3-Clause.
"""
parser.add_option("--config", dest="config_file", metavar="FILE",
help="User credentials for local PostgreSQL database. " +
"If 'heroku' is given, then a connection to a foreign host specified by " +
"DATABASE_URL is established.",
default='heroku')
parser.add_option('--credentials', dest='credentials_file', metavar='FILE',
help='File with OAuth-2.0 credentials. (Defaults to `.seaice_auth`.)',
default='.seaice_auth')
parser.add_option('--deploy', dest='deployment_mode',
help='Deployment mode, used to choose OAuth parameters in credentials file.',
default='heroku')
parser.add_option("-d", "--debug", action="store_true", dest="debug", default=False,
help="Start flask in debug mode.")
parser.add_option("--role", dest="db_role", metavar="USER",
help="Specify the database role to use for the DB connector pool. These roles " +
"are specified in the configuration file (see --config).",
default="default")
(options, args) = parser.parse_args()
# Figure out if we're in production mode. Look in 'heroku' section only.
config = ConfigParser.ConfigParser()
config.read('.seaice_auth')
if config.has_option('heroku', 'prod_mode'):
prod_mode = config.getboolean('heroku', 'prod_mode')
else:
prod_mode = False # default
## Setup flask application ##
print "ice: starting ..."
db_config = None
try:
if options.config_file == "heroku":
app = seaice.SeaIceFlask(__name__)
else:
db_config = seaice.auth.get_config(options.config_file)
app = seaice.SeaIceFlask(__name__, db_user = db_config.get(options.db_role, 'user'),
db_password = db_config.get(options.db_role, 'password'),
db_name = db_config.get(options.db_role, 'dbname'))
except pgdb.DatabaseError, e:
print >>sys.stderr, "error: %s" % e
sys.exit(1)
try:
credentials = seaice.auth.get_config(options.credentials_file)
google = seaice.auth.get_google_auth(credentials.get(options.deployment_mode, 'google_client_id'),
credentials.get(options.deployment_mode, 'google_client_secret'))
except OSError:
print >>sys.stderr, "error: config file '%s' not found" % options.config_file
sys.exit(1)
app.debug = True
app.use_reloader = True
app.secret_key = credentials.get(options.deployment_mode, 'app_secret')
## Session logins ##
login_manager = l.LoginManager()
login_manager.init_app(app)
login_manager.anonymous_user = seaice.user.AnonymousUser
## Prescore terms ##
# This will be used to check for consistency errors in live scoring
# and isn't needed until I implement O(1) scoring.
#print "ice: checking term score consistnency (dev)" TODO
#for term in db_con.getAllTerms():
# if not db_con.checkTermConsistency(term['id']):
# print "warning: corrected inconsistent consensus score for term %d" % term['id']
# db_con.commit()
print "ice: setup complete."
@login_manager.user_loader
def load_user(id):
return app.SeaIceUsers.get(int(id))
## Request wrappers (may have use for these later) ##
@app.before_request
def before_request():
pass
@app.teardown_request
def teardown_request(exception):
pass
## HTTP request handlers ##
@app.errorhandler(404)
def pageNotFound(e):
return render_template('basic_page.html', user_name = l.current_user.name,
title = "Oops! - 404",
headline = "404",
content = "The page you requested doesn't exist."), 404
# home page
@app.route("/")
def index():
if l.current_user.id:
g.db = app.dbPool.getScoped()
# TODO Store these values in class User in order to prevent
# these queries every time the homepage is accessed.
my = seaice.pretty.printTermsAsLinks(g.db,
g.db.getTermsByUser(l.current_user.id))
star = seaice.pretty.printTermsAsLinks(g.db,
g.db.getTermsByTracking(l.current_user.id))
notify = l.current_user.getNotificationsAsHTML(g.db)
return render_template("index.html", user_name = l.current_user.name,
my = Markup(my.decode('utf-8')) if my else None,
star = Markup(star.decode('utf-8')) if star else None,
notify = Markup(notify.decode('utf-8')) if notify else None)
return render_template("index.html", user_name = l.current_user.name)
@app.route("/about")
def about():
return render_template("about.html", user_name = l.current_user.name)
@app.route("/guidelines")
def guidelines():
return render_template("guidelines.html", user_name = l.current_user.name)
@app.route("/api")
def api():
return redirect(url_for('static', filename='api/index.html'))
@app.route("/contact")
def contact():
return render_template("contact.html", user_name = l.current_user.name)
## Login and logout ##
@app.route("/login")
def login():
if l.current_user.id:
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Oops!",
content = "You are already logged in!")
form = '''
<p>
In order to propose new terms or comment on others, you must first
sign in.
<li>Sign in with <a href="/login/google">Google</a>.</li>
</p>
'''
return render_template("basic_page.html", title = "Login page",
headline = "Login",
content = Markup(form.decode('utf-8')))
@app.route("/login/google")
def login_google():
callback=url_for('authorized', _external=True)
return google.authorize(callback=callback)
@app.route(seaice.auth.REDIRECT_URI)
@google.authorized_handler
def authorized(resp):
access_token = resp['access_token']
session['access_token'] = access_token, ''
headers = {'Authorization': 'OAuth '+access_token}
req = Request('https://www.googleapis.com/oauth2/v1/userinfo', None, headers)
try:
res = urlopen(req)
except URLError, e:
if e.code == 401: # Unauthorized - bad token
session.pop('access_token', None)
      return redirect(url_for('login'))  # bad token; ask the user to sign in again
g_user = json.load(res)
g.db = app.dbPool.getScoped()
user = g.db.getUserByAuth('google', g_user['id'])
if not user: # not seen this person before, so create user
g_user['authority'] = 'google'
g_user['auth_id'] = g_user['id']
g_user['id'] = app.userIdPool.ConsumeId()
g_user['last_name'] = "nil"
g_user['first_name'] = "nil"
g_user['reputation'] = "30"
g.db.insertUser(g_user)
g.db.commit()
user = g.db.getUserByAuth('google', g_user['auth_id'])
app.SeaIceUsers[user['id']] = seaice.user.User(user['id'], user['first_name'])
l.login_user(app.SeaIceUsers.get(user['id']))
return render_template("account.html", user_name = l.current_user.name,
email = g_user['email'],
message = """
According to our records, this is the first time you've logged onto
SeaIce with this account. Please provide your first and last name as
you would like it to appear with your contributions. Thank you!""")
l.login_user(app.SeaIceUsers.get(user['id']))
flash("Logged in successfully")
return redirect(url_for('index'))
@google.tokengetter
def get_access_token():
return session.get('access_token')
@app.route('/logout')
@l.login_required
def logout():
l.logout_user()
return redirect(url_for('index'))
@login_manager.unauthorized_handler
def unauthorized():
return redirect(url_for('login'))
## Users ##
@app.route("/account", methods = ['POST', 'GET'])
@l.login_required
def settings():
g.db = app.dbPool.dequeue()
if request.method == "POST":
g.db.updateUser(l.current_user.id,
request.form['first_name'],
request.form['last_name'],
True if request.form.get('enotify') else False)
g.db.commit()
app.dbPool.enqueue(g.db)
l.current_user.name = request.form['first_name']
return getUser(str(l.current_user.id))
# method was GET
user = g.db.getUser(l.current_user.id)
app.dbPool.enqueue(g.db)
return render_template("account.html", user_name = l.current_user.name,
email = user['email'].decode('utf-8'),
last_name_edit = user['last_name'].decode('utf-8'),
first_name_edit = user['first_name'].decode('utf-8'),
                         reputation = user['reputation'] + (' *' if user['super_user'] else ' _'),
enotify = 'yes' if user['enotify'] else 'no',
message = """
Here you can change how your name will appear to other users.
Navigating away from this page will safely discard any changes.""")
@app.route("/user=<int:user_id>")
def getUser(user_id = None):
g.db = app.dbPool.getScoped()
try:
user = g.db.getUser(int(user_id))
if user:
result = """<hr>
<table cellpadding=12>
<tr><td valign=top width="40%">First name:</td><td>{0}</td></tr>
<tr><td valign=top>Last name:</td><td>{1}</td></tr>
<tr><td valign=top>Email:</td><td>{2}</td></td>
<tr><td valign=top>Reputation:</td><td>{3}</td></td>
<tr><td valign=top>Receive email notifications:</td><td>{4}</td>
</table> """.format(user['first_name'], user['last_name'],
user['email'],
                                     user['reputation'] +
                                     (' *' if user['super_user'] else ''),
user['enotify'])
return render_template("basic_page.html",
user_name = l.current_user.name,
title = "User - %s" % user_id,
headline = "User",
content = Markup(result.decode('utf')))
except IndexError: pass
return render_template("basic_page.html", user_name = l.current_user.name,
title = "User not found",
headline = "User",
content = Markup("User <strong>#%s</strong> not found!" % user_id))
@app.route("/user=<int:user_id>/notif=<int:notif_index>/remove", methods=['GET'])
@l.login_required
def remNotification(user_id, notif_index):
try:
assert user_id == l.current_user.id
app.SeaIceUsers[user_id].remove(notif_index, app.dbPool.getScoped())
return redirect("/")
except AssertionError:
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Oops!",
content = 'You may only delete your own notifications.')
  except IndexError:
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Oops!",
content = 'Index out of range.')
## Look up terms ##
@app.route("/term/concept=<term_concept_id>")
@app.route("/term=<term_concept_id>")
def getTerm(term_concept_id = None, message = ""):
# NOTE: this getTerm is called with concept_id, the other getTerm with id
g.db = app.dbPool.getScoped()
term = g.db.getTermByConceptId(term_concept_id)
if not term:
return render_template("basic_page.html",
user_name = l.current_user.name,
title = "Term not found",
headline = "Term",
content = Markup("Term <strong>#%s</strong> not found!" \
% term_concept_id))
result = seaice.pretty.printTermAsHTML(g.db, term, l.current_user.id)
result = message + "<hr>" + result + "<hr>"
result += seaice.pretty.printCommentsAsHTML(g.db, g.db.getCommentHistory(term['id']),
l.current_user.id)
if l.current_user.id:
result += """
<form action="/term={0}/comment" method="post">
<table cellpadding=16 width=60%>
<tr><td><textarea type="text" name="comment_string" rows=3
style="width:100%; height:100%"
placeholder="Add comment"></textarea></td></tr>
<tr><td align=right><input type="submit" value="Comment"><td>
</td>
</table>
</form>""".format(term['id'])
else:
result += """
<form action="/login" method="get">
<table cellpadding=16 width=60%>
<tr><td><textarea type="text" rows=3
style="width:100%; height:100%"
placeholder="Log in to comment." readonly></textarea></td></tr>
<tr><td align=right><input type="submit" value="Login"><td>
</td>
</table>
</form>"""
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Term %s" % term['term_string'],
headline = "Term",
content = Markup(result.decode('utf-8')))
@app.route("/browse")
@app.route("/browse/<listing>")
def browse(listing = None):
g.db = app.dbPool.getScoped()
terms = g.db.getAllTerms(sortBy="term_string")
letter = '~'
result = "<h5>{0} | {1} | {2} | {3} | {4}</h5><hr>".format(
'<a href="/browse/score">high score</a>' if listing != "score" else 'high score',
'<a href="/browse/recent">recent</a>' if listing != "recent" else 'recent',
'<a href="/browse/volatile">volatile</a>' if listing != "volatile" else 'volatile',
'<a href="/browse/stable">stable</a>' if listing != "stable" else 'stable',
'<a href="/browse/alphabetical">alphabetical</a>' if listing != "alphabetical" else 'alphabetical'
)
# xxx alpha ordering of tags is wrong (because they start '#{g: ')
if listing == "recent": # Most recently added listing
result += seaice.pretty.printTermsAsBriefHTML(g.db,
sorted(terms, key=lambda term: term['modified'], reverse=True),
l.current_user.id)
elif listing == "score": # Highest consensus
terms = sorted(terms, key=lambda term: term['consensus'], reverse=True)
result += seaice.pretty.printTermsAsBriefHTML(g.db,
sorted(terms, key=lambda term: term['up'] - term['down'], reverse=True), l.current_user.id)
elif listing == "volatile": # Least stable (Frequent updates, commenting, and voting)
terms = sorted(terms, key=lambda term: term['t_stable'] or term['t_last'], reverse=True)
result += seaice.pretty.printTermsAsBriefHTML(g.db, terms, l.current_user.id)
elif listing == "stable": # Most stable, highest consensus
terms = sorted(terms, key=lambda term: term['t_stable'] or term['t_last'])
result += seaice.pretty.printTermsAsBriefHTML(g.db, terms, l.current_user.id)
elif listing == "alphabetical": # Alphabetical listing
result += "<table>"
for term in terms:
# skip if term is empty
if not term['term_string']:
print >>sys.stderr, "error: empty term string in alpha listing"
continue
#firstc = term['term_string'][0].upper()
firstc = term['term_string'][0].upper() if term['term_string'] else ' '
if firstc != '#' and firstc != letter:
#letter = term['term_string'][0].upper()
letter = firstc
result += "</td></tr><tr><td width=20% align=center valign=top><h4>{0}</h4></td><td width=80%>".format(letter)
result += "<p><a %s</a>" % seaice.pretty.innerAnchor(
g.db, term['term_string'], term['concept_id'], term['definition'],
tagAsTerm=True)
result += " <i>contributed by %s</i></p>" % g.db.getUserNameById(term['owner_id'])
result += "</table>"
# yyy temporary proof that this code is running
print >>sys.stderr, "note: end alpha listing"
else:
return redirect("/browse/recent")
return render_template("browse.html", user_name = l.current_user.name,
title = "Browse",
headline = "Browse dictionary",
content = Markup(result.decode('utf-8')))
hash2uniquerifier_regex = re.compile('(?<!#)#(\w[\w.-]+)')
# xxx is " the problem (use ' below)?
#token_ref_regex = re.compile("(?<!#\{g: )([#&]+)([\w.-]+)")
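# Illustrative behaviour of hash2uniquerifier_regex (the uniquerifier prefix
# 'xq' below is a stand-in; the real one is seaice.pretty.ixuniq):
#
#   >>> hash2uniquerifier_regex.sub('xq' + '\\1', 'find #ice and ##literal')
#   'find xqice and ##literal'
#
# The (?<!#) lookbehind skips doubled hashes, and \w[\w.-]+ requires at
# least two characters after the '#', so one-letter tags are not rewritten.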
@app.route("/search", methods = ['POST', 'GET'])
def returnQuery():
g.db = app.dbPool.getScoped()
if request.method == "POST":
# XXX whoa -- this use of term_string variable name (in all html forms)
# is totally different from term_string as used in the database!
search_words = hash2uniquerifier_regex.sub(
seaice.pretty.ixuniq + '\\1',
request.form['term_string'])
terms = g.db.search(search_words)
#terms = g.db.search(request.form['term_string'])
if len(terms) == 0:
return render_template("search.html", user_name = l.current_user.name,
term_string = request.form['term_string'])
else:
result = seaice.pretty.printTermsAsBriefHTML(g.db, terms, l.current_user.id)
return render_template("search.html", user_name = l.current_user.name,
term_string = request.form['term_string'],
result = Markup(result.decode('utf-8')))
else: # GET
return render_template("search.html", user_name = l.current_user.name)
# yyy to do: display tag definition at top of search results
# when user clicks on community tag (searches for all terms bearing the tag)
@app.route("/tag/<tag>")
def getTag(tag = None):
g.db = app.dbPool.getScoped()
terms = g.db.search(seaice.pretty.ixuniq + tag)
if len(terms) == 0:
return render_template("tag.html", user_name = l.current_user.name,
term_string = tag)
else:
result = seaice.pretty.printTermsAsBriefHTML(g.db, terms, l.current_user.id)
return render_template("tag.html", user_name = l.current_user.name,
term_string = tag, result = Markup(result.decode('utf-8')))
## Propose, edit, or remove a term ##
@app.route("/contribute", methods = ['POST', 'GET'])
@l.login_required
def addTerm():
if request.method == "POST":
g.db = app.dbPool.dequeue()
# xxx add check for non-empty term_string before consuming new 'id'
# xxx add check for temporary, test term_string and then only consume
# a test 'id'
term = {
#'term_string' : request.form['term_string'],
'term_string' : seaice.pretty.refs_norm(g.db, request.form['term_string']),
'definition' : seaice.pretty.refs_norm(g.db, request.form['definition']),
'examples' : seaice.pretty.refs_norm(g.db, request.form['examples']),
'owner_id' : l.current_user.id,
'id' : app.termIdPool.ConsumeId() }
(id, concept_id) = g.db.insertTerm(term, prod_mode)
# Special handling is needed for brand new tags, which always return
# "(undefined/ambiguous)" qualifiers at the moment of definition.
#
if term['term_string'].startswith('#{g:'): # if defining a tag
#term['term_string'] = '#{g: %s | %s}' % ( # correct our initial
term['term_string'] = '%s%s | %s}' % ( # correct our initial
seaice.pretty.tagstart,
seaice.pretty.ixuniq + request.form['term_string'][1:],
concept_id) # guesses and update
g.db.updateTerm(term['id'], term, None, prod_mode)
g.db.commit()
app.dbPool.enqueue(g.db)
return getTerm(concept_id,
message = "Your term has been added to the metadictionary!")
else: # GET
return render_template("contribute.html", user_name = l.current_user.name,
title = "Contribute", headline = "Add a dictionary term")
@app.route("/term=<term_concept_id>/edit", methods = ['POST', 'GET'])
@l.login_required
def editTerm(term_concept_id = None):
try:
g.db = app.dbPool.dequeue()
term = g.db.getTermByConceptId(term_concept_id)
#user = g.db.getUser(l.current_user.id)
# yyy not checking if term was found?
assert l.current_user.id and term['owner_id'] == l.current_user.id
if request.method == "POST":
assert request.form.get('examples') != None
updatedTerm = {
#'term_string' : request.form['term_string'],
'term_string' : seaice.pretty.refs_norm(g.db, request.form['term_string']),
'definition' : seaice.pretty.refs_norm(g.db, request.form['definition']),
'examples' : seaice.pretty.refs_norm(g.db, request.form['examples']),
'owner_id' : l.current_user.id }
g.db.updateTerm(term['id'], updatedTerm, term['persistent_id'], prod_mode)
# Notify tracking users
notify_update = seaice.notify.TermUpdate(
term['id'], l.current_user.id, term['modified'])
for user_id in g.db.getTrackingByTerm(term['id']):
app.SeaIceUsers[user_id].notify(notify_update, g.db)
g.db.commit()
app.dbPool.enqueue(g.db)
return getTerm(term_concept_id,
message = "Your term has been updated in the metadictionary.")
else: # GET
app.dbPool.enqueue(g.db)
if term:
return render_template("contribute.html",
user_name = l.current_user.name,
title = "Edit - %s" % term_concept_id,
headline = "Edit term",
edit_id = term_concept_id,
term_string_edit = term['term_string'].decode('utf-8'),
definition_edit = term['definition'].decode('utf-8'),
examples_edit = term['examples'].decode('utf-8'))
except ValueError:
return render_template("basic_page.html",
user_name = l.current_user.name,
title = "Term not found",
headline = "Term",
content = Markup("Term <strong>#%s</strong> not found!" % term_concept_id))
except AssertionError:
return render_template("basic_page.html",
user_name = l.current_user.name,
title = "Term - %s" % term_concept_id,
content =
"""Error! You may only edit or remove terms and definitions that
you've contributed. However, you may comment or vote on this term.
assert term['owner_id'] (%s) == l.current_user.id (%s)""" % (term['owner_id'], l.current_user.id))
@app.route("/term=<int:term_id>/remove", methods=["POST"])
@l.login_required
def remTerm(term_id):
try:
g.db = app.dbPool.getScoped()
term = g.db.getTerm(int(request.form['id']))
assert term and term['owner_id'] == l.current_user.id
assert term['class'] == 'vernacular'
tracking_users = g.db.getTrackingByTerm(term_id)
id = g.db.removeTerm(int(request.form['id']), term['persistent_id'],
prod_mode)
app.termIdPool.ReleaseId(id)
# Notify tracking users
notify_removed = seaice.notify.TermRemoved(l.current_user.id,
term['term_string'],
g.db.getTime())
for user_id in tracking_users:
app.SeaIceUsers[user_id].notify(notify_removed, g.db)
g.db.commit()
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Remove term",
content = Markup(
"Successfully removed term <b>%s (%s)</b> from the metadictionary." % (term['term_string'], term['concept_id'])))
except AssertionError:
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Term - %s" % term_id,
content =
"""Error! You may only remove terms that are in the vernacular class and
that you've contributed. However, you may comment or vote on this term. """)
## Comments ##
@app.route("/term=<int:term_id>/comment", methods=['POST'])
@l.login_required
def addComment(term_id):
try:
assert l.current_user.id
term_id = int(term_id)
g.db = app.dbPool.getScoped()
comment = { 'comment_string' : seaice.pretty.refs_norm(g.db, request.form['comment_string']),
'term_id' : term_id,
'owner_id' : l.current_user.id,
'id' : app.commentIdPool.ConsumeId()}
comment_id = g.db.insertComment(comment)
# Notify owner and tracking users
notify_comment = seaice.notify.Comment(term_id, l.current_user.id, comment['comment_string'],
g.db.getComment(comment_id)['created'])
tracking_users = [ user_id for user_id in g.db.getTrackingByTerm(term_id) ]
tracking_users.append(g.db.getTerm(term_id)['owner_id'])
for user_id in tracking_users:
if user_id != l.current_user.id:
app.SeaIceUsers[user_id].notify(notify_comment, g.db)
g.db.commit()
return redirect("/term=%s" % g.db.getTermConceptId(term_id))
except AssertionError:
return redirect(url_for('login'))
@app.route("/comment=<int:comment_id>/edit", methods = ['POST', 'GET'])
@l.login_required
def editComment(comment_id = None):
try:
g.db = app.dbPool.dequeue()
comment = g.db.getComment(int(comment_id))
assert l.current_user.id and comment['owner_id'] == l.current_user.id
if request.method == "POST":
updatedComment = { 'comment_string' : seaice.pretty.refs_norm(g.db, request.form['comment_string']),
'owner_id' : l.current_user.id }
g.db.updateComment(int(comment_id), updatedComment)
g.db.commit()
app.dbPool.enqueue(g.db)
return getTerm(g.db.getTermConceptId(comment['term_id']), message = "Your comment has been updated.")
else: # GET
app.dbPool.enqueue(g.db)
if comment:
form = """
<form action="/comment={0}/edit" method="post">
<table cellpadding=16 width=60%>
<tr><td><textarea type="text" name="comment_string" rows=3
style="width:100%; height:100%"
placeholder="Add comment">{1}</textarea></td></tr>
<tr><td align=right><input type="submit" value="Comment"><td>
</td>
</table>
</form>""".format(comment_id, comment['comment_string'])
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Edit comment",
headline = "Edit your comment",
content = Markup(form.decode('utf-8')))
except ValueError:
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Comment not found",
content = Markup("Comment <strong>#%s</strong> not found!" % comment_id))
except AssertionError:
return render_template("basic_page.html", user_name = l.current_user.name,
      title = "Comment - %s" % comment_id,
      content =
        """Error! You may only edit or remove comments that
           you've contributed.""")
@app.route("/comment=<int:comment_id>/remove", methods=['POST'])
@l.login_required
def remComment(comment_id):
try:
g.db = app.dbPool.getScoped()
comment = g.db.getComment(int(request.form['id']))
assert comment and comment['owner_id'] == l.current_user.id
g.db.removeComment(int(request.form['id']))
g.db.commit()
return redirect("/term=%s" % g.db.getTermConceptId(comment['term_id']))
except AssertionError:
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Oops!",
content =
"""Error! You may only edit or remove your own comments.""")
## Voting! ##
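# Vote toggling (summary of the handler below): casting the same vote twice
# clears it, so the mapping from (previous vote, action) to the new vote is:
#
#   previous vote | action | new vote
#   --------------+--------+---------
#        +1       |  up    |    0
#     0 or -1     |  up    |   +1
#        -1       |  down  |    0
#     0 or +1     |  down  |   -1
#     anything    | other  |    0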
@app.route("/term=<int:term_id>/vote", methods=['POST'])
@l.login_required
def voteOnTerm(term_id):
g.db = app.dbPool.getScoped()
p_vote = g.db.getVote(l.current_user.id, term_id)
if request.form['action'] == 'up':
if p_vote == 1:
g.db.castVote(l.current_user.id, term_id, 0)
else:
g.db.castVote(l.current_user.id, term_id, 1)
elif request.form['action'] == 'down':
if p_vote == -1:
g.db.castVote(l.current_user.id, term_id, 0)
else:
g.db.castVote(l.current_user.id, term_id, -1)
else:
g.db.castVote(l.current_user.id, term_id, 0)
g.db.commit()
print "User #%d voted %s term #%d" % (l.current_user.id, request.form['action'], term_id)
return redirect("/term=%s" % g.db.getTermConceptId(term_id))
@app.route("/term=<int:term_id>/track", methods=['POST'])
@l.login_required
def trackTerm(term_id):
g.db = app.dbPool.getScoped()
if request.form['action'] == "star":
g.db.trackTerm(l.current_user.id, term_id)
else:
g.db.untrackTerm(l.current_user.id, term_id)
g.db.commit()
print "User #%d %sed term #%d" % (l.current_user.id, request.form['action'], term_id)
return redirect("/term=%s" % g.db.getTermConceptId(term_id))
## Start HTTP server. (Not relevant on Heroku.) ##
if __name__ == '__main__':
app.debug = True
app.run('0.0.0.0', 5000, use_reloader = False)
| bsd-3-clause | 4,005,987,920,294,729,700 | 38.362515 | 130 | 0.604893 | false |
slackhappy/graphite-web | webapp/graphite/render/views.py | 1 | 13462 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import csv
from time import time, strftime, localtime
from random import shuffle
from httplib import CannotSendRequest
from urllib import urlencode
from urlparse import urlsplit, urlunsplit
from cgi import parse_qs
from cStringIO import StringIO
try:
import cPickle as pickle
except ImportError:
import pickle
from graphite.util import getProfileByUsername, json
from graphite.remote_storage import HTTPConnectionWithTimeout
from graphite.logger import log
from graphite.render.evaluator import evaluateTarget
from graphite.render.attime import parseATTime
from graphite.render.functions import PieFunctions
from graphite.render.hashing import hashRequest, hashData
from graphite.render.glyph import GraphTypes
from django.http import HttpResponse, HttpResponseServerError, HttpResponseRedirect
from django.template import Context, loader
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
def renderView(request):
start = time()
(graphOptions, requestOptions) = parseOptions(request)
useCache = 'noCache' not in requestOptions
cacheTimeout = requestOptions['cacheTimeout']
requestContext = {
'startTime' : requestOptions['startTime'],
'endTime' : requestOptions['endTime'],
'localOnly' : requestOptions['localOnly'],
'data' : []
}
data = requestContext['data']
# First we check the request cache
if useCache:
requestKey = hashRequest(request)
cachedResponse = cache.get(requestKey)
if cachedResponse:
log.cache('Request-Cache hit [%s]' % requestKey)
log.rendering('Returned cached response in %.6f' % (time() - start))
return cachedResponse
else:
log.cache('Request-Cache miss [%s]' % requestKey)
# Now we prepare the requested data
if requestOptions['graphType'] == 'pie':
for target in requestOptions['targets']:
if target.find(':') >= 0:
try:
name,value = target.split(':',1)
value = float(value)
        except ValueError:
raise ValueError, "Invalid target '%s'" % target
data.append( (name,value) )
else:
seriesList = evaluateTarget(requestContext, target)
for series in seriesList:
func = PieFunctions[requestOptions['pieMode']]
data.append( (series.name, func(requestContext, series) or 0 ))
elif requestOptions['graphType'] == 'line':
# Let's see if at least our data is cached
if useCache:
targets = requestOptions['targets']
startTime = requestOptions['startTime']
endTime = requestOptions['endTime']
dataKey = hashData(targets, startTime, endTime)
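      # The data cache is keyed on the raw target strings plus the time
      # range, so repeated requests for the same targets and window reuse
      # the fetched series even when presentation options differ.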
cachedData = cache.get(dataKey)
if cachedData:
log.cache("Data-Cache hit [%s]" % dataKey)
else:
log.cache("Data-Cache miss [%s]" % dataKey)
else:
cachedData = None
if cachedData is not None:
requestContext['data'] = data = cachedData
else: # Have to actually retrieve the data now
for target in requestOptions['targets']:
if not target.strip():
continue
t = time()
seriesList = evaluateTarget(requestContext, target)
log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
data.extend(seriesList)
if useCache:
cache.set(dataKey, data, cacheTimeout)
# If data is all we needed, we're done
format = requestOptions.get('format')
if format == 'csv':
response = HttpResponse(mimetype='text/csv')
writer = csv.writer(response, dialect='excel')
for series in data:
for i, value in enumerate(series):
timestamp = localtime( series.start + (i * series.step) )
writer.writerow( (series.name, strftime("%Y-%m-%d %H:%M:%S", timestamp), value) )
return response
if format == 'json':
series_data = []
for series in data:
timestamps = range(series.start, series.end, series.step)
datapoints = zip(series, timestamps)
series_data.append( dict(target=series.name, datapoints=datapoints) )
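    # Resulting JSON shape (illustrative values; unfetched points are null):
    #   [{"target": "app.requests", "datapoints": [[12.0, 1327097000], ...]}]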
if 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], json.dumps(series_data)),
mimetype='text/javascript')
else:
response = HttpResponse(content=json.dumps(series_data), mimetype='application/json')
response['Pragma'] = 'no-cache'
response['Cache-Control'] = 'no-cache'
return response
if format == 'raw':
response = HttpResponse(mimetype='text/plain')
for series in data:
response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
response.write( ','.join(map(str,series)) )
response.write('\n')
log.rendering('Total rawData rendering time %.6f' % (time() - start))
return response
if format == 'svg':
graphOptions['outputFormat'] = 'svg'
if format == 'pickle':
response = HttpResponse(mimetype='application/pickle')
seriesInfo = [series.getInfo() for series in data]
pickle.dump(seriesInfo, response, protocol=-1)
log.rendering('Total pickle rendering time %.6f' % (time() - start))
return response
# We've got the data, now to render it
graphOptions['data'] = data
if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
image = delegateRendering(requestOptions['graphType'], graphOptions)
else:
image = doImageRender(requestOptions['graphClass'], graphOptions)
useSVG = graphOptions.get('outputFormat') == 'svg'
if useSVG and 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], json.dumps(image)),
mimetype='text/javascript')
else:
response = buildResponse(image, useSVG and 'image/svg+xml' or 'image/png')
if useCache:
cache.set(requestKey, response, cacheTimeout)
log.rendering('Total rendering time %.6f seconds' % (time() - start))
return response
def parseOptions(request):
queryParams = request.REQUEST
# Start with some defaults
graphOptions = {'width' : 330, 'height' : 250}
requestOptions = {}
graphType = queryParams.get('graphType','line')
assert graphType in GraphTypes, "Invalid graphType '%s', must be one of %s" % (graphType,GraphTypes.keys())
graphClass = GraphTypes[graphType]
# Fill in the requestOptions
requestOptions['graphType'] = graphType
requestOptions['graphClass'] = graphClass
requestOptions['pieMode'] = queryParams.get('pieMode', 'average')
requestOptions['cacheTimeout'] = int( queryParams.get('cacheTimeout', settings.DEFAULT_CACHE_DURATION) )
requestOptions['targets'] = []
for target in queryParams.getlist('target'):
requestOptions['targets'].append(target)
if 'pickle' in queryParams:
requestOptions['format'] = 'pickle'
if 'rawData' in queryParams:
requestOptions['format'] = 'raw'
if 'format' in queryParams:
requestOptions['format'] = queryParams['format']
if 'jsonp' in queryParams:
requestOptions['jsonp'] = queryParams['jsonp']
if 'noCache' in queryParams:
requestOptions['noCache'] = True
requestOptions['localOnly'] = queryParams.get('local') == '1'
# Fill in the graphOptions
for opt in graphClass.customizable:
if opt in queryParams:
val = queryParams[opt]
if (val.isdigit() or (val.startswith('-') and val[1:].isdigit())) and opt not in ('fgcolor','bgcolor','fontColor'):
val = int(val)
elif '.' in val and (val.replace('.','',1).isdigit() or (val.startswith('-') and val[1:].replace('.','',1).isdigit())):
val = float(val)
elif val.lower() in ('true','false'):
val = val.lower() == 'true'
elif val.lower() == 'default' or val == '':
continue
graphOptions[opt] = val
# Get the time interval for time-oriented graph types
if graphType == 'line' or graphType == 'pie':
if 'until' in queryParams:
untilTime = parseATTime( queryParams['until'] )
else:
untilTime = parseATTime('now')
if 'from' in queryParams:
fromTime = parseATTime( queryParams['from'] )
else:
fromTime = parseATTime('-1d')
startTime = min(fromTime, untilTime)
endTime = max(fromTime, untilTime)
assert startTime != endTime, "Invalid empty time range"
requestOptions['startTime'] = startTime
requestOptions['endTime'] = endTime
return (graphOptions, requestOptions)
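# Illustrative coercions performed by the graphOptions loop above (all query
# parameters arrive as strings):
#   '330'     -> 330    (int)
#   '-20'     -> -20    (int)
#   '1.5'     -> 1.5    (float)
#   'True'    -> True   (bool, case-insensitive)
#   'default' or '' -> option skipped, so the graph class default applies
#   color options ('fgcolor', 'bgcolor', 'fontColor') are never coerced,
#   so e.g. fgcolor=000000 stays the string '000000'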
connectionPools = {}
def delegateRendering(graphType, graphOptions):
start = time()
postData = graphType + '\n' + pickle.dumps(graphOptions)
servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely
shuffle(servers)
for server in servers:
start2 = time()
try:
# Get a connection
try:
pool = connectionPools[server]
except KeyError: #happens the first time
pool = connectionPools[server] = set()
try:
connection = pool.pop()
except KeyError: #No available connections, have to make a new one
connection = HTTPConnectionWithTimeout(server)
connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
# Send the request
try:
connection.request('POST','/render/local/', postData)
except CannotSendRequest:
connection = HTTPConnectionWithTimeout(server) #retry once
connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
connection.request('POST', '/render/local/', postData)
# Read the response
response = connection.getresponse()
assert response.status == 200, "Bad response code %d from %s" % (response.status,server)
contentType = response.getheader('Content-Type')
imageData = response.read()
assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (contentType,server)
assert imageData, "Received empty response from %s" % server
# Wrap things up
log.rendering('Remotely rendered image on %s in %.6f seconds' % (server,time() - start2))
log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
pool.add(connection)
return imageData
except:
log.exception("Exception while attempting remote rendering request on %s" % server)
log.rendering('Exception while remotely rendering on %s wasted %.6f' % (server,time() - start2))
continue
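# Note: if every host in RENDERING_HOSTS fails, the loop above falls through
# and delegateRendering implicitly returns None. A caller-side fallback is
# one option (hypothetical sketch, not part of the original code):
#
#   image = delegateRendering(requestOptions['graphType'], graphOptions)
#   if image is None:  # all remote renders failed
#       image = doImageRender(requestOptions['graphClass'], graphOptions)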
def renderLocalView(request):
try:
start = time()
reqParams = StringIO(request.raw_post_data)
graphType = reqParams.readline().strip()
optionsPickle = reqParams.read()
reqParams.close()
graphClass = GraphTypes[graphType]
options = pickle.loads(optionsPickle)
image = doImageRender(graphClass, options)
log.rendering("Delegated rendering request took %.6f seconds" % (time() - start))
return buildResponse(image)
except:
    log.exception("Exception in graphite.render.views.renderLocalView")
return HttpResponseServerError()
def renderMyGraphView(request,username,graphName):
profile = getProfileByUsername(username)
if not profile:
return errorPage("No such user '%s'" % username)
try:
graph = profile.mygraph_set.get(name=graphName)
except ObjectDoesNotExist:
return errorPage("User %s doesn't have a MyGraph named '%s'" % (username,graphName))
request_params = dict(request.REQUEST.items())
if request_params:
url_parts = urlsplit(graph.url)
query_string = url_parts[3]
if query_string:
url_params = parse_qs(query_string)
# Remove lists so that we can do an update() on the dict
for param, value in url_params.items():
if isinstance(value, list) and param != 'target':
url_params[param] = value[-1]
url_params.update(request_params)
    # Handle 'target' being a list - we want duplicate &target params in the output query string
url_param_pairs = []
for key,val in url_params.items():
if isinstance(val, list):
for v in val:
url_param_pairs.append( (key,v) )
else:
url_param_pairs.append( (key,val) )
query_string = urlencode(url_param_pairs)
url = urlunsplit(url_parts[:3] + (query_string,) + url_parts[4:])
else:
url = graph.url
return HttpResponseRedirect(url)
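# Illustrative merge (hypothetical values): for a saved graph with
#   graph.url = '/render?target=a.b.c&target=d.e.f&width=330'
# requested with ?width=640, the code above redirects to
#   /render?target=a.b.c&target=d.e.f&width=640
# Request parameters override the saved ones, and the saved graph's repeated
# 'target' parameters survive only when the request does not supply its own.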
def doImageRender(graphClass, graphOptions):
pngData = StringIO()
t = time()
img = graphClass(**graphOptions)
img.output(pngData)
log.rendering('Rendered PNG in %.6f seconds' % (time() - t))
imageData = pngData.getvalue()
pngData.close()
return imageData
def buildResponse(imageData, mimetype="image/png"):
response = HttpResponse(imageData, mimetype=mimetype)
response['Cache-Control'] = 'no-cache'
response['Pragma'] = 'no-cache'
return response
def errorPage(message):
template = loader.get_template('500.html')
context = Context(dict(message=message))
return HttpResponseServerError( template.render(context) )
| apache-2.0 | 2,105,081,150,524,075,300 | 34.898667 | 125 | 0.676943 | false |
florisvb/multi_tracker | multi_tracker_analysis/trajectory_viewer_gui_v2.py | 1 | 40719 | from optparse import OptionParser
import sys, os
import imp
import rosbag, rospy
import pickle
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.ptime as ptime
import time
import numpy as np
import read_hdf5_file_to_pandas
import data_slicing
#import find_flies_in_image_directory
import matplotlib.pyplot as plt
import multi_tracker_analysis as mta
import cv2
import copy
import progressbar
import subprocess
import warnings
from distutils.version import LooseVersion, StrictVersion
print('Using numpy: ' + np.version.version)
print('Using pyqtgraph: ' + pg.__version__)
# Video would not load before installing the most recent version of pyqtgraph
# from the github repo; pyqtgraph-0.9.10-118-ge495bbc (commit e495bbc...) is
# the version that fixed the issue with current numpy.
# Version checking uses distutils.version. See: http://stackoverflow.com/questions/11887762/compare-version-strings
if StrictVersion(pg.__version__) < StrictVersion("0.9.10"):
if StrictVersion(np.version.version) > StrictVersion("1.10"):
warnings.warn('Using pyqtgraph may be incompatible with numpy. Video may not load.')
quit()
pg.mkQApp()
from gi.repository import Gtk as gtk
window = gtk.Window()
screen = window.get_screen()
screen_width = screen.get_width()
screen_height = screen.get_height()
# check screen size - if "small" screen use smaller ui
path = os.path.dirname(os.path.abspath(__file__))
if screen_height < 1500:
uiFile = os.path.join(path, 'gui_trajectory_viewer_smallscreens.ui')
SMALL = True
else:
uiFile = os.path.join(path, 'gui_trajectory_viewer.ui')
SMALL = False
WindowTemplate, TemplateBaseClass = pg.Qt.loadUiType(uiFile)
def get_random_color():
color = (np.random.randint(0,255), np.random.randint(0,255), np.random.randint(0,255))
return color
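# Example invocation (illustrative; the path is a placeholder for a directory
# produced by multi_tracker that contains a trackedobjects.hdf5 file, a
# config file, a background image, and optionally a delta_video.bag):
#
#   python trajectory_viewer_gui_v2.py --path /path/to/tracking_output --movie 1
#
# Individual files can also be passed explicitly with --filename, --bgimg,
# --dvbag and --config (see the OptionParser setup at the bottom).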
class QTrajectory(TemplateBaseClass):
def __init__(self, data_filename, bgimg, delta_video_filename, load_original=False, clickable_width=6, draw_interesting_time_points=True, draw_config_function=False):
self.load_original = load_original
TemplateBaseClass.__init__(self)
self.setWindowTitle('Trajectory Viewer GUI v2')
# Create the main window
#self.app = QtGui.QApplication([])
self.ui = WindowTemplate()
self.ui.setupUi(self)
#self.show()
# options
self.draw_interesting_time_points = draw_interesting_time_points
self.draw_config_function = draw_config_function
# Buttons
self.ui.save_trajecs.clicked.connect(self.save_trajectories)
self.ui.movie_save.clicked.connect(self.save_image_sequence)
self.ui.movie_speed.sliderMoved.connect(self.set_movie_speed)
self.ui.trajec_undo.clicked.connect(self.trajec_undo)
self.ui.movie_play.clicked.connect(self.movie_play)
self.ui.movie_pause.clicked.connect(self.movie_pause)
self.ui.trajec_delete.clicked.connect(self.toggle_trajec_delete)
self.ui.trajec_cut.clicked.connect(self.toggle_trajec_cut)
self.ui.trajec_join_collect.clicked.connect(self.toggle_trajec_join_collect)
self.ui.trajec_select_all.clicked.connect(self.select_all_trajecs)
self.ui.trajec_join_add_data.clicked.connect(self.toggle_trajec_join_add_data)
self.ui.trajec_join_save.clicked.connect(self.trajec_join_save)
self.ui.trajec_join_clear.clicked.connect(self.toggle_trajec_join_clear)
self.ui.save_annotation.clicked.connect(self.save_annotation)
self.ui.load_annotations.clicked.connect(self.load_annotations)
self.ui.annotated_color_checkbox.stateChanged.connect(self.toggle_annotated_colors)
self.ui.annotated_hide_checkbox.stateChanged.connect(self.toggle_annotated_hide)
if not SMALL:
self.ui.get_original_objid.clicked.connect(self.trajec_get_original_objid)
self.ui.save_colors.clicked.connect(self.save_trajec_colors)
self.ui.min_selection_length.setPlainText(str(0))
self.ui.max_selection_length.setPlainText(str(-1)) # -1 means all
# parameters
self.data_filename = data_filename
self.load_data()
self.backgroundimg_filename = bgimg
self.backgroundimg = None
self.binsx = None
self.binsy = None
trange = np.float( np.max(self.pd.time_epoch.values) - np.min(self.pd.time_epoch.values) )
self.troi = [np.min(self.pd.time_epoch.values), np.min(self.pd.time_epoch.values)+trange*0.1]
self.skip_frames = 1
self.frame_delay = 0.03
self.path = os.path.dirname(data_filename)
self.clickable_width = clickable_width
# load delta video bag
if delta_video_filename != 'none':
self.dvbag = rosbag.Bag(delta_video_filename)
else:
self.dvbag = None
# Initialize
self.trajec_width_dict = {}
try:
fname = os.path.join(self.path, 'trajec_to_color_dict.pickle')
f = open(fname, 'r+')
self.trajec_to_color_dict = pickle.load(f)
f.close()
except:
self.trajec_to_color_dict = {}
for key in self.pd.objid.unique():
color = get_random_color()
self.trajec_to_color_dict.setdefault(key, color)
self.plotted_traces_keys = []
self.plotted_traces = []
self.trajectory_ends_vlines = []
self.data_to_add = []
self.selected_trajectory_ends = []
self.object_id_numbers = []
self.annotations = os.path.join(self.path, 'annotations.pickle')
if os.path.exists(self.annotations):
f = open(self.annotations, 'r+')
data = pickle.load(f)
f.close()
self.annotated_keys = data.keys()
else:
self.annotated_keys = []
self.time_mouse_click = time.time()
self.cut_objects = False
self.delete_objects = False
self.join_objects = False
self.add_data = False
self.crosshair_pen = pg.mkPen('w', width=1)
self.ui.qtplot_timetrace.enableAutoRange('xy', False)
if self.config is not None:
            print('**** Sensory stimulus: %s' % str(self.config.sensory_stimulus_on))
for r, row in enumerate(self.config.sensory_stimulus_on):
v1 = pg.PlotDataItem([self.config.sensory_stimulus_on[r][0],self.config.sensory_stimulus_on[r][0]], [0,10])
v2 = pg.PlotDataItem([self.config.sensory_stimulus_on[r][-1],self.config.sensory_stimulus_on[r][-1]], [0,10])
try:
f12 = pg.FillBetweenItem(curve1=v1, curve2=v2, brush=pg.mkBrush(self.config.sensory_stimulus_rgba[r]) )
except:
f12 = pg.FillBetweenItem(curve1=v1, curve2=v2, brush=pg.mkBrush((255,0,0,150)) )
self.ui.qtplot_timetrace.addItem(f12)
lr = pg.LinearRegionItem(values=self.troi)
f = 'update_time_region'
lr.sigRegionChanged.connect(self.__getattribute__(f))
self.ui.qtplot_timetrace.addItem(lr)
print('drawing interesting time points')
self.draw_timeseries_vlines_for_interesting_timepoints()
print('done drawing interesting time points')
self.ui.qtplot_timetrace.setRange(xRange=[np.min(self.time_epoch_continuous), np.max(self.time_epoch_continuous)], yRange=[0, np.max(self.nflies)])
self.ui.qtplot_timetrace.setLimits(yMin=0, yMax=np.max(self.nflies))
self.ui.qtplot_timetrace.setLimits(minYRange=np.max(self.nflies), maxYRange=np.max(self.nflies))
self.current_time_vline = pg.InfiniteLine(angle=90, movable=False)
self.ui.qtplot_timetrace.addItem(self.current_time_vline, ignoreBounds=True)
self.current_time_vline.setPos(0)
pen = pg.mkPen((255,255,255), width=2)
self.current_time_vline.setPen(pen)
# hide a bunch of the axes
self.ui.qtplot_timetrace.hideAxis('left')
self.ui.qtplot_timetrace.hideAxis('bottom')
self.ui.qtplot_trajectory.hideAxis('left')
self.ui.qtplot_trajectory.hideAxis('bottom')
### Button Callbacks
def save_trajectories(self):
self.troi = self.linear_region.getRegion()
start_frame = self.dataset.timestamp_to_framestamp(self.troi[0])
end_frame = self.dataset.timestamp_to_framestamp(self.troi[-1])
dirname = 'data_selection_' + str(start_frame) + '_to_' + str(end_frame)
dirname = os.path.join(self.path, dirname)
if os.path.exists(dirname):
print 'Data selection path exists!'
else:
os.mkdir(dirname)
fname = 'dataframe_' + str(start_frame) + '_to_' + str(end_frame) + '.pickle'
fname = os.path.join(dirname, fname)
print 'Saving stand alone pandas dataframe to file: '
print ' ' + fname
pd_subset = mta.data_slicing.get_data_in_epoch_timerange(self.pd, self.troi)
pd_subset.to_pickle(fname)
#self.config.plot_trajectories(self.troi)
def set_all_buttons_false(self):
self.cut_objects = False
self.join_objects = False
self.delete_objects = False
self.add_data = False
self.get_original_objid = False
def set_movie_speed(self, data):
if data >0:
self.skip_frames = data
            self.frame_delay = 0.03
if data == 0:
self.skip_frames = 1
self.frame_delay = 0.03
if data <0:
            p = 1 - (np.abs(data) / 30.)
max_frame_delay = 0.2
self.frame_delay = (max_frame_delay - (max_frame_delay*p))*2
def get_annotations_from_checked_boxes(self):
notes = []
for i in range(1,5):
checkbox = self.ui.__getattribute__('annotated_checkbox_' + str(i))
if checkbox.checkState():
textbox = self.ui.__getattribute__('annotated_text_' + str(i))
note = textbox.toPlainText()
notes.append(str(note))
return notes
def save_annotation(self):
notes = self.get_annotations_from_checked_boxes()
print(notes)
self.annotations = os.path.join(self.path, 'annotations.pickle')
if os.path.exists(self.annotations):
f = open(self.annotations, 'r+')
data = pickle.load(f)
f.close()
else:
f = open(self.annotations, 'w+')
f.close()
data = {}
for key in self.object_id_numbers:
if key not in data.keys():
data.setdefault(key, {'notes': [], 'related_objids': []})
data[key]['notes'] = notes
data[key]['related_objids'] = self.object_id_numbers
if len(notes) == 0:
del(data[key])
self.annotated_keys = data.keys()
f = open(self.annotations, 'r+')
pickle.dump(data, f)
f.close()
print('Saved annotation')
self.toggle_trajec_join_clear()
def load_annotations(self):
for i in range(1,5):
checkbox = self.ui.__getattribute__('annotated_checkbox_' + str(i))
checkbox.setCheckState(0)
textbox = self.ui.__getattribute__('annotated_text_' + str(i))
textbox.clear()
self.annotations = os.path.join(self.path, 'annotations.pickle')
if os.path.exists(self.annotations):
f = open(self.annotations, 'r+')
data = pickle.load(f)
f.close()
for key in self.object_id_numbers:
annotation = data[key]
if len(self.object_id_numbers) > 1:
raise ValueError('Load Annotations only works with single trajectories selected')
for i, note in enumerate(annotation['notes']):
checkbox = self.ui.__getattribute__('annotated_checkbox_' + str(i+1))
checkbox.setChecked(True)
textbox = self.ui.__getattribute__('annotated_text_' + str(i+1))
textbox.setPlainText(note)
def toggle_annotated_colors(self):
self.draw_trajectories()
def toggle_annotated_hide(self):
self.draw_trajectories()
def save_trajec_colors(self):
fname = os.path.join(self.path, 'trajec_to_color_dict.pickle')
f = open(fname, 'w+')
pickle.dump(self.trajec_to_color_dict, f)
f.close()
def trajec_undo(self):
instruction = self.instructions.pop(-1)
filename = os.path.join(self.path, 'delete_cut_join_instructions.pickle')
if os.path.exists(filename):
f = open(filename, 'r+')
data = pickle.load(f)
f.close()
else:
f = open(filename, 'w+')
f.close()
data = []
data = self.instructions
f = open(filename, 'r+')
pickle.dump(data, f)
f.close()
self.load_data()
self.draw_trajectories()
self.draw_timeseries_vlines_for_interesting_timepoints()
def movie_pause(self):
if self.play is True:
self.play = False
print('pause movie')
elif self.play is False:
self.play = True
print('playing movie')
self.updateTime = ptime.time()
self.updateData()
def movie_play(self):
self.play = True
print('loading image sequence')
self.load_image_sequence()
print('playing movie')
self.updateTime = ptime.time()
self.updateData()
def trajec_get_original_objid(self):
self.set_all_buttons_false()
self.get_original_objid = True
self.crosshair_pen = pg.mkPen((255, 129, 234), width=1)
def toggle_trajec_delete(self):
self.set_all_buttons_false()
self.delete_objects = True
self.crosshair_pen = pg.mkPen('r', width=1)
print('Deleting objects!')
# first delete selected trajectories
print 'Deleting selected objects: ', self.object_id_numbers
while len(self.object_id_numbers) > 0:
key = self.object_id_numbers.pop()
self.delete_object_id_number(key, redraw=False)
self.draw_trajectories()
self.draw_timeseries_vlines_for_interesting_timepoints()
def toggle_trajec_cut(self):
self.set_all_buttons_false()
self.cut_objects = True
self.crosshair_pen = pg.mkPen('y', width=1)
print('Cutting objects!')
def toggle_trajec_join_collect(self):
self.set_all_buttons_false()
self.join_objects = True
self.crosshair_pen = pg.mkPen('g', width=1)
self.ui.qttext_selected_objids.clear()
print('Ready to collect object id numbers. Click on traces to add object id numbers to the list. Click "save object id numbers" to save, and reset the list')
def select_all_trajecs(self):
if not self.join_objects:
self.toggle_trajec_join_collect()
if not SMALL:
min_len = self.ui.__getattribute__('min_selection_length')
min_len = int(min_len.toPlainText())
max_len = self.ui.__getattribute__('max_selection_length')
max_len = int(max_len.toPlainText())
if max_len == -1:
max_len = np.inf
else:
min_len = 0
max_len = np.inf
pd_subset = mta.data_slicing.get_data_in_epoch_timerange(self.pd, self.troi)
keys = np.unique(pd_subset.objid.values)
for trace in self.plotted_traces:
key = trace.curve.key
trajec_length = len(self.pd[self.pd.objid==key])
if trajec_length > min_len and trajec_length < max_len:
self.trace_clicked(trace.curve)
def toggle_trajec_join_add_data(self):
self.set_all_buttons_false()
self.data_to_add = []
self.add_data = True
self.crosshair_pen = pg.mkPen((0,0,255), width=1)
print('Adding data!')
def toggle_trajec_join_clear(self):
self.set_all_buttons_false()
self.trajec_width_dict = {}
for key in self.object_id_numbers:
self.trajec_width_dict[key] = 2
self.crosshair_pen = pg.mkPen('w', width=1)
self.object_id_numbers = []
self.add_data = []
self.ui.qttext_selected_objids.clear()
print('Join list cleared')
self.draw_trajectories()
self.toggle_trajec_join_collect()
### Mouse moved / clicked callbacks
def mouse_moved(self, pos):
self.mouse_position = [self.img.mapFromScene(pos).x(), self.img.mapFromScene(pos).y()]
self.crosshair_vLine.setPos(self.mouse_position[0])
self.crosshair_hLine.setPos(self.mouse_position[1])
self.crosshair_vLine.setPen(self.crosshair_pen)
self.crosshair_hLine.setPen(self.crosshair_pen)
def mouse_clicked(self, data):
self.time_since_mouse_click = time.time() - self.time_mouse_click
if self.time_since_mouse_click > 0.5:
if self.add_data:
self.add_data_to_trajecs_to_join()
self.time_mouse_click = time.time()
if self.get_original_objid:
s = 'time_epoch > ' + str(self.current_time_epoch - 1) + ' & time_epoch < ' + str(self.current_time_epoch + 1)
pd_tmp = self.original_pd.query(s)
print s, pd_tmp.shape
x_diff = np.abs(pd_tmp.position_x.values - self.mouse_position[1])
y_diff = np.abs(pd_tmp.position_y.values - self.mouse_position[0])
i = np.argmin(x_diff + y_diff)
objid = pd_tmp.iloc[i].objid
self.ui.qttext_show_original_objid.clear()
self.ui.qttext_show_original_objid.setPlainText(str(int(objid)))
def trace_clicked(self, item, redraw=True):
if self.join_objects:
if item.key not in self.object_id_numbers:
print 'Saving object to object list: ', item.key
self.object_id_numbers.append(item.key)
color = self.trajec_to_color_dict[item.key]
pen = pg.mkPen(color, width=4)
self.trajec_width_dict.setdefault(item.key, 4)
item.setPen(pen)
else:
print 'Removing object from object list: ', item.key
self.object_id_numbers.remove(item.key)
color = self.trajec_to_color_dict[item.key]
pen = pg.mkPen(color, width=2)
self.trajec_width_dict.setdefault(item.key, 2)
item.setPen(pen)
self.ui.qttext_selected_objids.clear()
self.ui.qttext_selected_objids.setPlainText(str(self.object_id_numbers))
self.draw_vlines_for_selected_trajecs()
elif self.cut_objects:
print 'Cutting trajectory: ', item.key, ' at: ', self.mouse_position
self.cut_trajectory(item.key, self.mouse_position)
elif self.delete_objects:
self.delete_object_id_number(item.key, redraw=redraw)
elif self.add_data:
self.add_data_to_trajecs_to_join()
def add_data_to_trajecs_to_join(self):
self.data_to_add.append([self.current_time_epoch, self.mouse_position[0], self.mouse_position[1]])
self.draw_data_to_add()
def get_new_unique_objid(self):
fname = os.path.join(self.path, 'new_unique_objids.pickle')
if os.path.exists(fname):
f = open(fname, 'r+')
data = pickle.load(f)
f.close()
else:
f = open(fname, 'w+')
f.close()
data = [np.max(self.pd.objid)+10]
new_objid = data[-1] + 1
data.append(new_objid)
f = open(fname, 'r+')
pickle.dump(data, f)
f.close()
print 'NEW OBJID CREATED: ', new_objid
return new_objid
def cut_trajectory(self, key, point):
dataset = mta.read_hdf5_file_to_pandas.Dataset(self.pd)
trajec = dataset.trajec(key)
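        # Find the trajectory frame nearest the clicked point: stack the
        # positions into a 2xN array, broadcast-subtract the 2x1 click
        # location, and take the frame with the smallest Euclidean distance.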
p = np.vstack((trajec.position_y, trajec.position_x))
point = np.array([[point[0]], [point[1]]])
error = np.linalg.norm(p-point, axis=0)
trajectory_frame = np.argmin(error)
dataset_frame = dataset.timestamp_to_framestamp(trajec.time_epoch[trajectory_frame])
instructions = {'action': 'cut',
'order': time.time(),
'objid': key,
'cut_frame_global': dataset_frame,
'cut_frame_trajectory': trajectory_frame,
'cut_time_epoch': trajec.time_epoch[trajectory_frame],
'new_objid': self.get_new_unique_objid()}
self.save_delete_cut_join_instructions(instructions)
# update gui
self.pd = mta.read_hdf5_file_to_pandas.delete_cut_join_trajectories_according_to_instructions(self.pd, instructions, interpolate_joined_trajectories=True)
self.draw_trajectories()
self.draw_timeseries_vlines_for_interesting_timepoints()
def trajec_join_save(self):
instructions = {'action': 'join',
'order': time.time(),
'objids': self.object_id_numbers,
'data_to_add': self.data_to_add,
'new_objid': self.get_new_unique_objid()}
print instructions
self.save_delete_cut_join_instructions(instructions)
self.object_id_numbers = []
self.ui.qttext_selected_objids.clear()
self.data_to_add = []
self.trajec_width_dict = {}
# now join them for the gui
self.pd = mta.read_hdf5_file_to_pandas.delete_cut_join_trajectories_according_to_instructions(self.pd, instructions, interpolate_joined_trajectories=True)
self.draw_trajectories()
self.draw_timeseries_vlines_for_interesting_timepoints()
print 'Reset object id list - you may collect a new selection of objects now'
def delete_object_id_number(self, key, redraw=True):
instructions = {'action': 'delete',
'order': time.time(),
'objid': key}
self.save_delete_cut_join_instructions(instructions)
# update gui
#self.trajec_to_color_dict[key] = (0,0,0,0)
self.pd = mta.read_hdf5_file_to_pandas.delete_cut_join_trajectories_according_to_instructions(self.pd, instructions, interpolate_joined_trajectories=True)
if redraw:
self.draw_trajectories()
self.draw_timeseries_vlines_for_interesting_timepoints()
### Drawing functions
def draw_timeseries_vlines_for_interesting_timepoints(self):
if self.draw_interesting_time_points:
self.calc_time_etc()
# clear
try:
self.ui.qtplot_timetrace.removeItem(self.nflies_plot)
except:
pass
for vline in self.trajectory_ends_vlines:
self.ui.qtplot_timetrace.removeItem(vline)
self.trajectory_ends_vlines = []
# draw
self.nflies_plot = self.ui.qtplot_timetrace.plot(x=self.time_epoch_continuous, y=self.nflies)
objid_ends = self.pd.groupby('objid').time_epoch.max()
for key in objid_ends.keys():
t = objid_ends[key]
vline = pg.InfiniteLine(angle=90, movable=False)
self.ui.qtplot_timetrace.addItem(vline, ignoreBounds=True)
vline.setPos(t)
pen = pg.mkPen(self.trajec_to_color_dict[key], width=1)
vline.setPen(pen)
self.trajectory_ends_vlines.append(vline)
# TODO: times (or frames) where trajectories get very close to one another
def draw_vlines_for_selected_trajecs(self):
for vline in self.selected_trajectory_ends:
self.ui.qtplot_timetrace.removeItem(vline)
self.selected_trajectory_ends = []
for key in self.object_id_numbers:
trajec = self.dataset.trajec(key)
vline = pg.InfiniteLine(angle=90, movable=False)
self.ui.qtplot_timetrace.addItem(vline, ignoreBounds=True)
vline.setPos(trajec.time_epoch[-1])
pen = pg.mkPen(self.trajec_to_color_dict[key], width=5)
vline.setPen(pen)
self.selected_trajectory_ends.append(vline)
def update_time_region(self, linear_region):
self.linear_region = linear_region
self.troi = linear_region.getRegion()
self.draw_trajectories()
def init_bg_image(self):
if self.binsx is None:
self.binsx, self.binsy = mta.plot.get_bins_from_backgroundimage(self.backgroundimg_filename)
self.backgroundimg = cv2.imread(self.backgroundimg_filename, cv2.CV_8UC1)
img = copy.copy(self.backgroundimg)
self.img = pg.ImageItem(img, autoLevels=False)
def draw_trajectories(self):
for plotted_trace in self.plotted_traces:
self.ui.qtplot_trajectory.removeItem(plotted_trace)
self.ui.qtplot_trajectory.clear()
pd_subset = mta.data_slicing.get_data_in_epoch_timerange(self.pd, self.troi)
self.dataset = read_hdf5_file_to_pandas.Dataset(self.pd)
self.init_bg_image()
# plot a heatmap of the trajectories, for error checking
h = mta.plot.get_heatmap(pd_subset, self.binsy, self.binsx, position_x='position_y', position_y='position_x', position_z='position_z', position_z_slice=None)
indices = np.where(h != 0)
img = copy.copy(self.backgroundimg)
img[indices] = 0
self.img = pg.ImageItem(img, autoLevels=False)
self.ui.qtplot_trajectory.addItem(self.img)
self.img.setZValue(-200) # make sure image is behind other data
# cross hair mouse stuff
self.ui.qtplot_trajectory.scene().sigMouseMoved.connect(self.mouse_moved)
self.ui.qtplot_trajectory.scene().sigMouseClicked.connect(self.mouse_clicked)
self.crosshair_vLine = pg.InfiniteLine(angle=90, movable=False)
self.crosshair_hLine = pg.InfiniteLine(angle=0, movable=False)
self.ui.qtplot_trajectory.addItem(self.crosshair_vLine, ignoreBounds=True)
self.ui.qtplot_trajectory.addItem(self.crosshair_hLine, ignoreBounds=True)
keys = np.unique(pd_subset.objid.values)
self.plotted_traces_keys = []
self.plotted_traces = []
if len(keys) < 100:
for key in keys:
trajec = self.dataset.trajec(key)
first_time = np.max([self.troi[0], trajec.time_epoch[0]])
first_time_index = np.argmin( np.abs(trajec.time_epoch-first_time) )
last_time = np.min([self.troi[-1], trajec.time_epoch[-1]])
last_time_index = np.argmin( np.abs(trajec.time_epoch-last_time) )
#if trajec.length > 5:
if key not in self.trajec_to_color_dict.keys():
color = get_random_color()
self.trajec_to_color_dict.setdefault(key, color)
else:
color = self.trajec_to_color_dict[key]
if key in self.trajec_width_dict.keys():
width = self.trajec_width_dict[key]
else:
width = 2
if self.ui.annotated_color_checkbox.checkState():
if key in self.annotated_keys:
color = (0,0,0)
width = 6
if self.ui.annotated_hide_checkbox.checkState():
if key in self.annotated_keys:
color = (0,0,0,0)
width = 1
pen = pg.mkPen(color, width=width)
plotted_trace = self.ui.qtplot_trajectory.plot(trajec.position_y[first_time_index:last_time_index], trajec.position_x[first_time_index:last_time_index], pen=pen)
self.plotted_traces.append(plotted_trace)
self.plotted_traces_keys.append(key)
for i, key in enumerate(self.plotted_traces_keys):
self.plotted_traces[i].curve.setClickable(True, width=self.clickable_width)
self.plotted_traces[i].curve.key = key
self.plotted_traces[i].curve.sigClicked.connect(self.trace_clicked)
self.draw_data_to_add()
self.draw_vlines_for_selected_trajecs()
#self.save_trajec_color_width_dicts()
def draw_data_to_add(self):
for data in self.data_to_add:
print data
self.ui.qtplot_trajectory.plot([data[1]], [data[2]], pen=(0,0,0), symbol='o', symbolSize=10)
### Load / read / save data functions
def load_data(self):
if self.load_original:
self.original_pd = mta.read_hdf5_file_to_pandas.load_data_as_pandas_dataframe_from_hdf5_file(self.data_filename)
print 'loading data'
self.pd, self.config = mta.read_hdf5_file_to_pandas.load_and_preprocess_data(self.data_filename)
self.path = self.config.path
self.dataset = read_hdf5_file_to_pandas.Dataset(self.pd)
filename = os.path.join(self.path, 'delete_cut_join_instructions.pickle')
if os.path.exists(filename):
f = open(filename, 'r+')
data = pickle.load(f)
f.close()
else:
data = []
self.instructions = data
self.calc_time_etc()
print 'data loaded'
print 'N Trajecs: ', len(self.pd.groupby('objid'))
def calc_time_etc(self):
self.time_epoch = self.pd.time_epoch.groupby(self.pd.index).mean().values
self.speed = self.pd.speed.groupby(self.pd.index).mean().values
self.nflies = data_slicing.get_nkeys_per_frame(self.pd)
self.time_epoch_continuous = np.linspace(np.min(self.time_epoch), np.max(self.time_epoch), len(self.nflies))
def save_delete_cut_join_instructions(self, instructions):
self.delete_cut_join_filename = os.path.join(self.path, 'delete_cut_join_instructions.pickle')
if os.path.exists(self.delete_cut_join_filename):
f = open(self.delete_cut_join_filename, 'r+')
data = pickle.load(f)
f.close()
else:
f = open(self.delete_cut_join_filename, 'w+')
f.close()
data = []
data.append(instructions)
f = open(self.delete_cut_join_filename, 'r+')
pickle.dump(data, f)
f.close()
self.instructions.append(instructions)
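    # Illustrative instruction records, matching the dicts built in
    # cut_trajectory, trajec_join_save and delete_object_id_number (all
    # values hypothetical):
    #   {'action': 'cut', 'order': 1467000000.0, 'objid': 12,
    #    'cut_frame_global': 540, 'cut_frame_trajectory': 40,
    #    'cut_time_epoch': 1467000042.5, 'new_objid': 9001}
    #   {'action': 'join', 'order': 1467000100.0, 'objids': [12, 13],
    #    'data_to_add': [], 'new_objid': 9002}
    #   {'action': 'delete', 'order': 1467000200.0, 'objid': 9002}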
def load_image_sequence(self):
version = subprocess.check_output(["rosversion", "-d"])
timerange = self.troi
print 'loading image sequence from delta video bag - may take a moment'
pbar = progressbar.ProgressBar().start()
rt0 = rospy.Time(timerange[0])
rt1 = rospy.Time(timerange[1])
self.msgs = self.dvbag.read_messages(start_time=rt0, end_time=rt1)
self.image_sequence = []
self.image_sequence_timestamps = []
t0 = None
self.delta_video_background_img_filename = None
self.delta_video_background_img = None
for m, msg in enumerate(self.msgs):
bag_time_stamp = float(msg[1].header.stamp.secs) + float(msg[1].header.stamp.nsecs)*1e-9
delta_video_background_img_filename = os.path.join( self.path, os.path.basename(msg[1].background_image) )
if os.path.exists(delta_video_background_img_filename):
if delta_video_background_img_filename != self.delta_video_background_img_filename:
self.delta_video_background_img_filename = delta_video_background_img_filename
self.delta_video_background_img = cv2.imread(self.delta_video_background_img_filename, cv2.CV_8UC1)
else: # if we can't find the bgimg, do the best we can
if self.delta_video_background_img is None:
self.delta_video_background_img_filename = mta.read_hdf5_file_to_pandas.get_filename(self.path, 'deltavideo_bgimg')
self.delta_video_background_img = cv2.imread(self.delta_video_background_img_filename, cv2.CV_8UC1)
imgcopy = copy.copy(self.delta_video_background_img)
if len(msg[1].values) > 0:
if 'kinetic' in version:
msg[1].xpixels = tuple(x - 1 for x in msg[1].xpixels)
msg[1].ypixels = tuple(y - 1 for y in msg[1].ypixels)
else:
pass #print('Not ros kinetic.')
imgcopy[msg[1].xpixels, msg[1].ypixels] = msg[1].values # if there's an error, check if you're using ROS hydro?
if self.draw_config_function:
imgcopy = cv2.cvtColor(imgcopy, cv2.COLOR_GRAY2RGB)
self.config.draw(imgcopy, bag_time_stamp)
self.image_sequence.append(imgcopy)
#s = int((m / float(len(self.msgs)))*100)
tfloat = msg[1].header.stamp.secs + msg[1].header.stamp.nsecs*1e-9
self.image_sequence_timestamps.append(tfloat)
if t0 is not None:
t_elapsed = tfloat - t0
t_total = timerange[1] - timerange[0]
s = int(100*(t_elapsed / t_total))
pbar.update(s)
else:
t0 = tfloat
pbar.finish()
self.current_frame = -1
def save_image_sequence(self):
start_frame = self.dataset.timestamp_to_framestamp(self.troi[0])
end_frame = self.dataset.timestamp_to_framestamp(self.troi[-1])
dirname = 'data_selection_' + str(start_frame) + '_to_' + str(end_frame)
dirname = os.path.join(self.path, dirname)
if os.path.exists(dirname):
print 'Data selection path exists!'
else:
os.mkdir(dirname)
image_sequence_dirname = 'image_sequence_' + str(start_frame) + '_to_' + str(end_frame)
dirname = os.path.join(dirname, image_sequence_dirname)
print 'Image sequence directory: ', dirname
if os.path.exists(dirname):
print 'Image selection path exists!'
else:
os.mkdir(dirname)
print 'saving image sequence: ', len(self.image_sequence)
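        # zs = digit width for zero-padding frame indices so the PNG
        # filenames sort lexicographically (e.g. 0042.png for ~1000 frames)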
zs = int(np.ceil( np.log10(len(self.image_sequence)) )+1)
print 'zs: ', zs
for i, image in enumerate(self.image_sequence):
img_name = str(i).zfill(zs) + '.png'
img_name = os.path.join(dirname, img_name)
cv2.imwrite(img_name, image)
print i, img_name
print 'To turn the PNGs into a movie, you can run this command from inside the directory with the tmp files: '
print 'mencoder \'mf://*.png\' -mf type=png:fps=30 -ovc lavc -lavcopts vcodec=mpeg4 -oac copy -o animation.avi'
print ' or '
    print "mencoder 'mf://*.png' -mf type=png:fps=30 -ovc x264 -x264encopts preset=slow:tune=film:crf=22 -oac copy -o animation.mp4"
print "might need: https://www.faqforge.com/linux/how-to-install-ffmpeg-on-ubuntu-14-04/"
print ''
def get_next_reconstructed_image(self):
self.current_frame += self.skip_frames
if self.current_frame >= len(self.image_sequence)-1:
self.current_frame = -1
img = self.image_sequence[self.current_frame]
return self.image_sequence_timestamps[self.current_frame], img
def updateData(self):
if self.play:
## Display the data
time_epoch, cvimg = self.get_next_reconstructed_image()
try:
self.img.setImage(cvimg)
except AttributeError:
self.init_bg_image()
self.img.setImage(cvimg)
QtCore.QTimer.singleShot(1, self.updateData)
now = ptime.time()
dt = (now-self.updateTime)
self.updateTime = now
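            # Throttle playback: if this frame rendered faster than
            # frame_delay, sleep the remainder so the effective rate stays
            # close to 1/frame_delay.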
if dt < self.frame_delay:
d = self.frame_delay - dt
time.sleep(d)
self.current_time_vline.setPos(time_epoch)
self.current_time_epoch = time_epoch
del(cvimg)
def run(self):
## Display the widget as a new window
#self.w.show()
## Start the Qt event loop
print 'Running!'
self.show()
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
## Read data #############################################################
parser = OptionParser()
    parser.add_option('--path', type=str, default='none', help="option: path that points to standard named filename, background image, dvbag, config. If using 'path', no need to provide filename, bgimg, dvbag, and config.")
parser.add_option('--movie', type=int, default=1, help="load and play the dvbag movie, default is 1, to load use 1")
parser.add_option('--load-original', type=int, default=0, dest="load_original", help="load original (unprocessed) dataset for debugging, use 1 to load, default 0")
parser.add_option('--draw-interesting-time-points', type=int, default=1, dest="draw_interesting_time_points", help="draw interesting time points (e.g. vertical lines). Default = True, set to False if VERY large dataset.")
parser.add_option('--draw-config-function', type=int, default=0, dest="draw_config_function", help="If config has a draw function, apply this function to movie frames")
parser.add_option('--clickable-width', type=int, default=6, dest="clickable_width", help="pixel distance from trace to accept click (larger number means easier to click traces)")
parser.add_option('--filename', type=str, help="name and path of the hdf5 tracked_objects filename")
parser.add_option('--bgimg', type=str, help="name and path of the background image")
parser.add_option('--dvbag', type=str, default='none', help="name and path of the delta video bag file, optional")
parser.add_option('--config', type=str, default='none', help="name and path of a configuration file, optional. If the configuration file has an attribute 'sensory_stimulus_on', which should be a list of epoch timestamps e.g. [[t1,t2],[t3,4]], then these timeframes will be highlighted in the gui.")
(options, args) = parser.parse_args()
if options.path != 'none':
if not os.path.isdir(options.path):
raise ValueError('Path needs to be a directory!')
options.filename = mta.read_hdf5_file_to_pandas.get_filename(options.path, 'trackedobjects.hdf5')
options.config = mta.read_hdf5_file_to_pandas.get_filename(options.path, 'config')
options.dvbag = mta.read_hdf5_file_to_pandas.get_filename(options.path, 'delta_video.bag')
options.bgimg = mta.read_hdf5_file_to_pandas.get_filename(options.path, '_bgimg_')
if options.movie != 1:
options.dvbag = 'none'
Qtrajec = QTrajectory(options.filename, options.bgimg, options.dvbag, options.load_original, options.clickable_width,
options.draw_interesting_time_points,
options.draw_config_function)
Qtrajec.run()
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
'''
class MainWindow(TemplateBaseClass):
def __init__(self):
TemplateBaseClass.__init__(self)
self.setWindowTitle('pyqtgraph example: Qt Designer')
# Create the main window
self.ui = WindowTemplate()
self.ui.setupUi(self)
self.ui.movie_play.clicked.connect(self.plot)
self.show()
def plot(self):
self.ui.qtplot_trajectory.plot(np.random.normal(size=100), clear=True)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
win = MainWindow()
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
'''
| mit | -1,102,405,379,312,165,500 | 41.952532 | 302 | 0.595373 | false |
TetraAsh/baruwa2 | baruwa/config/middleware.py | 1 | 3543 | # -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2012 Andrew Colin Kissa <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Pylons middleware initialization"""
from beaker.middleware import SessionMiddleware
from paste.cascade import Cascade
from paste.registry import RegistryManager
from paste.urlparser import StaticURLParser
from paste.deploy.converters import asbool
from pylons.middleware import ErrorHandler, StatusCodeRedirect
from pylons.wsgiapp import PylonsApp
from routes.middleware import RoutesMiddleware
# from repoze.what.plugins.config import make_middleware_with_config
from baruwa.config.environment import load_environment
from baruwa.lib.auth.middleware import make_middleware_with_config
def make_app(global_conf, full_stack=True, static_files=True, **app_conf):
"""Create a Pylons WSGI application and return it
``global_conf``
The inherited configuration for this application. Normally from
the [DEFAULT] section of the Paste ini file.
``full_stack``
Whether this application provides a full WSGI stack (by default,
meaning it handles its own exceptions and errors). Disable
full_stack when this application is "managed" by another WSGI
middleware.
``static_files``
Whether this application serves its own static files; disable
when another web server is responsible for serving them.
``app_conf``
The application's local configuration. Normally specified in
the [app:<name>] section of the Paste ini file (where <name>
defaults to main).
"""
# Configure the Pylons environment
config = load_environment(global_conf, app_conf)
# The Pylons WSGI app
app = PylonsApp(config=config)
# Routing/Session Middleware
app = RoutesMiddleware(app, config['routes.map'], singleton=False)
app = SessionMiddleware(app, config)
# CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)
if 'what_log_file' in app_conf:
what_log_file = app_conf['what_log_file']
else:
what_log_file = None
app = make_middleware_with_config(app, global_conf, what_log_file)
if asbool(full_stack):
# Handle Python exceptions
app = ErrorHandler(app, global_conf, **config['pylons.errorware'])
# Display error documents for 401, 403, 404 status codes (and
# 500 when debug is disabled)
if asbool(config['debug']):
app = StatusCodeRedirect(app)
else:
app = StatusCodeRedirect(app, [400, 401, 403, 404, 500])
# Establish the Registry for this application
app = RegistryManager(app)
if asbool(static_files):
# Serve static files
static_app = StaticURLParser(config['pylons.paths']['static_files'])
app = Cascade([static_app, app])
app.config = config
return app
| gpl-3.0 | -2,069,630,663,363,078,400 | 37.096774 | 76 | 0.709004 | false |
ChristianAA/python_koans_solutions | python3/koans/about_with_statements.py | 1 | 3562 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutSandwichCode in the Ruby Koans
#
from runner.koan import *
import re # For regular expression string comparisons
class AboutWithStatements(Koan):
def count_lines(self, file_name):
try:
file = open(file_name)
try:
return len(file.readlines())
finally:
file.close()
except IOError:
# should never happen
self.fail()
def test_counting_lines(self):
self.assertEqual(4, self.count_lines("example_file.txt"))
# ------------------------------------------------------------------
def find_line(self, file_name):
try:
file = open(file_name)
try:
for line in file.readlines():
match = re.search('e', line)
#print(line)
if match:
return line
finally:
file.close()
except IOError:
# should never happen
self.fail()
def test_finding_lines(self):
self.assertEqual("test\n", self.find_line("example_file.txt"))
## ------------------------------------------------------------------
## THINK ABOUT IT:
##
## The count_lines and find_line are similar, and yet different.
## They both follow the pattern of "sandwich code".
##
## Sandwich code is code that comes in three parts: (1) the top slice
## of bread, (2) the meat, and (3) the bottom slice of bread.
## The bread part of the sandwich almost always goes together, but
## the meat part changes all the time.
##
## Because the changing part of the sandwich code is in the middle,
## abstracting the top and bottom bread slices to a library can be
## difficult in many languages.
##
## (Aside for C++ programmers: The idiom of capturing allocated
## pointers in a smart pointer constructor is an attempt to deal with
## the problem of sandwich code for resource allocation.)
##
## Python solves the problem using Context Managers. Consider the
## following code:
##
class FileContextManager():
def __init__(self, file_name):
self._file_name = file_name
self._file = None
def __enter__(self):
self._file = open(self._file_name)
return self._file
def __exit__(self, cls, value, tb):
self._file.close()
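            # __exit__ implicitly returns None (falsy), so exceptions raised
            # inside the with-block are propagated rather than suppressed.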
# Now we write:
def count_lines2(self, file_name):
with self.FileContextManager(file_name) as file:
return len(file.readlines())
def test_counting_lines2(self):
self.assertEqual(4, self.count_lines2("example_file.txt"))
# ------------------------------------------------------------------
def find_line2(self, file_name):
with self.FileContextManager(file_name) as file:
for line in file.readlines():
if re.search('e', line): return line
def test_finding_lines2(self):
self.assertEqual("test\n", self.find_line2("example_file.txt"))
self.assertNotEqual('a\n', self.find_line2("example_file.txt"))
# ------------------------------------------------------------------
def count_lines3(self, file_name):
with open(file_name) as file:
return len(file.readlines())
def test_open_already_has_its_own_built_in_context_manager(self):
self.assertEqual(4, self.count_lines3("example_file.txt"))
| mit | 3,022,711,990,002,879,000 | 31.678899 | 73 | 0.537619 | false |
richardmillson/Calkin_Wilf_tree | tree_test.py | 1 | 2245 | from tree_plot import *
import timeit
first_7 = (Fraction(1, 1), Fraction(1, 2), Fraction(2, 1), Fraction(1, 3),
Fraction(3, 2), Fraction(2, 3), Fraction(3, 1))
def test_succ():
for n in range(len(first_7) - 1):
assert succ(first_7[n]) == first_7[n + 1]
def test_entire_tree():
start = 0
stop = 7
generated_first_7 = get_slice(start, stop)
assert generated_first_7 == first_7
def test_get_nth():
assert get_nth(3) == first_7[3]
assert get_nth(6) == first_7[6]
def time_get_nth(n):
"""
time how long it takes the accumulator to get the nth element
:param n: positive int
:return: float
"""
start = timeit.default_timer()
x = Fraction(1, 1)
i = 0
while i < n:
x = succ(x)
i += 1
print x
stop = timeit.default_timer() - start
return stop
def time_get_slice(n):
"""
time how long it takes slice to get the nth element
:param n: positive int
:return: float
"""
start = timeit.default_timer()
print tuple(itertools.islice(entire_tree(), n, n + 1))[0]
stop = timeit.default_timer() - start
return stop
def compare_times():
n = 100000
print "get_nth", time_get_nth(n)#, get_nth(n)
print "get_slice", time_get_slice(n)#, get_slice(n, n + 1)[0]
def test_is_power_of_two():
assert is_power_of_two(0) == False
assert is_power_of_two(1) == True
assert is_power_of_two(2) == True
assert is_power_of_two(3) == False
assert is_power_of_two(4) == True
def test_new_level():
assert new_level(0) == False
assert new_level(1) == True
assert new_level(2) == False
assert new_level(3) == True
assert new_level(4) == False
assert new_level(5) == False
assert new_level(6) == False
assert new_level(7) == True
def test_display_slice():
display = display_slice(0, 2**6 - 1)
print display
def test_get_position():
for i in range(0, 16):
assert i == get_position(get_nth(i))
def test_plot_distribution():
n = 1048
plot_distribution(n)
test_succ()
test_entire_tree()
test_get_nth()
# compare_times()
test_is_power_of_two()
# test_display_slice()
test_new_level()
test_get_position()
test_plot_distribution()
| mit | -3,908,501,368,428,007,000 | 20.796117 | 74 | 0.600891 | false |
voc/voctomix | voctogui/lib/toolbar/blinder.py | 1 | 4053 | #!/usr/bin/env python3
import logging
import os
import time
from gi.repository import Gtk, GLib
import lib.connection as Connection
from lib.config import Config
class BlinderToolbarController(object):
"""Manages Accelerators and Clicks on the Composition Toolbar-Buttons"""
# set resolution of the blink timer in seconds
timer_resolution = 1.0
def __init__(self, win, uibuilder):
self.log = logging.getLogger('BlinderToolbarController')
self.toolbar = uibuilder.find_widget_recursive(win, 'toolbar_blinder')
live_button = uibuilder.find_widget_recursive(self.toolbar, 'stream_live')
blind_button = uibuilder.find_widget_recursive(
self.toolbar, 'stream_blind')
blinder_box = uibuilder.find_widget_recursive(
win, 'box_blinds')
blind_button_pos = self.toolbar.get_item_index(blind_button)
if not Config.getBlinderEnabled():
self.log.info('disabling blinding features '
'because the server does not support them')
self.toolbar.remove(live_button)
self.toolbar.remove(blind_button)
# hide blinder box
blinder_box.hide()
blinder_box.set_no_show_all(True)
return
blinder_sources = Config.getBlinderSources()
self.current_status = None
live_button.connect('toggled', self.on_btn_toggled)
live_button.set_can_focus(False)
self.live_button = live_button
self.blind_buttons = {}
accel_f_key = 11
for idx, name in enumerate(blinder_sources):
if idx == 0:
new_btn = blind_button
else:
new_btn = Gtk.RadioToolButton(group=live_button)
self.toolbar.insert(new_btn, blind_button_pos)
new_btn.set_name(name)
new_btn.get_style_context().add_class("output")
new_btn.get_style_context().add_class("mode")
new_btn.set_can_focus(False)
new_btn.set_label(name.upper())
new_btn.connect('toggled', self.on_btn_toggled)
new_btn.set_tooltip_text("Stop streaming by %s" % name)
self.blind_buttons[name] = new_btn
accel_f_key = accel_f_key - 1
# connect event-handler and request initial state
Connection.on('stream_status', self.on_stream_status)
Connection.send('get_stream_status')
self.timeout = None
def start_blink(self):
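        # Run one blink cycle immediately, then (re)arm a periodic GLib
        # timeout that keeps toggling the highlight via do_timeout().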
self.blink = True
self.do_timeout()
self.blink = True
# remove old time out
if self.timeout:
GLib.source_remove(self.timeout)
# set up timeout for periodic redraw
self.timeout = GLib.timeout_add_seconds(self.timer_resolution, self.do_timeout)
def on_btn_toggled(self, btn):
if btn.get_active():
btn_name = btn.get_name()
if self.current_status != btn_name:
self.log.info('stream-status activated: %s', btn_name)
if btn_name == 'live':
Connection.send('set_stream_live')
else:
Connection.send('set_stream_blind', btn_name)
def on_stream_status(self, status, source=None):
self.log.info('on_stream_status callback w/ status %s and source %s',
status, source)
self.current_status = source if source is not None else status
for button in list(self.blind_buttons.values()) + [self.live_button]:
if button.get_name() == self.current_status:
button.set_active(True)
self.start_blink()
def do_timeout(self):
        # toggle the "blink" CSS class on all buttons each tick so the
        # active selection flashes
for button in list(self.blind_buttons.values()) + [self.live_button]:
if self.blink:
button.get_style_context().add_class("blink")
else:
button.get_style_context().remove_class("blink")
self.blink = not self.blink
return True
| mit | -8,651,226,816,891,558,000 | 34.243478 | 87 | 0.595361 | false |
Lochlan/LochlanAndCatherine.com | lochlanandcatherinecom/settings.py | 1 | 5538 | """
Django settings for lochlanandcatherinecom project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import socket
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# openshift is our PAAS for now.
ON_PAAS = 'OPENSHIFT_REPO_DIR' in os.environ
if ON_PAAS:
SECRET_KEY = os.environ['OPENSHIFT_SECRET_TOKEN']
else:
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*5p6z+ksj!$k0elf$)@psnr-i%1&5)z@$ada3$02=$855^0ziz'
# SECURITY WARNING: don't run with debug turned on in production!
# adjust to turn off when on Openshift, but allow an environment variable to override on PAAS
DEBUG = not ON_PAAS
DEBUG = DEBUG or 'DEBUG' in os.environ
if ON_PAAS and DEBUG:
print("*** Warning - Debug mode is on ***")
if ON_PAAS:
ALLOWED_HOSTS = [
'.lochlanandcatherine.com',
os.environ['OPENSHIFT_APP_DNS'],
socket.gethostname(),
]
else:
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rsvps',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'lochlanandcatherinecom.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lochlanandcatherinecom.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
if ON_PAAS:
# determine if we are on MySQL or POSTGRESQL
if "OPENSHIFT_POSTGRESQL_DB_USERNAME" in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['OPENSHIFT_APP_NAME'],
'USER': os.environ['OPENSHIFT_POSTGRESQL_DB_USERNAME'],
'PASSWORD': os.environ['OPENSHIFT_POSTGRESQL_DB_PASSWORD'],
'HOST': os.environ['OPENSHIFT_POSTGRESQL_DB_HOST'],
'PORT': os.environ['OPENSHIFT_POSTGRESQL_DB_PORT'],
}
}
elif "OPENSHIFT_MYSQL_DB_USERNAME" in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ['OPENSHIFT_APP_NAME'],
'USER': os.environ['OPENSHIFT_MYSQL_DB_USERNAME'],
'PASSWORD': os.environ['OPENSHIFT_MYSQL_DB_PASSWORD'],
'HOST': os.environ['OPENSHIFT_MYSQL_DB_HOST'],
'PORT': os.environ['OPENSHIFT_MYSQL_DB_PORT'],
}
}
else:
# stock django, local development.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'wsgi', 'static')
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
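# NOTE: the TEMPLATES setting below overrides the earlier definition above;
# Django only keeps the last assignment of a settings name.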
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# insert your TEMPLATE_DIRS here
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'lochlanandcatherinecom.context_processors.environ',
],
},
},
]
| mit | -980,325,389,961,318,300 | 30.11236 | 93 | 0.628025 | false |
koverholt/bayes-fire | Example_Cases/Evac_Stairs/Scripts/evac_flow_occupants_models.py | 1 | 1805 | #!/usr/bin/env python
"""Module for setting up statistical models"""
from __future__ import division
import numpy as np
import pymc as mc
import evac_flow_occupants_graphics as graphics
import data_evac
def linear():
"""
PyMC configuration with a linear model.
    This is an example of a Bayesian MCMC configuration that uses
    a linear model (theta[0]*occupants + theta[1]) for the mean response.
"""
# Priors
theta = mc.Uninformative('theta', value=[1., 1.])
sigma = mc.Uniform('sigma', lower=0., upper=100., value=1.)
# Model
@mc.deterministic
def y_mean(theta=theta, occupants=data_evac.occupants):
return theta[0]*occupants + theta[1]
# Likelihood
# The likelihood is N(y_mean, sigma^2), where sigma
# is pulled from a uniform distribution.
y_obs = mc.Normal('y_obs',
value=data_evac.flow,
mu=y_mean,
tau=sigma**-2,
observed=True)
return vars()
def power_law():
"""
    PyMC configuration with a power-law model.
    This is an example of a Bayesian MCMC configuration that uses
    a power-law equation (theta[0]*occupants**theta[1]) for the mean response.
"""
# Priors
theta = mc.Uniform('theta',
lower=[-10, -5], upper=[10, 5], value=[0, 0])
sigma = mc.Uniform('sigma', lower=0., upper=100., value=1.)
# Model
@mc.deterministic
def y_mean(theta=theta, occupants=data_evac.occupants):
return theta[0] * (occupants**theta[1])
# Likelihood
# The likelihood is N(y_mean, sigma^2), where sigma
# is pulled from a uniform distribution.
y_obs = mc.Normal('y_obs',
value=data_evac.flow,
mu=y_mean,
tau=sigma**-2,
observed=True)
return vars()
| bsd-3-clause | -6,766,210,754,383,178,000 | 25.544118 | 68 | 0.578947 | false |
fidothe/rgf | spec/rgf/dsl_spec.py | 1 | 1149 | from rgf.dsl import *
from rgf.core.examples import ExampleGroup, ExampleSuite
def first_test_function(world):
world.has_been_run = True
def before_func(world):
world.before_was_run = True
with subject('DSL'):
@it('provides subject helper context to create and set current ExampleGroup')
def spec(world):
eg = subject('This Example Group')
assert type(eg) is ExampleGroup
@it('provides it() decorator creator. The decorator creates Examples on the current ExampleGroup')
def spec(world):
example_suite = ExampleSuite()
ExampleSuite.set_suite(example_suite)
with subject('Example Group with examples added by it()') as eg:
decorator = it('Example description created by it()')
example = decorator(first_test_function)
assert eg.examples == [example]
@it("provides before() decorator creator. The decorator adds a function to the current ExampleGroup's before runner")
def spec(world):
with subject('Example Group with before function') as eg:
before(before_func)
assert eg.before_function is before_func
| mit | -3,633,537,729,751,433,700 | 37.3 | 121 | 0.678851 | false |
joberreiter/pyload | module/plugins/internal/MultiHoster.py | 1 | 4606 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.Plugin import Fail, encode
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, replace_patterns, set_cookie, set_cookies
class MultiHoster(SimpleHoster):
__name__ = "MultiHoster"
__type__ = "hoster"
__version__ = "0.53"
__status__ = "testing"
__pattern__ = r'^unmatchable$'
__config__ = [("use_premium" , "bool", "Use premium account if available" , True),
("fallback_premium", "bool", "Fallback to free download if premium fails", True),
("chk_filesize" , "bool", "Check file size" , True),
("revertfailed" , "bool", "Revert to standard download if fails" , True)]
__description__ = """Multi hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]")]
PLUGIN_NAME = None
LEECH_HOSTER = False
LOGIN_ACCOUNT = True
def init(self):
self.PLUGIN_NAME = self.pyload.pluginManager.hosterPlugins[self.__name__]['name']
def _log(self, level, plugintype, pluginname, messages):
return super(MultiHoster, self)._log(level,
plugintype,
pluginname,
(self.PLUGIN_NAME,) + messages)
def setup(self):
self.chunk_limit = 1
self.multiDL = bool(self.account)
self.resume_download = self.premium
def prepare(self):
#@TODO: Recheck in 0.4.10
plugin = self.pyload.pluginManager.hosterPlugins[self.__name__]
name = plugin['name']
module = plugin['module']
klass = getattr(module, name)
self.get_info = klass.get_info
if self.DIRECT_LINK is None:
direct_dl = self.__pattern__ != r'^unmatchable$' and re.match(self.__pattern__, self.pyfile.url)
else:
direct_dl = self.DIRECT_LINK
super(MultiHoster, self).prepare()
self.direct_dl = direct_dl
def process(self, pyfile):
try:
self.prepare()
self.check_info() #@TODO: Remove in 0.4.10
if self.direct_dl:
self.log_info(_("Looking for direct download link..."))
self.handle_direct(pyfile)
                if self.link or self.last_download:
self.log_info(_("Direct download link detected"))
else:
self.log_info(_("Direct download link not found"))
if not self.link and not self.last_download:
self.preload()
self.check_errors()
self.check_status(getinfo=False)
if self.premium and (not self.CHECK_TRAFFIC or self.check_traffic()):
self.log_info(_("Processing as premium download..."))
self.handle_premium(pyfile)
elif not self.LOGIN_ACCOUNT or (not self.CHECK_TRAFFIC or self.check_traffic()):
self.log_info(_("Processing as free download..."))
self.handle_free(pyfile)
if not self.last_download:
self.log_info(_("Downloading file..."))
self.download(self.link, disposition=self.DISPOSITION)
self.check_download()
except Fail, e: #@TODO: Move to PluginThread in 0.4.10
if self.premium:
self.log_warning(_("Premium download failed"))
self.restart(premium=False)
elif self.get_config("revertfailed", True) \
and "new_module" in self.pyload.pluginManager.hosterPlugins[self.__name__]:
hdict = self.pyload.pluginManager.hosterPlugins[self.__name__]
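                # Temporarily detach the rewritten hoster mapping so that
                # pyfile.initPlugin() re-resolves the original plugin, then
                # restore it for later downloads.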
tmp_module = hdict['new_module']
tmp_name = hdict['new_name']
hdict.pop('new_module', None)
hdict.pop('new_name', None)
pyfile.initPlugin()
hdict['new_module'] = tmp_module
hdict['new_name'] = tmp_name
self.restart(_("Revert to original hoster plugin"))
else:
raise Fail(encode(e)) #@TODO: Remove `encode` in 0.4.10
def handle_premium(self, pyfile):
return self.handle_free(pyfile)
def handle_free(self, pyfile):
if self.premium:
raise NotImplementedError
else:
self.fail(_("Required premium account not found"))
| gpl-3.0 | 4,138,684,023,256,754,700 | 33.118519 | 120 | 0.534954 | false |
HolubTom/Queue | pCore/module/queue/Queue.py | 1 | 1138 | # -*- coding: utf-8 -*-
from pCore.module.queue.messages.Message import Message
__author__ = u"Tomas Holub"
__email__ = u"[email protected]"
class Queue(object):
u""" Class implementing FIFO principle """
def __init__(self, pDriver):
u""" Class constructor """
self.__aDriver = pDriver
def put(self, lMessage):
u"""
Puts message at the end of the queue
:param lMessage: Message object
:type lMessage: pCore.module.queue.messages.Message.Message
"""
assert isinstance(lMessage, Message)
lMessage.setQueued()
self.__aDriver.put(lMessage)
def get(self):
u"""
Returns and removes message from the beginning of the queue
:rtype : pCore.module.queue.messages.Message.Message
"""
lItem = self.__aDriver.get()
lItem.setDequeued()
return lItem
@property
def count(self):
u"""
Returns count of messages waiting in the line
:rtype : int
"""
return self.__aDriver.count
def getHistory(self):
return self.__aDriver.getHistory() | mit | -1,916,209,359,209,340,700 | 23.76087 | 67 | 0.591388 | false |
catapult-project/catapult | dashboard/dashboard/pinpoint/models/quest/run_browser_test.py | 3 | 1382 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Quest for running a browser test in Swarming."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from dashboard.pinpoint.models.quest import run_test
_DEFAULT_EXTRA_ARGS = ['--test-launcher-bot-mode']
class RunBrowserTest(run_test.RunTest):
@classmethod
def _ComputeCommand(cls, arguments):
if 'target' not in arguments:
raise ValueError('Missing "target" in arguments.')
# We are assuming that the 'target' already has the name of the browser
# test.
command = arguments.get(
'command', ['luci-auth', 'context', '--',
arguments.get('target')])
relative_cwd = arguments.get('relative_cwd', 'out/Release')
return relative_cwd, command
@classmethod
def _ExtraTestArgs(cls, arguments):
extra_test_args = []
# The browser test launcher only properly parses arguments in the
# --key=value format.
test_filter = arguments.get('test-filter')
if test_filter:
extra_test_args.append('--gtest_filter=%s' % test_filter)
extra_test_args += _DEFAULT_EXTRA_ARGS
extra_test_args += super(RunBrowserTest, cls)._ExtraTestArgs(arguments)
return extra_test_args
| bsd-3-clause | 280,661,727,197,403,650 | 32.707317 | 75 | 0.691027 | false |
robbie/anthill | anthill/people/models.py | 1 | 1958 | import datetime
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from tagging.fields import TagField
from markupfield.fields import MarkupField
from anthill.models import LocationModel
ROLES = (
('other', 'Community Member'),
('des', 'Designer'),
('dev', 'Developer'),
('both', 'Developer/Designer'),
)
MESSAGE_WAIT_PERIOD = 2
INITIAL_MAX_MESSAGES = 100
class Profile(LocationModel):
user = models.OneToOneField(User, related_name='profile')
url = models.URLField(blank=True)
about = MarkupField(blank=True, default_markup_type=settings.ANTHILL_DEFAULT_MARKUP)
role = models.CharField(max_length=5, choices=ROLES, default='other')
twitter_id = models.CharField(max_length=15, blank=True)
    skills = TagField('comma-separated list of your skills (e.g. python, django)')
# other metadata
allow_org_emails = models.BooleanField(default=False)
signup_date = models.DateTimeField(auto_now_add=True)
last_email_sent = models.DateTimeField(null=True)
num_emails_sent = models.IntegerField(default=0)
allowed_emails = models.IntegerField(default=INITIAL_MAX_MESSAGES)
def __unicode__(self):
return unicode(self.user)
def can_send_email(self):
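        # Rate limiting: allow sending only if at least MESSAGE_WAIT_PERIOD
        # minutes have passed since the last message and the per-user quota
        # has not been exhausted.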
if self.last_email_sent:
elapsed = datetime.datetime.now() - self.last_email_sent
else:
elapsed = datetime.timedelta(minutes=MESSAGE_WAIT_PERIOD+1)
return (elapsed > datetime.timedelta(minutes=MESSAGE_WAIT_PERIOD) and
self.num_emails_sent < self.allowed_emails)
def record_email_sent(self):
self.last_email_sent = datetime.datetime.now()
self.num_emails_sent += 1
self.save()
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
post_save.connect(create_profile, sender=User)
| bsd-3-clause | 3,611,347,824,985,564,000 | 35.259259 | 88 | 0.703269 | false |
chromium/chromium | components/autofill/PRESUBMIT.py | 4 | 3207 | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/components/autofill.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
USE_PYTHON3 = True
def _CheckNoBaseTimeCalls(input_api, output_api):
"""Checks that no files call base::Time::Now() or base::TimeTicks::Now()."""
pattern = input_api.re.compile(
r'(base::(Time|TimeTicks)::Now)\(\)',
input_api.re.MULTILINE)
files = []
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if (f.LocalPath().startswith('components/autofill/') and
not f.LocalPath().endswith("PRESUBMIT.py")):
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [ output_api.PresubmitPromptWarning(
        'Consider not calling base::Time::Now() or base::TimeTicks::Now() ' +
        'directly; use AutofillClock::Now() and ' +
        'Autofill::TickClock::NowTicks() instead. These clocks can be ' +
        'manipulated through TestAutofillClock and TestAutofillTickClock ' +
        'for testing purposes, and using AutofillClock and AutofillTickClock ' +
        'throughout Autofill code makes sure Autofill tests refer to the ' +
        'same (potentially manipulated) clock.',
files) ]
return []
def _CheckFeatureNames(input_api, output_api):
"""Checks that no features are enabled."""
pattern = input_api.re.compile(
r'\bbase::Feature\s+k(\w*)\s*{\s*"(\w*)"',
input_api.re.MULTILINE)
warnings = []
def exception(constant, feature):
if constant == "AutofillAddressEnhancementVotes" and \
feature == "kAutofillAddressEnhancementVotes":
return True
return False
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if (f.LocalPath().startswith('components/autofill/') and
f.LocalPath().endswith('features.cc')):
contents = input_api.ReadFile(f)
mismatches = [(constant, feature)
for (constant, feature) in pattern.findall(contents)
if constant != feature and not exception(constant, feature)]
if mismatches:
mismatch_strings = ['\t{} -- {}'.format(*m) for m in mismatches]
mismatch_string = format('\n').join(mismatch_strings)
warnings += [ output_api.PresubmitPromptWarning(
'Feature names should be identical to variable names:\n{}'
.format(mismatch_string),
[f]) ]
return warnings
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(_CheckNoBaseTimeCalls(input_api, output_api))
results.extend(_CheckFeatureNames(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
| bsd-3-clause | 2,318,194,266,867,851,000 | 36.729412 | 79 | 0.67758 | false |
itamaro/home-control-web | HomeControlWeb/HomeControlWeb/settings.py | 1 | 4959 | # Django settings for HomeControlWeb project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'home_control.db',
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#hhpd0s&!dn)&!ki#q+!ncx6n7=%jzbkj=g1uu5k-8_#1k+*6a'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
'sillyauth.middleware.SillyAuthMiddleware',
'common.middleware.NavbarMiddleware',
)
ROOT_URLCONF = 'HomeControlWeb.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'HomeControlWeb.wsgi.application'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django_js_reverse',
'sillyauth',
'common',
'AC',
'cam',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler'
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'AC': {
'handlers': ['console'],
'level': 'DEBUG',
},
'cam': {
'handlers': ['console'],
'level': 'DEBUG',
},
}
}
# Secret password for Silly Auth authentication middleware
#SILLY_AUTH_PASS = ''
# Order of apps in navbar
NAVBAR_APPS = (
'AC',
'cam',
)
try:
from local_settings import *
except ImportError:
pass
| apache-2.0 | -3,709,662,254,056,702,000 | 29.801242 | 127 | 0.660617 | false |
cmollet/edifice | nyc/db_import_scripts/dcaScraper.py | 1 | 1325 | import urllib, urllib2
import os
from bs4 import BeautifulSoup as soup
import psycopg2
import time
from random import random
from math import ceil
TOP_URL = 'https://a866-bcportal.nyc.gov/BCPortals/LicenseCheckResults.aspx?'
spanid = 'ctl00_Content_lblTotalTop'
conn = psycopg2.connect(dbname='nyc')
cur = conn.cursor()
if not os.path.exists('dca'):
    os.mkdir('dca')
existingZips = os.listdir('dca')
cur.execute("SELECT DISTINCT zip FROM zip_codes;")
zips = [z[0] for z in cur.fetchall()]
def makepost(z, page):
data = {
'EntityName': '',
'LicenseNumber': '',
'Zip': z,
'LicCat': '',
'PageNumber': page
}
return urllib.urlencode(data)
for z in zips:
try:
if z not in existingZips:
time.sleep(random() * 10)
page = urllib2.urlopen(TOP_URL+makepost(z, '1')).read()
os.mkdir('dca/'+z)
total = soup(page).find('span', {"id": spanid}).text
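            # The portal appears to list 20 results per page, hence
            # ceil(total / 20) pages to fetch.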
numPages = int(ceil(float(total)/20))
f = open('dca/'+z+'/1.html', 'w')
f.write(page)
f.close()
p = 2
while p < numPages + 1:
time.sleep(random() * 10)
page = urllib2.urlopen(TOP_URL+makepost(z, p)).read()
f = open('dca/'+z+'/'+str(p)+'.html', 'w')
f.write(page)
f.close()
p += 1
except AttributeError:
pass
conn.close()
| mit | -6,664,058,214,418,876,000 | 23.090909 | 77 | 0.598491 | false |
nanalelfe/fofe-ner | LinkingUtil.py | 1 | 10706 | #!/eecs/research/asr/mingbin/python-workspace/hopeless/bin/python
import numpy, os, codecs, itertools, logging
from gigaword2feature import *
from scipy.sparse import csr_matrix
from sklearn import preprocessing
logger = logging.getLogger( __name__ )
def LoadED( rspecifier, language = 'eng' ):
entity2cls = { # KBP2015 label
'PER_NAM' : 0,
'PER_NOM' : 5,
'ORG_NAM' : 1,
'GPE_NAM' : 2,
'LOC_NAM' : 3,
'FAC_NAM' : 4,
'TTL_NAM' : 5,
# iflytek label
'PER_NAME' : 0,
'ORG_NAME' : 1,
'GPE_NAME' : 2,
'LOC_NAME' : 3,
'FAC_NAME' : 4,
'PER_NOMINAL' : 5,
'ORG_NOMINAL' : 6,
'GPE_NOMINAL' : 7,
'LOC_NOMINAL' : 8,
'FAC_NOMINAL' : 9,
'PER_NOM' : 5,
'ORG_NOM' : 6,
'GPE_NOM' : 7,
'LOC_NOM' : 8,
'FAC_NOM' : 9,
'TITLE_NAME' : 5,
'TITLE_NOMINAL' : 5
}
if os.path.isfile( rspecifier ):
with codecs.open( rspecifier, 'rb', 'utf8' ) as fp:
processed, original = fp.read().split( u'=' * 128, 1 )
original = original.strip()
# texts, tags, failures = processed.split( u'\n\n\n', 2 )
texts = processed.split( u'\n\n\n' )[0]
for text in texts.split( u'\n\n' ):
parts = text.split( u'\n' )
# assert len(parts) in [2, 3], 'sentence, offsets, labels(optional)'
if len( parts ) not in [2, 3]:
logger.exception( text )
continue
sent, boe, eoe, target, mids, spelling = parts[0].split(u' '), [], [], [], [], []
offsets = map( lambda x : (int(x[0]), int(x[1])),
[ offsets[1:-1].split(u',') for offsets in parts[1].split() ] )
assert len(offsets) == len(sent), rspecifier + '\n' + \
str( offsets ) + '\n' + str( sent ) + '\n%d vs %d' % (len(offsets), len(sent))
if len(parts) == 3:
for ans in parts[-1].split():
try:
begin_idx, end_idx, mid, mention1, mention2 = ans[1:-1].split(u',')
target.append( entity2cls[str(mention1 + u'_' + mention2)] )
boe.append( int(begin_idx) )
eoe.append( int(end_idx) )
mids.append( mid )
spelling.append( original[ offsets[boe[-1]][0] : offsets[eoe[-1] - 1][1] ] )
except ValueError as ex1:
logger.exception( rspecifier )
logger.exception( ans )
except KeyError as ex2:
logger.exception( rspecifier )
logger.exception( ans )
try:
assert 0 <= boe[-1] < eoe[-1] <= len(sent), \
'%s %d ' % (rspecifier.split('/')[-1], len(sent)) + \
' '.join( str(x) for x in [sent, boe, eoe, target, mids] )
except IndexError as ex:
logger.exception( rspecifier )
logger.exception( str(boe) + ' ' + str(eoe) )
continue
assert( len(boe) == len(eoe) == len(target) == len(mids) )
# move this part to processed_sentence
# if language == 'eng':
# for i,w in enumerate( sent ):
# sent[i] = u''.join( c if 0 <= ord(c) < 128 else chr(0) for c in list(w) )
yield sent, boe, eoe, target, mids, spelling
else:
for filename in os.listdir( rspecifier ):
for X in LoadED( os.path.join( rspecifier, filename ), language ):
yield X
def LoadEL( rspecifier, language = 'eng', window = 1 ):
if os.path.isfile( rspecifier ):
data = list( LoadED( rspecifier, language ) )
for i,(sent,boe,eoe,label,mid,spelling) in enumerate(data):
if len(label) > 0:
previous, next = [], []
for s,_,_,_,_,_ in data[i - window: i]:
previous.extend( s )
for s,_,_,_,_,_ in data[i + 1: i + 1 + window]:
next.extend( s )
yield previous + sent + next, \
[ len(previous) + b for b in boe ], \
[ len(previous) + e for e in eoe ], \
label, mid, spelling
else:
for filename in os.listdir( rspecifier ):
for X in LoadEL( os.path.join( rspecifier, filename ), language ):
yield X
def PositiveEL( embedding_basename,
rspecifier, language = 'eng', window = 1 ):
raw_data = list( LoadEL( rspecifier, language, window ) )
# with open( embedding_basename + '.word2vec', 'rb' ) as fp:
# shape = numpy.fromfile( fp, dtype = numpy.int32, count = 2 )
# projection = numpy.fromfile( fp, dtype = numpy.float32 ).reshape( shape )
# logger.debug( 'embedding loaded' )
with codecs.open( embedding_basename + '.wordlist', 'rb', 'utf8' ) as fp:
n_word = len( fp.read().strip().split() )
logger.debug( 'a vocabulary of %d words is used' % n_word )
numericizer = vocabulary( embedding_basename + '.wordlist',
case_sensitive = False )
bc = batch_constructor( [ rd[:4] for rd in raw_data ],
numericizer, numericizer,
window = 1024, n_label_type = 7 )
logger.debug( bc )
index_filter = set([2, 3, 6, 7, 8])
mid_itr = itertools.chain.from_iterable( rd[-2] for rd in raw_data )
mention = itertools.chain.from_iterable( rd[-1] for rd in raw_data )
# for sent, boe, eoe, _, _ in raw_data:
# for b,e in zip( boe, eoe ):
# mention.append( sent[b:e] )
# feature_itr = bc.mini_batch( 1,
# shuffle_needed = False,
# overlap_rate = 0, disjoint_rate = 0,
# feature_choice = 7 )
# # assert( len(list(mid_itr)) == len(list(feature_itr)) )
# for mid, feature in zip( mid_itr, feature_itr ):
# yield mid, \
# [ f.reshape([-1])[1::2] if i in index_filter else f.reshape([-1]) \
# for i,f in enumerate(feature[:9]) ]
l1v, r1v, l1i, r1i, l2v, r2v, l2i, r2i, bow = \
bc.mini_batch( len(bc.positive),
shuffle_needed = False,
overlap_rate = 0,
disjoint_rate = 0,
feature_choice = 7 ).next()[:9]
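    # Rebuild sparse context matrices from the batch's (value, index) pairs:
    # one row per positive mention, one column per vocabulary word.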
l1 = csr_matrix( ( l1v, ( l1i[:,0].reshape([-1]), l1i[:,1].reshape([-1]) ) ),
shape = [len(bc.positive), n_word] ).astype( numpy.float32 )
l2 = csr_matrix( ( l2v, ( l2i[:,0].reshape([-1]), l2i[:,1].reshape([-1]) ) ),
shape = [len(bc.positive), n_word] ).astype( numpy.float32 )
r1 = csr_matrix( ( r1v, ( r1i[:,0].reshape([-1]), r1i[:,1].reshape([-1]) ) ),
shape = [len(bc.positive), n_word] ).astype( numpy.float32 )
r2 = csr_matrix( ( r2v, ( r2i[:,0].reshape([-1]), r2i[:,1].reshape([-1]) ) ),
shape = [len(bc.positive), n_word] ).astype( numpy.float32 )
bow = csr_matrix( ( numpy.ones( bow.shape[0] ),
( bow[:,0].reshape([-1]), bow[:,1].reshape([-1]) ) ),
shape = [len(bc.positive), n_word] ).astype( numpy.float32 )
return list(mid_itr), mention, l1, l2, r1, r2, bow
def LoadTfidf( tfidf_basename, col ):
indices = numpy.fromfile( tfidf_basename + '.indices', dtype = numpy.int32 )
data = numpy.fromfile( tfidf_basename + '.data', dtype = numpy.float32 )
indptr = numpy.fromfile( tfidf_basename + '.indptr', dtype = numpy.int32 )
assert indices.shape == data.shape
mid2tfidf = csr_matrix( (data, indices, indptr),
shape = (indptr.shape[0] - 1, col) )
del data, indices, indptr
mid2tfidf = mid2tfidf.astype( numpy.float32 )
with open( tfidf_basename + '.list' ) as fp:
idx2mid = [ mid[1:-1] for mid in fp.readlines() ]
mid2idx = { m:i for i,m in enumerate( idx2mid ) }
return mid2tfidf, idx2mid, mid2idx
if __name__ == '__main__':
logging.basicConfig( format = '%(asctime)s : %(levelname)s : %(message)s',
level = logging.DEBUG )
embedding_basename = 'word2vec/gigaword128-case-insensitive'
tfidf_basename = '/eecs/research/asr/Shared/Entity_Linking_training_data_from_Freebase/mid2tfidf'
with open( embedding_basename + '.word2vec', 'rb' ) as fp:
shape = numpy.fromfile( fp, dtype = numpy.int32, count = 2 )
projection = numpy.fromfile( fp, dtype = numpy.float32 ).reshape( shape )
logger.info( 'embedding loaded' )
solution, mention, l1, l2, r1, r2, bow = PositiveEL( embedding_basename,
'kbp-raw-data/eng-train-parsed' )
logger.info( 'fofe loaded' )
mid2tfidf, idx2mid, mid2idx = LoadTfidf( tfidf_basename, projection.shape[0] )
logger.info( 'tfidf loaded' )
l1p = l1.dot( projection )
l2p = l2.dot( projection )
r1p = r1.dot( projection )
r2p = r2.dot( projection )
bowp = bow.dot( projection )
mid2tfidfp = mid2tfidf.dot( projection )
logger.info( 'projection done' )
del l1, l2, r1, r2, bow, mid2tfidf
bow_coef = 0.5
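    # Interpolate between the bag-of-words embedding and the average of the
    # left/right FOFE context embeddings.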
    feature = bow_coef * bowp + (1. - bow_coef) * (l2p + r2p) / 2.
del l1p, l2p, r1p, r2p, bowp
normalized_feature = preprocessing.normalize(feature, norm = 'l2')
logger.info( 'feature computed & normalized' )
del feature
normalized_mid2tfidfp = preprocessing.normalize(mid2tfidfp, norm = 'l2')
logger.info( 'tfidf normalized' )
del mid2tfidfp
for i,(s,m) in enumerate( zip( solution, mention ) ):
print s, m
# similarity = numpy.dot( normalized_feature[i:i + 1], normalized_mid2tfidfp.T )
# top = numpy.argsort( similarity, axis = 1, kind = 'heapsort' )
# print m, s, idx2mid[top[0,-1]]
| mit | -8,459,524,670,432,376,000 | 41.149606 | 104 | 0.48272 | false |
5-star/plugin.video.last_played | default.py | 1 | 11689 | # -*- coding: utf-8 -*-
import os, time
import json
import ssl
import xbmc
import xbmcaddon
import xbmcvfs
try:
from urllib.parse import quote, unquote
import urllib.request
python="3"
except ImportError:
from urllib import quote, unquote
import urllib2
python="2"
addon = xbmcaddon.Addon()
enable_debug = addon.getSetting('enable_debug')
if addon.getSetting('custom_path_enable') == "true" and addon.getSetting('custom_path') != "":
txtpath = addon.getSetting('custom_path')
else:
txtpath = xbmc.translatePath(addon.getAddonInfo('profile'))
if not os.path.exists(txtpath):
os.makedirs(txtpath)
txtfile = txtpath + "lastPlayed.json"
starmovies = addon.getSetting('starmovies')
lang = addon.getLocalizedString
class LP:
title = ""
year = ""
thumbnail = ""
fanart = ""
showtitle = ""
season = ""
episode = ""
DBID = ""
type = ""
file=""
video=""
artist=""
vidPos=1
vidTot=1000
lp=LP()
player_monitor = xbmc.Monitor()
def getRequest2(url):
try:
context = ssl._create_unverified_context()
request = urllib2.Request(url)
response = urllib2.urlopen(request, context=context)
return response
except:
pass
def getRequest3(url):
try:
req = urllib.request.Request(url)
with urllib.request.urlopen(req) as response:
return(response.read())
except:
pass
# Builds JSON request with provided json data
def buildRequest(method, params, jsonrpc='2.0', rid='1'):
request = { 'jsonrpc' : jsonrpc, 'method' : method, 'params' : params, 'id' : rid }
return request
# Checks JSON response and returns boolean result
def checkReponse(response):
result = False
if ( ('result' in response) and ('error' not in response) ):
result = True
return result
# Executes JSON request and returns the JSON response
def JSexecute(request):
request_string = json.dumps(request)
response = xbmc.executeJSONRPC(request_string)
if ( response ):
response = json.loads(response)
return response
# Performs single JSON query and returns result boolean, data dictionary and error string
def JSquery(request):
result = False
data = {}
error = ''
if ( request ):
response = JSexecute(request)
if ( response ):
result = checkReponse(response)
if ( result ):
data = response['result']
else: error = response['error']
return (result, data, error)
def send2starmovies(line):
if enable_debug == "true": xbmc.log("<<<plugin.video.last_played (starmovies) "+str(line), 3)
if LP.vidPos/LP.vidTot<0.8: return
wid = 0
if line["id"]!="": wid = int(line["id"])
if line["type"]=="movie": typ="M"
elif line["type"]=="episode": typ="S"
elif line["type"]=="song": typ="P"
else: typ="V"
if enable_debug == "true": xbmc.log("<<<plugin.video.last_played (starmovies) "+str(addon.getSetting('smovies')), 3)
if typ=="M" and addon.getSetting('smovies') != "true": return
if typ=="S" and addon.getSetting('stv') != "true": return
if typ=="V" : return
if typ=="P" : return
imdbId = ""
tvdbId = ""
orgTitle = ""
showTitle = line["show"]
season = line["season"]
episode = line["episode"]
thumbnail = line["thumbnail"]
fanart = line["fanart"]
if enable_debug == "true": xbmc.log("<<<plugin.video.last_played (starmovies) "+str(wid), 3)
if wid>0:
if typ=="M":
request = buildRequest('VideoLibrary.GetMovieDetails', {'movieid' : wid, 'properties' : ['imdbnumber', 'originaltitle']})
result, data = JSquery(request)[:2]
if ( result and 'moviedetails' in data ):
imdbId = data['moviedetails']["imdbnumber"]
orgTitle = data['moviedetails']["originaltitle"]
elif typ=="S":
request = buildRequest('VideoLibrary.GetEpisodeDetails', {'episodeid' : wid, 'properties' : ['tvshowid', 'season', 'episode']})
result, data = JSquery(request)[:2]
if ( result and 'episodedetails' in data ):
season = data['episodedetails']["season"]
episode = data['episodedetails']["episode"]
request = buildRequest('VideoLibrary.GetTvShowDetails', {'tvshowid' : data['episodedetails']["tvshowid"], 'properties' : ['imdbnumber', 'originaltitle']})
result, data = JSquery(request)[:2]
if ( result and 'tvshowdetails' in data ):
showTitle = data['tvshowdetails']["label"]
orgTitle = data['tvshowdetails']["originaltitle"]
tvdbId = data['tvshowdetails']["imdbnumber"]
xvideo = line["file"]
if "video" in line and line["video"]!="": xvideo = line["video"]
url = "https://www.starmovies.org/WebService.asmx/kodiWatch?tmdbId="
url = url + "&tvdbId=" + tvdbId
url = url + "&imdbId=" + imdbId
url = url + "&kodiId=" + str(wid)
url = url + "&title=" + quote(line["title"].encode("utf-8"))
url = url + "&orgtitle=" + quote(orgTitle.encode("utf-8"))
url = url + "&year=" + str(line["year"])
url = url + "&source=" + quote(line["source"].encode("utf-8"))
url = url + "&type=" + typ
url = url + "&usr=" + quote(addon.getSetting('TMDBusr').encode("utf-8"))
url = url + "&pwd=" + addon.getSetting('TMDBpwd')
url = url + "&link=" + quote(xvideo.encode("utf-8"))
url = url + "&thumbnail=" + quote(line["thumbnail"].encode("utf-8"))
url = url + "&fanart=" + quote(line["fanart"].encode("utf-8"))
url = url + "&showtitle=" + quote(showTitle.encode("utf-8"))
url = url + "&season=" + str(season)
url = url + "&episode=" + str(episode)
url = url + "&version=1.22"
url = url + "&date=" + line["date"]
if enable_debug == "true": xbmc.log("<<<plugin.video.last_played (starmovies) "+url, 3)
if python=="3":
response = getRequest3(url)
else:
response = getRequest2(url)
if enable_debug == "true": xbmc.log("<<<plugin.video.last_played (starmovies) response:"+str(response), 3)
def videoEnd():
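    # Poll the ListItem path for up to ~5 s (50 tries x 0.1 s); Kodi may not
    # expose it immediately after playback stops.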
retry=1
xsource=''
while xsource=='' and retry<50:
xsource = xbmc.getInfoLabel('ListItem.Path')
retry=retry+1
time.sleep(0.1)
if xsource=='': xsource="player"
xtitle = lp.title
xyear = lp.year
xartist = lp.artist
xid = lp.DBID
xtype = lp.type
xfanart = unquote(lp.fanart).replace("image://","").rstrip("/")
xthumb = unquote(lp.thumbnail).replace("image://","").rstrip("/")
if ".jpg" not in xthumb.lower(): xthumb=xfanart
xfile = lp.file.strip()
xvideo = lp.video.strip()
try:
xshow = lp.showtitle
xseason = lp.season
xepisode = lp.episode
except:
pass
if xid!="" and int(xid)>0:
if xtype=="movie": xsource=lang(30002)
elif xtype=="episode": xsource=lang(30003)
elif xtype=="musicvideo": xsource=lang(30004)
else: xsource=xtype
else:
ads = xsource.split("/")
if len(ads) > 2: xsource = ads[2]
# if source is on blacklist, do not keep
if xtype=="movie" and addon.getSetting('movies') != "true": return
if xtype=="episode" and addon.getSetting('tv') != "true": return
if xtype=="song" and addon.getSetting('music') != "true": return
if xtype!="movie" and xtype!="episode" and xtype!="song" and addon.getSetting('videos') != "true": return
# if source is on blacklist, do not keep
if addon.getSetting('blackadddon').lower().find(xsource.lower())>=0: return
if addon.getSetting('blackfolder')!="":
for dir in addon.getSetting('blackfolder').lower().split(","):
if xsource.lower().find(dir)>=0: return
if addon.getSetting('blackvideo')!="":
for vid in addon.getSetting('blackvideo').lower().split(","):
if xtitle.lower().find(vid)>=0: return
if enable_debug == "true": xbmc.log("<<<plugin.video.last_played (end source) "+xsource, 3)
if xbmcvfs.exists(txtfile):
f = xbmcvfs.File(txtfile)
try: lines = json.load(f)
except: lines = []
f.close()
else: lines = []
replay = "N"
for line in lines:
if xfile!="" and xfile==line["file"]: replay = "S"
if replay == "S":
lines.remove(line)
line.update({"date": time.strftime("%Y-%m-%d")})
line.update({"time": time.strftime("%H:%M:%S")})
lines.insert(0, line)
replay = "S"
if enable_debug == "true": xbmc.log("<<<plugin.video.last_played (end final replay) "+str(line), 3)
if starmovies == "true": send2starmovies(line)
break
if replay=="N":
newline = {"source":xsource, "title":xtitle, "year":xyear, "artist":xartist, "file":xfile, "video":xvideo, "id":xid, "type":xtype,"thumbnail":xthumb, "fanart":xfanart, "show":xshow, "season":xseason, "episode":xepisode, "date":time.strftime("%Y-%m-%d"), "time":time.strftime("%H:%M:%S") }
lines.insert(0, newline)
if enable_debug == "true": xbmc.log("<<<plugin.video.last_played (end final play) "+str(newline), 3)
if starmovies == "true": send2starmovies(newline)
if len(lines)>100:
del lines[len(lines)-1]
if len(lines)>0:
f = xbmcvfs.File(txtfile, 'w')
json.dump(lines, f)
f.close()
class KodiPlayer(xbmc.Player):
def __init__(self, *args, **kwargs):
kplayer=xbmc.Player.__init__(self)
@classmethod
def onPlayBackEnded(self):
videoEnd()
@classmethod
def onPlayBackStopped(self):
videoEnd()
def onPlayBackStarted(self):
if xbmc.getCondVisibility('Player.HasMedia'):
lp.video = self.getPlayingFile()
request = {"jsonrpc": "2.0", "method": "Player.GetItem", "params": { "properties": ["title", "year", "thumbnail", "fanart", "showtitle", "season", "episode", "file"], "playerid": 1 }, "id": "VideoGetItem"}
result, data = JSquery(request)[:2]
if(len(data)==0):
request = {"jsonrpc": "2.0", "method": "Player.GetItem", "params": { "properties": ["title", "album", "artist", "duration", "thumbnail", "file", "fanart", "streamdetails"], "playerid": 0 }, "id": "AudioGetItem"}
result, data = JSquery(request)[:2]
if len(data)>0:
item=data["item"]
if enable_debug == "true": xbmc.log("<<<plugin.video.last_played (start play) "+str(item), 3)
if "title" in item: lp.title = item["title"]
if lp.title=="" and "label" in item: lp.title = item["label"]
if "year" in item: lp.year = item["year"]
if "thumbnail" in item: lp.thumbnail = item["thumbnail"]
if "fanart" in item: lp.fanart = item["fanart"]
if "showtitle" in item: lp.showtitle = item["showtitle"]
if "season" in item and item["season"]>0: lp.season = item["season"]
if "episode" in item and item["episode"]>0: lp.episode = item["episode"]
if "id" in item: lp.DBID = item["id"]
if "type" in item: lp.type = item["type"]
if "file" in item: lp.file = item["file"]
if "artist" in item: lp.artist = item["artist"]
class KodiRunner:
player = KodiPlayer()
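    # Service loop: once per second, record the playback position and total
    # duration so the watched-percentage check in send2starmovies() is fresh.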
while not player_monitor.abortRequested():
if player.isPlaying():
LP.vidPos=player.getTime()
LP.vidTot=player.getTotalTime()
player_monitor.waitForAbort(1)
del player
| gpl-3.0 | 7,753,852,709,783,120,000 | 37.32459 | 296 | 0.581915 | false |
christiansandberg/canopen | canopen/emcy.py | 1 | 4163 | import struct
import logging
import threading
import time
# Error code, error register, vendor specific data
EMCY_STRUCT = struct.Struct("<HB5s")
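# e.g. EMCY_STRUCT.pack(0x2310, 0x02, b"\x00" * 5) -> an 8-byte EMCY payload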
logger = logging.getLogger(__name__)
class EmcyConsumer(object):
def __init__(self):
#: Log of all received EMCYs for this node
self.log = []
#: Only active EMCYs. Will be cleared on Error Reset
self.active = []
self.callbacks = []
self.emcy_received = threading.Condition()
def on_emcy(self, can_id, data, timestamp):
code, register, data = EMCY_STRUCT.unpack(data)
entry = EmcyError(code, register, data, timestamp)
with self.emcy_received:
if code & 0xFF00 == 0:
# Error reset
self.active = []
else:
self.active.append(entry)
self.log.append(entry)
self.emcy_received.notify_all()
for callback in self.callbacks:
callback(entry)
def add_callback(self, callback):
"""Get notified on EMCY messages from this node.
:param callback:
Callable which must take one argument of an
:class:`~canopen.emcy.EmcyError` instance.
"""
self.callbacks.append(callback)
def reset(self):
"""Reset log and active lists."""
self.log = []
self.active = []
def wait(self, emcy_code=None, timeout=10):
"""Wait for a new EMCY to arrive.
:param int emcy_code: EMCY code to wait for
:param float timeout: Max time in seconds to wait
:return: The EMCY exception object or None if timeout
:rtype: canopen.emcy.EmcyError
"""
end_time = time.time() + timeout
while True:
with self.emcy_received:
prev_log_size = len(self.log)
self.emcy_received.wait(timeout)
if len(self.log) == prev_log_size:
# Resumed due to timeout
return None
# Get last logged EMCY
emcy = self.log[-1]
logger.info("Got %s", emcy)
if time.time() > end_time:
# No valid EMCY received on time
return None
if emcy_code is None or emcy.code == emcy_code:
# This is the one we're interested in
return emcy
class EmcyProducer(object):
def __init__(self, cob_id):
self.network = None
self.cob_id = cob_id
def send(self, code, register=0, data=b""):
payload = EMCY_STRUCT.pack(code, register, data)
self.network.send_message(self.cob_id, payload)
def reset(self, register=0, data=b""):
payload = EMCY_STRUCT.pack(0, register, data)
self.network.send_message(self.cob_id, payload)
class EmcyError(Exception):
"""EMCY exception."""
DESCRIPTIONS = [
# Code Mask Description
(0x0000, 0xFF00, "Error Reset / No Error"),
(0x1000, 0xFF00, "Generic Error"),
(0x2000, 0xF000, "Current"),
(0x3000, 0xF000, "Voltage"),
(0x4000, 0xF000, "Temperature"),
(0x5000, 0xFF00, "Device Hardware"),
(0x6000, 0xF000, "Device Software"),
(0x7000, 0xFF00, "Additional Modules"),
(0x8000, 0xF000, "Monitoring"),
(0x9000, 0xFF00, "External Error"),
(0xF000, 0xFF00, "Additional Functions"),
(0xFF00, 0xFF00, "Device Specific")
]
def __init__(self, code, register, data, timestamp):
#: EMCY code
self.code = code
#: Error register
self.register = register
#: Vendor specific data
self.data = data
#: Timestamp of message
self.timestamp = timestamp
def get_desc(self):
for code, mask, description in self.DESCRIPTIONS:
if self.code & mask == code:
return description
return ""
def __str__(self):
text = "Code 0x{:04X}".format(self.code)
description = self.get_desc()
if description:
text = text + ", " + description
return text
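

# Minimal usage sketch (illustrative only; in real code the CAN network layer
# calls on_emcy() when an EMCY frame arrives from a node):
if __name__ == "__main__":
    def report(error):
        print("EMCY received: %s" % error)

    consumer = EmcyConsumer()
    consumer.add_callback(report)
    # Simulate an over-current EMCY frame (error code 0x2310, register 0x02)
    consumer.on_emcy(0x81, EMCY_STRUCT.pack(0x2310, 0x02, b"\x00" * 5), 0.0)
    assert consumer.active and consumer.active[0].code == 0x2310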
| mit | 6,766,806,848,442,568,000 | 29.837037 | 63 | 0.554648 | false |
benjsmith/mubiomics | MPSDemultiplexer/patricia.py | 1 | 3563 | #!/usr/local/bin/python
#patricia.py
#Python class definitions for creating a radix-like trie.
# Copyright (C) <2012> <Benjamin C. Smith>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class patricia:
"""Creates Patricia(Radix)-like Tries using dictionaries. Code found on
StackOverflow.com"""
def __init__(self, *args, **kwargs):
self._d = {}
def addWord(self,w):
d = self._d
i = 0
while 1:
try:
node = d[w[i:i+1]]
except KeyError:
if d:
d[w[i:i+1]] = [w[i+1:],{}]
else:
if w[i:i+1] == '':
return
else:
if i != 0:
d[''] = ['',{}]
d[w[i:i+1]] = [w[i+1:],{}]
return
i += 1
if w.startswith(node[0],i):
if len(w[i:]) == len(node[0]):
if node[1]:
try:
node[1]['']
except KeyError:
d = node[1]
d[''] = ['',{}]
return
else:
i += len(node[0])
d = node[1]
else:
ii = i
j = 0
while ii != len(w) and j != len(node[0]) and \
w[ii:ii+1] == node[0][j:j+1]:
ii += 1
j += 1
tmpd = {}
tmpd[node[0][j:j+1]] = [node[0][j+1:],node[1]]
tmpd[w[ii:ii+1]] = [w[ii+1:],{}]
d[w[i-1:i]] = [node[0][:j],tmpd]
return
def isWord(self,w):
d = self._d
i = 0
while 1:
try:
node = d[w[i:i+1]]
except KeyError:
return False
i += 1
if w.startswith(node[0],i):
if len(w[i:]) == len(node[0]):
if node[1]:
try:
node[1]['']
except KeyError:
return False
return True
else:
i += len(node[0])
d = node[1]
else:
return False
def isPrefix(self,w):
d = self._d
i = 0
wlen = len(w)
while 1:
try:
node = d[w[i:i+1]]
except KeyError:
return False
i += 1
if w.startswith(node[0][:wlen-i],i):
if wlen - i > len(node[0]):
i += len(node[0])
d = node[1]
else:
return True
else:
return False
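
    # Minimal usage sketch (illustrative only):
    #   trie = patricia()
    #   for word in ('cat', 'car', 'cart'): trie.addWord(word)
    #   assert trie.isWord('car') and trie.isWord('cart')
    #   assert trie.isPrefix('ca') and not trie.isWord('ca')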
__getitem__ = isWord | gpl-3.0 | 3,643,960,447,306,891,000 | 30.263158 | 75 | 0.386191 | false |
yaricom/brainhash | src/experiment_cA5_1_dt_th_al_ah_bl_bh.py | 1 | 2061 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The experiment with 10 Hz/5 Hz, wisp, attention, 70, cA 5, bands (delta, theta,
alpha low, alpha high, beta low, beta high), batch size = 1, and a balanced
data set
@author: yaric
"""
import experiment as ex
import config
from time import time
n_hidden = 5
batch_size = 1
experiment_name = 'cA_%d_%d_dt-th-a_l-a_h-b_l-b_h' % (n_hidden, batch_size) # will be used as parent dir for analyzer results
# The sample records identifiers
signal_ids = ['IO_10_2', 'IO_TXT', 'IO_SKY', 'KS_10_2', 'RO_10_2']
noise_ids = ['noise']
# Setup analyzer configuration
analyzer_config = ex.defaultAnalyzerConfig()
analyzer_config['batch_size'] = batch_size
analyzer_config['learning_rate'] = 0.1
analyzer_config['n_hidden'] = n_hidden
analyzer_config['training_epochs'] = 50000
analyzer_config['encoder'] = 'cA'
analyzer_config['bands'] = 'delta,theta,alpha_l,alpha_h,beta_l,beta_h'
start = time()
#
# Run analyzer
#
print("\nStart analysis with parameters:\n%s\n" % analyzer_config)
print("Start analysis for signal records: %s" % signal_ids)
ex.runEEGAnalyzerWithIDs(ids_list=signal_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
print("Start analysis for noise records: %s" % noise_ids)
ex.runEEGAnalyzerWithIDs(ids_list=noise_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
#
# Run classifiers
#
signal_dir = "%s/%s" % (config.analyzer_out_dir, experiment_name)
noise_dir = "%s/%s/%s" % (config.analyzer_out_dir, experiment_name, noise_ids[0])
out_suffix = experiment_name
print("Run classifiers over analyzed records. \nSignal dir: %s\nNoise dir: %s"
% (signal_dir, noise_dir))
ex.runClassifier(signal_dir=signal_dir,
signal_records=signal_ids,
noise_dir=noise_dir,
out_suffix=out_suffix)
print("\n\nExperiment %s took %.2f seconds.\n"
% (experiment_name, time() - start))
| gpl-3.0 | -8,799,735,400,979,485,000 | 31.714286 | 135 | 0.640951 | false |
Vaei/ModularChannelBox | jtChannelBox_Menu_Rigging.py | 1 | 19478 | # jtChannelBox - Modular / Customizeable Channel Box
# Copyright (C) 2016 Jared Taylor
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------
# To request a commercial license please email me at my address:
# [email protected]
# --------------------------------------------------------------------------
from collections import OrderedDict
import jtChannelBox_Commands_Default as cbc
reload(cbc)
# --------------------------------------------------------------------------
# HELPER FUNCTIONS
# Used for menu creation
# --------------------------------------------------------------------------
# Variables used for helper functions
divider_step = [
0] # as list containing one int, because list is mutable and therefore passed by reference (so function can
# directly affect it without setting the variable)
menu_step = [0,
"genericKey"] # the key built off this becomes "genericKey_0", +1 is added to the 0 each time, it's
# used to generate unique keys
# --------------------------------------------------------------------------
# Helper function for creating dividers.
# USAGE: divider(menu_name)
def divider(_menu, step=divider_step):
_menu["divider_" + str(step[0])] = ["", 0, "divider", ""]
step[0] += 1
# --------------------------------------------------------------------------
# Helper function for creating menu items.
# USAGE:
# _menu : this is the OrderedDict storing the menu
# _label : the label for the menu item that the user sees on the menu
# _hasEnableConditions : if 0/False will always be available, if 1/True then will have conditions to meet before being
# enabled, by default this is whether an
# attribute is selected or not; you can override it in jtChannelBox.py in the function channelBox_Menu_States
# _type : various types are available and will be listed after the definition below, however for a default menu item
# simply enter "" with nothing in the string (empty string)
# _command : the function that is executed when the menu item is pressed
# _tooltip : (optional if no _key entered) this is assigned a default value of "" which equates to no tooltip, it is
# optional unless you enter a menu key
# _key : (optional) set a custom menu key, only required if you need to refer to the menu item later, will always need
# this for a checkbox or optionbox to query
# the state or when there's a corresponding variable attached in saved_states, if you enter a key, you must also
# enter a tooltip (can simply be "" for no tooltip)
# or the system will think the key is the tooltip
# Without using the function it would be:
#   menu_channels["keyItem"] = ["Key Selected", 1, "", cbc.channelbox_command_keyItem]
# With the function the equivalent is:
#   menu(menu_channels, "Key Selected", 1, "", cbc.channelbox_command_keyItem)
# -- the key will be set automatically to genericKey_0, which is OK because
# we don't need to refer back to this menu item.
# all KEYS must be UNIQUE per dict in python. This function handles it for you unless you need a specific key.
# Duplicate keys will be excluded/ignored.
def menu(_menu, _label, _has_enable_conditions, _type, _command, _tooltip="", _key=menu_step):
key = _key[1] + "_" + str(_key[0]) # build key based off menu_step
if _key is menu_step: # check if no custom key entered, increment step if true
_key[0] += 1
else: # custom key was entered, use that instead
key = _key
    _menu[key] = [_label, _has_enable_conditions, _type, _command, _tooltip if _tooltip != "" else None]
# TYPES for _type:
# "checkbox" : can be enabled or disabled with a box to the left of the item, you will need to set a custom key and
# add it also to the saved_states
# "optionbox" : has a secondary function that is used when clicking the option box, which is placed to the right of
# the item, you do not have to set a custom key
# "submenu" : replace the _command with integer defining how many of the following menu items are placed in this
# submenu, you do not have to set a custom key
# "radio" : replace the _command with integer defining how many of following menu items are a part of this radio
# collection, you will need to set a custom key and add it also to the saved_states
# "custom" : for behaviour that is not defined here, add to the function in jtChannelBox.py called channelBox_Menu
# _Custom for what happens for this specific key, you will need to set a custom key - for example, look at
# "selectFilterSet" and the specified function
# "divider" : this is also a type, but you would usually use the divider() function instead
# ----------------------End : Helper Functions End--------------------------
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# MENU ITEMS DICTS
# This is where you add your own menus
# --------------------------------------------------------------------------
# Read the "USAGE" for the helper functions if you don't know what to do
# --------------------------------------------------------------------------
# ------------------------------CHANNEL MENU--------------------------------
menu_channels = OrderedDict()
# -------------------------------------------------------------------------
# +submenu
menu(menu_channels, "Freeze", 0, "submenu", 4)
menu(menu_channels, "Translate", 0, "", cbc.channelbox_command_freezeTranslate)
menu(menu_channels, "Rotate", 0, "", cbc.channelbox_command_freezeRotate)
menu(menu_channels, "Scale", 0, "", cbc.channelbox_command_freezeScale)
menu(menu_channels, "All", 0, "optionbox", cbc.channelbox_command_freezeAll)
# -submenu
divider(menu_channels)
menu(menu_channels, "Key Selected", 1, "", cbc.channelbox_command_keyItem)
# menu(menu_channels, "Key All Keyable", 0, "", cbc.channelbox_command_keyAll)
# menu(menu_channels, "Breakdown Selected", 1, "", cbc.channelbox_command_breakdown)
# menu(menu_channels, "Breakdown All", 0, "", cbc.channelbox_command_breakdownAll)
# menu(menu_channels, "Mute Selected", 1, "", cbc.channelbox_command_mute)
# menu(menu_channels, "Mute All", 0, "", cbc.channelbox_command_muteAll)
# menu(menu_channels, "Unmute Selected", 1, "", cbc.channelbox_command_unmute)
# menu(menu_channels, "Unmute All", 0, "", cbc.channelbox_command_unmuteAll)
divider(menu_channels)
# menu(menu_channels, "Sync Timeline Display", 0, "checkbox", cbc.channelbox_command_syncTimeline,
# "Update timeline ticks based on selected channel box entries. Active list is used when there is no
# channel box selection",
# "syncTimeline")
# divider(menu_channels)
# menu(menu_channels, "Cut Selected", 1, "", cbc.channelbox_command_cut, "Cut selected keyframes")
menu(menu_channels, "Copy Selected", 1, "", cbc.channelbox_command_copy, "Copy selected keyframes")
menu(menu_channels, "Paste Selected", 1, "", cbc.channelbox_command_paste, "Paste selected keyframes")
menu(menu_channels, "Delete Selected", 1, "", cbc.channelbox_command_delete, "Delete selected keyframes")
divider(menu_channels)
# menu(menu_channels, "Duplicate Values", 1, "", cbc.channelbox_command_duplicateAttrValues)
menu(menu_channels, "Break Connections", 1, "", cbc.channelbox_command_break)
# menu(menu_channels, "Select Connection", 1, "", cbc.channelbox_command_selectConnection)
divider(menu_channels)
menu(menu_channels, "Lock Selected", 1, "", cbc.channelbox_command_lock)
menu(menu_channels, "Unlock Selected", 1, "", cbc.channelbox_command_unlock)
menu(menu_channels, "Hide Selected", 1, "", cbc.channelbox_command_unkeyable)
menu(menu_channels, "Lock and Hide Selected", 1, "", cbc.channelbox_command_lockUnkeyable)
menu(menu_channels, "Make Selected Nonkeyable", 1, "", cbc.channelbox_command_unkeyableDisplayed)
menu(menu_channels, "Make Selected Keyable", 1, "", cbc.channelbox_command_keyable)
divider(menu_channels)
# menu(menu_channels, "Add to Selected Layers", 1, "", cbc.channelbox_command_addToLayers,
# "Add selected attributes to selected Animation Layer")
# menu(menu_channels, "Remove From Selected Layers", 1, "", cbc.channelbox_command_removeFromLayers,
# "Remove selected attributes from selected Animation Layer")
menu(menu_channels, "Sync Graph Editor Display", 0, "checkbox", cbc.channelbox_command_syncGraph,
"Update Graph Editor based on selected channel box entries and set keyframes only on selected entries."
" Active list is used when there is no channel box selection",
"syncGraphEditor")
# -------------------------------EDIT MENU---------------------------------
menu_edit = OrderedDict()
# -------------------------------------------------------------------------
menu(menu_edit, "Expressions...", 1, "", cbc.channelbox_command_expression)
menu(menu_edit, "Set Driven Key...", 1, "", cbc.channelbox_command_driven)
menu(menu_edit, "Connection Editor", 0, "", cbc.channelbox_command_connectionEditor)
menu(menu_edit, "Graph Editor", 0, "", cbc.channelbox_command_animCurve)
menu(menu_edit, "Channel Control", 0, "", cbc.channelbox_command_channelControlEditor)
# menu(menu_edit, "Attribute Editor", 0, "", cbc.channelbox_command_attributeEditor)
# menu(menu_edit, "Material Attributes", 0, "", cbc.channelbox_command_materialAttributes)
divider(menu_edit)
menu(menu_edit, "Add Attribute", 0, "", cbc.channelbox_command_addAttribute)
menu(menu_edit, "Edit Attribute", 1, "", cbc.channelbox_command_renameAttribute)
menu(menu_edit, "Duplicate Attribute", 1, "", cbc.channelbox_command_duplicateAttr)
menu(menu_edit, "Delete Attributes", 1, "", cbc.channelbox_command_deleteAttributes)
divider(menu_edit)
# menu(menu_edit, "Select Node", 0, "", cbc.channelbox_command_selectNode)
# menu(menu_edit, "Delete Node", 0, "", cbc.channelbox_command_deleteNode)
# menu(menu_edit, "Delete History", 0, "", cbc.channelbox_command_deleteHistory)
# +submenu
menu(menu_edit, "Settings", 0, "submenu", 9)
# +radio
menu(menu_edit, "", 0, "radio", 2, "", "speedState")
menu(menu_edit, "Slow", 0, "", cbc.channelbox_command_setSpeed, "Channel box attributes move in increments of "
"0.1","speedSlow")
menu(menu_edit, "Medium", 0, "", cbc.channelbox_command_setSpeed, "Channel box attributes move in increments of "
"1.0", "speedMedium")
menu(menu_edit, "Fast", 0, "", cbc.channelbox_command_setSpeed, "Channel box attributes move in increments of "
"10.0", "speedFast")
# -radio
# divider(menu_edit)
# menu(menu_edit, "Hyperbolic", 0, "checkbox", cbc.channelbox_command_setHyperbolic,
# "Switch between increments acting as linear (unchecked) or curve-based", "hyperbolic")
divider(menu_edit)
menu(menu_edit, "Show Namespace", 0, "checkbox", cbc.channelbox_command_setNamespace, "", "showNamespace")
divider(menu_edit)
# +radio
menu(menu_edit, "", 0, "radio", 2, "", "manipsState")
menu(menu_edit, "No Manips", 0, "", cbc.channelbox_command_setManip, "", "noManips")
menu(menu_edit, "Invisible Manips", 0, "", cbc.channelbox_command_setManip, "", "invisibleManips")
menu(menu_edit, "Standard Manips", 0, "", cbc.channelbox_command_setManip, "", "standardManips")
# -radio
divider(menu_edit)
menu(menu_edit, "Change Precision...", 0, "", cbc.channelbox_command_precision,
"How many floating point values are displayed in the Channel Box", "changePrecision")
menu(menu_edit, "Reset to Default", 0, "", cbc.channelbox_command_reset)
# -submenu, +submenu
menu(menu_edit, "Channel Names", 0, "submenu", 3)
# +radio
menu(menu_edit, "", 0, "radio", 3, "", "namesState")
menu(menu_edit, "Nice", 0, "", cbc.channelbox_command_setChannelName, "", "nameNice")
menu(menu_edit, "Long", 0, "", cbc.channelbox_command_setChannelName, "", "nameLong")
menu(menu_edit, "Short", 0, "", cbc.channelbox_command_setChannelName, "", "nameShort")
# ------------------------------SHOW MENU----------------------------------
menu_show = OrderedDict()
# -------------------------------------------------------------------------
# +submenu
menu(menu_show, "Attributes", 0, "submenu", 8)
menu(menu_show, "Driven by Anim Curve", 0, "checkbox", cbc.channelbox_command_filter_itemCB, "", "attr_animCurve")
menu(menu_show, "Driven by Expression", 0, "checkbox", cbc.channelbox_command_filter_itemCB,
"View->Show Results in Graph Editor must be on to see curves driven by expressions", "attr_expression")
menu(menu_show, "Driven by Driven Key", 0, "checkbox", cbc.channelbox_command_filter_itemCB, "", "attr_drivenKey")
menu(menu_show, "Scale", 0, "checkbox", cbc.channelbox_command_filter_itemCB, "", "attr_scale")
menu(menu_show, "Rotate", 0, "checkbox", cbc.channelbox_command_filter_itemCB, "", "attr_rotate")
menu(menu_show, "Translate", 0, "checkbox", cbc.channelbox_command_filter_itemCB, "", "attr_translate")
menu(menu_show, "Scale Rotate Translate", 0, "checkbox", cbc.channelbox_command_filter_itemCB, "",
"attr_scaleRotateTranslate")
menu(menu_show, "User Defined", 0, "checkbox", cbc.channelbox_command_filter_itemCB,
"No effect if there are no user-defined attributes present", "attr_userDefined")
# -submenu
menu(menu_show, "Isolate Selected", 0, "optionbox", cbc.channelbox_command_isolateAttr, "", "selectAttr")
menu(menu_show, "Invert Shown", 1, "checkbox", cbc.channelbox_command_filter_invertShown,
"Toggle between isolating/hiding", "invertShown")
divider(menu_show)
menu(menu_show, "Show All", 0, "", cbc.channelbox_command_filter_filterShowAll, "Reset all attribute filters")
divider(menu_show)
menu(menu_show, "Select Filter Set", 1, "custom", cbc.channelbox_command_selectFilterSet, "", "selectFilterSet")
menu(menu_show, "Create Filter Set...", 1, "", cbc.channelbox_command_createFilterSet, "", "createFilterSet")
divider(menu_show)
menu(menu_show, "Channel Box Settings", 0, "submenu", 4)
menu(menu_show, "Label on Right-Click Menu", 0, "checkbox", cbc.channelbox_command_popupLabel,
"Show the menu label at top of right-click menu", "popupLabel")
menu(menu_show, "Show Icons", 0, "checkbox", cbc.channelbox_command_showIcons,
"Show the Manipulator, Speed, and Hyperbolic icons above the menu bar", "showIcons")
menu(menu_show, "Hide Unavailable Menu Items", 0, "checkbox", cbc.channelbox_command_hideUnavailable,
"Hide unavailable menu options instead of disabling them", "hideUnavailable")
divider(menu_show)
menu(menu_show, "Delete All Stored Settings (Full Reset)", 0, "", cbc.channelbox_command_cboxReset,
"Re-initialize this channel box at the default state")
# -------------------------------End : Menus-------------------------------
# -------------------------------------------------------------------------
# ------------------------------MENUS DICT---------------------------------
menus = OrderedDict() # Add your custom menus here too
# -------------------------------------------------------------------------
menus["Channels"] = menu_channels
menus["Edit"] = menu_edit
menus["Objects"] = "" # this is a custom menu and it's behaviour is defined (differently) in jtChannelBox.py
menus["Show"] = menu_show
# ----------------------------End : Menus Dict-----------------------------
# -------------------------------------------------------------------------
# ----------------------------SYMBOL COMMANDS------------------------------
symbol_commands = {}
# -------------------------------------------------------------------------
symbol_commands["pressed"] = cbc.channelbox_command_Symbol_pressed
symbol_commands["update"] = cbc.channelbox_command_Symbol_update
# --------------------------End : Symbol Commands--------------------------
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# SAVED STATES
# Variables stored by the system
# [x, 0] - First element is the saved data, second element is whether or #
# not this state is saved/serialized persistently to disk and restored
# when the script or maya is restarted
saved_states = {}
# -------------------------------------------------------------------------
# checkbox states
saved_states["syncGraphEditor"] = [0, 0]
saved_states["syncTimeline"] = [0, 0]
saved_states["hyperbolic"] = [0, 1]
saved_states["showNamespace"] = [1, 1]
# radio button collection states
saved_states["speedState"] = [2, 1]
saved_states["manipsState"] = [3, 1]
saved_states["namesState"] = [1, 1]
# serialized settings
saved_states["changePrecision"] = [3, 1]
saved_states["fieldWidth"] = [65, 1]
saved_states["channelWidth"] = [230, 1]
saved_states["hideUnavailable"] = [0, 1]
saved_states["showIcons"] = [1, 1]
saved_states["popupLabel"] = [1, 1]
# filter checkbox states
saved_states["attr_animCurve"] = [0, 0]
saved_states["attr_expression"] = [0, 0]
saved_states["attr_drivenKey"] = [0, 0]
saved_states["attr_scaleRotateTranslate"] = [0, 0]
saved_states["attr_userDefined"] = [0, 0]
saved_states["attr_scale"] = [0, 0]
saved_states["attr_rotate"] = [0, 0]
saved_states["attr_translate"] = [0, 0]
saved_states["invertShown"] = [0, 0]
saved_states["savedFilters"] = [OrderedDict(), 1] # Used to store filter sets, you probably don't want to modify this
# --------------------------End : Saved States-----------------------------
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# SCRIPT JOB IDs
# Saved for later removal of script jobs
# Script jobs end automatically when the parent UI is closed
# -1 almost always is the default value, -1 means not currently running #
jobIDs = {}
# -------------------------------------------------------------------------
jobIDs["syncGraphEditor"] = -1
jobIDs["syncTimeline"] = -1
# -------------------------End : Script Job IDs----------------------------
# ------------------------------------------------------------------------- | agpl-3.0 | 2,335,228,321,807,812,600 | 55.294118 | 118 | 0.595595 | false |
iansealy/projecteuler | optimal/9.py | 1 | 1324 | #!/usr/bin/env python
"""This script solves the Project Euler problem "Special Pythagorean triplet".
The problem is: There exists exactly one Pythagorean triplet for which
a + b + c = 1000. Find the product abc.
"""
from __future__ import division
import math
def main():
"""Special Pythagorean triplet"""
# Constants
SUM = 1000
a, b, c = get_pythagorean_triplet_by_sum(SUM)
print(a * b * c)
def get_pythagorean_triplet_by_sum(s):
"""Get Pythagorean triplet"""
s2 = s // 2
mlimit = int(math.ceil(math.sqrt(s2))) - 1
for m in range(2, mlimit + 1):
if s2 % m == 0:
sm = s2 // m
while sm % 2 == 0:
sm = sm // 2
k = m + 1
if m % 2 == 1:
k = m + 2
while k < 2 * m and k <= sm:
if sm % k == 0 and gcd(k, m) == 1:
d = s2 // (k * m)
n = k - m
a = d * (m * m - n * n)
b = 2 * d * m * n
c = d * (m * m + n * n)
return(a, b, c)
k += 2
return(0, 0, 0)
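# Euclid's parametrisation: for coprime m > n > 0 with m - n odd,
#   a = d*(m*m - n*n), b = 2*d*m*n, c = d*(m*m + n*n)
# generates every Pythagorean triple, and a + b + c = 2*d*m*(m + n).
# The search above therefore factors s/2 as m*k (with k = m + n odd and
# coprime to m) instead of testing all possible a, b pairs.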
def gcd(a, b):
"""Get greatest common divisor"""
if a > b:
a, b = b, a
while a:
a, b = b % a, a
return(b)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,416,430,042,208,403,000 | 21.066667 | 78 | 0.428248 | false |
JaneliaSciComp/janelia-parking-manager | ParkingPermit/models.py | 1 | 11541 |
"""
Holds all of the data models for the site for managing campus visitors.
TODO:
Triggers for history so we don't lose old data - document
Load new data - Update to use user defined make/model
Future:
history
Deploy notes:
"""
import datetime
import re
from django.contrib.auth.models import Group, User
from django.db import models
from django.contrib.admin.models import LogEntry
#from django.contrib.localflavor.us.models import USStateField
from django.contrib.localflavor.us.us_states import STATE_CHOICES
from django.core.mail import EmailMultiAlternatives
from general.models import AuditableTable,LookUp
from django.db.models.signals import post_save
from general.utilities import memoize
from django.conf import settings
#fix Django bug, uggh
User._meta.ordering=["username"]
class VehicleType(LookUp):
class Meta:
ordering = ['name']
class VehicleColor(LookUp):
class Meta:
ordering = ['name']
class VehicleMake(LookUp):
class Meta:
ordering = ['name']
class ParkingLocation(LookUp):
class Meta:
ordering = ['name']
class ViolationReason(LookUp):
class Meta:
ordering = ['name']
class LivingArrangement(LookUp):
class Meta:
ordering = ['name']
class Vehicle(AuditableTable):
""" """
vehicle_type = models.ForeignKey(VehicleType)
make = models.ForeignKey(VehicleMake)
model = models.CharField(max_length=500)
class Meta:
unique_together = ("vehicle_type", "make", "model", )
ordering = ['vehicle_type', 'make', 'model']
def __unicode__(self):
ret_str = "%s %s" % (self.make, self.model)
return ret_str
class VehicleRegistration(AuditableTable):
"""This is the main table to record the issue of a parking permit
(registration) for a user/vehicle combination for one parking year.
Note: These records should generally not be deleted since we want to keep a
historical record.
"""
class Meta:
ordering = ['-created_datetime']
user = models.ForeignKey(User, null=True, blank=True)
vehicle = models.ForeignKey(Vehicle, null=True, blank=True,
help_text="If you see your vehicle in the dropdown list, select it. Otherwise <a href='#' id='show_user_vehicle'>click here</a>.")
user_entered_vehicle_make = models.CharField(max_length=50, blank=True)
user_entered_vehicle_model = models.CharField(max_length=50, blank=True)
color = models.ForeignKey(VehicleColor,
help_text="Choose closest matching color from the list.")
license_plate = models.CharField(max_length=20, help_text="Please no spaces or dashes")
#Django hack, update states to include a *foreign option* (instead of using USStateField)
license_plate_state = models.CharField(max_length=2, choices=(('ZZ', '*Non - US*'),) + STATE_CHOICES)
parking_location = models.ForeignKey(ParkingLocation, null=True, blank=True)
current_living_arrangement = models.ForeignKey(LivingArrangement,
verbose_name="Where do you live?")
current_apt_number = models.CharField(max_length=20, blank=True, help_text="Apartment Number (if applicable)")
parking_number = models.CharField(max_length=200, blank=True)
parking_number_year = models.IntegerField(blank=True, null=True)
notes = models.CharField(max_length=500, blank=True)
agree_to_TOS = models.BooleanField("Policy Agreement", blank=False,
help_text="I acknowledge that I have read and understand the <a href='http://wiki/wiki/display/policy/Parking+on+Campus'>rules</a> for parking " \
"on the Janelia Farm Research Campus. I agree to abide by these rules. I understand " \
"that failure to follow these rules may result in loss of parking privileges on campus.")
active = models.BooleanField(default=True, help_text="Uncheck to remove this vehicle.")
#Fields to collect data for non credentialed employees who won't have their own user
#accounts. The parking system gamekeeper will enter their registrations manually into
#the system.
non_cred_first_name = models.CharField(max_length=255, blank=True,
verbose_name="Non-Credentialed User - First Name")
non_cred_last_name = models.CharField(max_length=255, blank=True,
verbose_name="Non-Credentialed User - Last Name")
non_cred_dept_company = models.CharField(max_length=255, blank=True,
verbose_name="Non-Credentialed User - Dept. or Company")
def vehicle_for_display(self):
if self.vehicle:
return str(self.vehicle)
else:
return "%s %s" % (self.user_entered_vehicle_make,self.user_entered_vehicle_model)
def user_display_name(self):
if self.user:
return self.user.get_profile().display_name
else:
return str(self.non_cred_first_name) + ' ' + str(self.non_cred_last_name)
def user_dept_company(self):
if self.user:
return self.user.get_profile().description
else:
return self.non_cred_dept_company
def user_phone_email(self):
if self.user:
return "%s / %s" % (self.user.get_profile().work_phone,
self.user.email)
else:
return ""
def __unicode__(self):
if self.user:
user_str = str(self.user)
else:
user_str = str(self.non_cred_first_name) + ' ' + str(self.non_cred_last_name)
return "%s, %s, Tags: %s %s Parking #: %s" % (
user_str,
self.vehicle,
self.license_plate_state,
self.license_plate,
#self.parking_location, #doesn't get included in selected related so leave out
self.parking_number)
def get_edit_url(self,include_base=False):
url = '/ParkingPermit/vehicleregistration/%s/' % self.id
if include_base:
url = settings.BASE_URL + url
return url
def save(self, *args, **kwargs):
"""Clean up, replace spaces and dashes in license plate"""
if self.license_plate:
self.license_plate = self.license_plate.replace('-','').replace(' ','')
super(VehicleRegistration,self).save(*args, **kwargs)
def send_created_email(self):
"""Send an email when a new registration is added"""
if settings.NOTIFY_NEW_REG:
to = settings.NOTIFY_NEW_REG
message = """\
Greetings,<br><br>
A new vehicle registration has been submitted by %s.<br><br>
Go here to view or edit the request: <br>
<a href="%s">%s</a>
<br><br>
Sincerely,<br><br>
The Janelia Parking Permit Program
""" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))
subject = 'A new parking permit request has been entered'
from_email = '[email protected]'
text_content = re.sub(r'<[^>]+>','',message)
html_content = message
msg = EmailMultiAlternatives(subject, text_content, from_email, to)
msg.attach_alternative(html_content, "text/html")
msg.send()
class Violation(AuditableTable):
""" """
class Meta:
ordering = ['serial_number']
serial_number = models.CharField(max_length=50, unique=True)
vehicle_registration = models.ForeignKey(VehicleRegistration,) # limit_choices_to = {'active': True}) .. would be good but breaks old records:-(
reason = models.ForeignKey(ViolationReason)
location = models.ForeignKey(ParkingLocation, null=True, blank=True)
violation_datetime = models.DateTimeField(blank=True)
notes = models.CharField(max_length=500, blank=True, help_text="Optional notes")
photo = models.ImageField(blank=True, upload_to='violation_photos',
help_text="Optional image of infraction")
def __unicode__(self):
ret_str = "%s / %s - %s" % (self.reason, self.created_datetime,
self.vehicle_registration)
return ret_str
def user_display_name(self):
if self.vehicle_registration.user:
return self.vehicle_registration.user.get_profile().display_name
else:
return str(self.vehicle_registration.non_cred_first_name) + ' ' + str(self.vehicle_registration.non_cred_last_name)
def user_dept_company(self):
if self.vehicle_registration.user:
return self.vehicle_registration.user.get_profile().description
else:
return self.vehicle_registration.non_cred_dept_company
class UserProfile(models.Model):
"""Additional information to be stored with each user"""
# This field is required.
user = models.OneToOneField(User)
work_phone = models.CharField(max_length=255, blank=True)
job_title = models.CharField(max_length=255, blank=True)
department = models.CharField(max_length=255, blank=True)
employee_num = models.CharField(max_length=30)
LDAP_name = models.CharField(max_length=255)
description = models.CharField(max_length=500, blank=True)
company = models.CharField(max_length=500, blank=True)
display_name = models.CharField(max_length=100, blank=True)
room = models.CharField(max_length=100, help_text="Location", blank=True)
is_active_employee = models.BooleanField(default=True)
    date_severed = models.DateField(blank=True, null=True)
employee_type = models.CharField(max_length=100, blank=True,
choices=(('SR','Shared Resource'),('RESEARCH','Research')))
gender = models.CharField(max_length=100, blank=True,
choices=(('m','Male'),('f','Female')))
def date_joined(self):
return self.user.date_joined
#Make sure a user profile gets created if a user doesn't have one
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
#Use any string with dispatch_uid to prevent signal from being fired once for every
#time the module is imported. Stupid Django bug ...
post_save.connect(create_user_profile, sender=User, dispatch_uid="models.py")
class MyVehicalsProxy(VehicleRegistration):
"""This is a dummy model for a different object view in admin interface
see: http://stackoverflow.com/questions/1861897/django-filtering-or-displaying-a-model-method-in-django-admin
"""
class Meta:
proxy=True
verbose_name = "Registered Vehicle"
verbose_name_plural = "My Registered Vehicles"
ordering = ['-active','vehicle']
class PendingVehicalsProxy(VehicleRegistration):
"""This is a dummy model for a different object view in admin interface
see: http://stackoverflow.com/questions/1861897/django-filtering-or-displaying-a-model-method-in-django-admin
This displays any registrations without a year or parking number.
"""
class Meta:
proxy=True
verbose_name = "Pending Registered Vehicle"
verbose_name_plural = "Pending Registered Vehicles"
ordering = ['-updated_datetime']
class OffboardedVehicalsProxy(VehicleRegistration):
"""This is a dummy model for a different object view in admin interface
see: http://stackoverflow.com/questions/1861897/django-filtering-or-displaying-a-model-method-in-django-admin
This displays any active registrations for employees who have been offboarded.
"""
class Meta:
proxy=True
verbose_name = "Offboarded Employee - Registered Vehicle"
verbose_name_plural = "Offboarded Employees - Registered Vehicles"
ordering = ['-updated_datetime']
| bsd-3-clause | 6,460,956,195,560,898,000 | 40.071174 | 155 | 0.673858 | false |
0x7678/binwalk | src/binwalk/modules/binvis.py | 1 | 10370 | # Generates 3D visualizations of input files.
import os
from binwalk.core.compat import *
from binwalk.core.common import BlockFile
from binwalk.core.module import Module, Option, Kwarg
class Plotter(Module):
'''
Base class for visualizing binaries in Qt.
Other plotter classes are derived from this.
'''
VIEW_DISTANCE = 1024
MAX_2D_PLOT_POINTS = 12500
MAX_3D_PLOT_POINTS = 25000
TITLE = "Binary Visualization"
CLI = [
Option(short='3',
long='3D',
kwargs={'axis' : 3, 'enabled' : True},
description='Generate a 3D binary visualization'),
Option(short='2',
long='2D',
kwargs={'axis' : 2, 'enabled' : True},
description='Project data points onto 3D cube walls only'),
Option(short='Z',
long='points',
type=int,
kwargs={'max_points' : 0},
description='Set the maximum number of plotted data points'),
# Option(short='V',
# long='grids',
# kwargs={'show_grids' : True},
# description='Display the x-y-z grids in the resulting plot'),
]
KWARGS = [
Kwarg(name='axis', default=3),
Kwarg(name='max_points', default=0),
Kwarg(name='show_grids', default=False),
Kwarg(name='enabled', default=False),
]
# There isn't really any useful data to print to console. Disable header and result output.
HEADER = None
RESULT = None
def init(self):
import pyqtgraph.opengl as gl
from pyqtgraph.Qt import QtGui
self.verbose = self.config.verbose
self.offset = self.config.offset
self.length = self.config.length
self.plane_count = -1
self.plot_points = None
if self.axis == 2:
self.MAX_PLOT_POINTS = self.MAX_2D_PLOT_POINTS
self._generate_data_point = self._generate_2d_data_point
elif self.axis == 3:
self.MAX_PLOT_POINTS = self.MAX_3D_PLOT_POINTS
self._generate_data_point = self._generate_3d_data_point
else:
raise Exception("Invalid Plotter axis specified: %d. Must be one of: [2,3]" % self.axis)
if not self.max_points:
self.max_points = self.MAX_PLOT_POINTS
self.app = QtGui.QApplication([])
self.window = gl.GLViewWidget()
self.window.opts['distance'] = self.VIEW_DISTANCE
if len(self.config.target_files) == 1:
self.window.setWindowTitle(self.config.target_files[0].name)
def _print(self, message):
'''
Print console messages. For internal use only.
'''
if self.verbose:
print(message)
def _generate_plot_points(self, data_points):
'''
Generates plot points from a list of data points.
        @data_points - A dictionary containing each unique point and its frequency of occurrence.
Returns a set of plot points.
'''
total = 0
min_weight = 0
weightings = {}
plot_points = {}
# If the number of data points exceeds the maximum number of allowed data points, use a
        # weighting system to eliminate data points that occur less frequently.
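        #
        # Worked example (illustrative): with max_points = 4 and point counts
        # {A: 9, B: 5, C: 2, D: 1, E: 1}, weight 1 matches all 5 points (> 4,
        # so it is discarded) while weight 2 matches only 3, so min_weight
        # becomes 2 and the single-occurrence points D and E are dropped.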
if sum(data_points.values()) > self.max_points:
# First, generate a set of weight values 1 - 10
for i in range(1, 11):
weightings[i] = 0
# Go through every data point and how many times that point occurs
for (point, count) in iterator(data_points):
# For each data point, compare it to each remaining weight value
for w in get_keys(weightings):
# If the number of times this data point occurred is >= the weight value,
# then increment the weight value. Since weight values are ordered lowest
# to highest, this means that more frequent data points also increment lower
# weight values. Thus, the more high-frequency data points there are, the
# more lower-frequency data points are eliminated.
if count >= w:
weightings[w] += 1
else:
break
# Throw out weight values that exceed the maximum number of data points
if weightings[w] > self.max_points:
del weightings[w]
# If there's only one weight value left, no sense in continuing the loop...
if len(weightings) == 1:
break
# The least weighted value is our minimum weight
min_weight = min(weightings)
# Get rid of all data points that occur less frequently than our minimum weight
for point in get_keys(data_points):
if data_points[point] < min_weight:
del data_points[point]
for point in sorted(data_points, key=data_points.get, reverse=True):
plot_points[point] = data_points[point]
# Register this as a result in case future modules need access to the raw point information,
# but mark plot as False to prevent the entropy module from attempting to overlay this data on its graph.
self.result(point=point, plot=False)
total += 1
if total >= self.max_points:
break
return plot_points
def _generate_data_point(self, data):
'''
Subclasses must override this to return the appropriate data point.
@data - A string of data self.axis in length.
Returns a data point tuple.
'''
return (0,0,0)
def _generate_data_points(self, fp):
'''
        Generates a dictionary of data points and their frequency of occurrence.
@fp - The BlockFile object to generate data points from.
Returns a dictionary.
'''
i = 0
data_points = {}
self._print("Generating data points for %s" % fp.name)
# We don't need any extra data from BlockFile
fp.set_block_size(peek=0)
while True:
(data, dlen) = fp.read_block()
if not data or not dlen:
break
i = 0
while (i+(self.axis-1)) < dlen:
point = self._generate_data_point(data[i:i+self.axis])
if has_key(data_points, point):
data_points[point] += 1
else:
data_points[point] = 1
i += 3
return data_points
def _generate_plot(self, plot_points):
import numpy as np
import pyqtgraph.opengl as gl
nitems = float(len(plot_points))
pos = np.empty((nitems, 3))
size = np.empty((nitems))
color = np.empty((nitems, 4))
i = 0
for (point, weight) in iterator(plot_points):
r = 0.0
g = 0.0
b = 0.0
pos[i] = point
frequency_percentage = (weight / nitems)
# Give points that occur more frequently a brighter color and larger point size.
# Frequency is determined as a percentage of total unique data points.
if frequency_percentage > .010:
size[i] = .20
r = 1.0
elif frequency_percentage > .005:
size[i] = .15
b = 1.0
elif frequency_percentage > .002:
size[i] = .10
g = 1.0
r = 1.0
else:
size[i] = .05
g = 1.0
color[i] = (r, g, b, 1.0)
i += 1
scatter_plot = gl.GLScatterPlotItem(pos=pos, size=size, color=color, pxMode=False)
scatter_plot.translate(-127.5, -127.5, -127.5)
return scatter_plot
def plot(self, wait=True):
import pyqtgraph.opengl as gl
self.window.show()
if self.show_grids:
xgrid = gl.GLGridItem()
ygrid = gl.GLGridItem()
zgrid = gl.GLGridItem()
self.window.addItem(xgrid)
self.window.addItem(ygrid)
self.window.addItem(zgrid)
# Rotate x and y grids to face the correct direction
xgrid.rotate(90, 0, 1, 0)
ygrid.rotate(90, 1, 0, 0)
# Scale grids to the appropriate dimensions
xgrid.scale(12.8, 12.8, 12.8)
ygrid.scale(12.8, 12.8, 12.8)
zgrid.scale(12.8, 12.8, 12.8)
for fd in iter(self.next_file, None):
data_points = self._generate_data_points(fd)
self._print("Generating plot points from %d data points" % len(data_points))
self.plot_points = self._generate_plot_points(data_points)
del data_points
self._print("Generating graph from %d plot points" % len(self.plot_points))
self.window.addItem(self._generate_plot(self.plot_points))
if wait:
self.wait()
def wait(self):
from pyqtgraph.Qt import QtCore, QtGui
t = QtCore.QTimer()
t.start(50)
QtGui.QApplication.instance().exec_()
def _generate_3d_data_point(self, data):
'''
Plot data points within a 3D cube.
'''
return (ord(data[0]), ord(data[1]), ord(data[2]))
def _generate_2d_data_point(self, data):
'''
Plot data points projected on each cube face.
'''
self.plane_count += 1
if self.plane_count > 5:
self.plane_count = 0
if self.plane_count == 0:
return (0, ord(data[0]), ord(data[1]))
elif self.plane_count == 1:
return (ord(data[0]), 0, ord(data[1]))
elif self.plane_count == 2:
return (ord(data[0]), ord(data[1]), 0)
elif self.plane_count == 3:
return (255, ord(data[0]), ord(data[1]))
elif self.plane_count == 4:
return (ord(data[0]), 255, ord(data[1]))
elif self.plane_count == 5:
return (ord(data[0]), ord(data[1]), 255)
def run(self):
self.plot()
return True
| mit | -8,027,040,989,788,108,000 | 32.451613 | 117 | 0.539923 | false |
dmilith/SublimeText3-dmilith | Packages/pymdownx/st3/pymdownx/mark.py | 1 | 2825 | """
Mark.
pymdownx.mark
Really simple plugin to add support for
<mark>test</mark> tags as ==test==
MIT license.
Copyright (c) 2014 - 2017 Isaac Muse <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import re
from markdown import Extension
from . import util
SMART_CONTENT = r'((?:(?<=\s)=+?(?=\s)|.)+?=*?)'
CONTENT = r'((?:[^=]|(?<!={2})=)+?)'
# ==mark==
MARK = r'(={2})(?!\s)%s(?<!\s)\1' % CONTENT
# smart ==mark==
SMART_MARK = r'(?:(?<=_)|(?<![\w=]))(={2})(?![\s=])%s(?<!\s)\1(?:(?=_)|(?![\w=]))' % SMART_CONTENT
class MarkProcessor(util.PatternSequenceProcessor):
"""Handle mark patterns."""
PATTERNS = [
util.PatSeqItem(re.compile(MARK, re.DOTALL | re.UNICODE), 'single', 'mark')
]
class MarkSmartProcessor(util.PatternSequenceProcessor):
"""Handle smart mark patterns."""
PATTERNS = [
util.PatSeqItem(re.compile(SMART_MARK, re.DOTALL | re.UNICODE), 'single', 'mark')
]
class MarkExtension(Extension):
"""Add the mark extension to Markdown class."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {
'smart_mark': [True, "Treat ==connected==words== intelligently - Default: True"]
}
super(MarkExtension, self).__init__(*args, **kwargs)
def extendMarkdown(self, md):
"""Insert `<mark>test</mark>` tags as `==test==`."""
config = self.getConfigs()
smart = bool(config.get('smart_mark', True))
md.registerExtension(self)
escape_chars = []
escape_chars.append('=')
util.escape_chars(md, escape_chars)
mark = MarkSmartProcessor(r'=') if smart else MarkProcessor(r'=')
md.inlinePatterns.register(mark, "mark", 65)
def makeExtension(*args, **kwargs):
"""Return extension."""
return MarkExtension(*args, **kwargs)
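

# Minimal usage sketch (illustrative only; assumes the Python-Markdown package
# is installed):
if __name__ == "__main__":
    import markdown
    html = markdown.markdown("==highlight== me", extensions=[makeExtension()])
    print(html)  # expected: <p><mark>highlight</mark> me</p>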
| mit | -617,774,133,755,232,400 | 32.235294 | 111 | 0.662655 | false |
agapow/egas | egas/models.py | 1 | 2568 |
### IMPORTS
from flask_appbuilder import Model
from flask_appbuilder.models.mixins import AuditMixin
from sqlalchemy import Table, ForeignKey, Column, Integer, String, Enum, Float, Text
from sqlalchemy.orm import relationship
#from sqlalchemy import UniqueConstraint
from . import consts
from . import utils
### CODE ###
## Linking table between tags and associations
tag_membership_table = Table ('tag_membership', Model.metadata,
Column ('assoc_id', String(48), ForeignKey ('associations.id')),
Column ('tag_id', Integer, ForeignKey ('tags.id'))
)
def gen_assoc_id (context):
return "%s.%s" % (context.current_parameters['snp_id'],
context.current_parameters['cpg_id'])
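# e.g. snp_id='rs123', cpg_id='cg0042' -> Association.id 'rs123.cg0042'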
class Association (AuditMixin, Model):
"""
A SNP and methylation pairing with statistical support.
"""
__tablename__ = 'associations'
## Properties:
id = Column (String (48), primary_key=True, default=gen_assoc_id)
snp_id = Column (String (16), nullable=False)
snp_locn_chr = Column (Enum (*consts.chromosomes), nullable=False)
snp_locn_posn = Column (Integer, nullable=False)
snp_base_wild = Column (String (1), nullable=False)
snp_base_var = Column (String (1), nullable=False)
cpg_id = Column (String (16), nullable=False)
cpg_locn_chr = Column (Enum (*consts.chromosomes), nullable=False)
cpg_locn_posn = Column (Integer, nullable=False)
stat_beta = Column (Float)
stat_stderr = Column (Float)
stat_pval = Column (Float)
tags = relationship ('Tag', secondary=tag_membership_table, back_populates='associations')
def __repr__(self):
return utils.simple_repr (self, 'id', 'snp_id', 'cpg_id')
class Tag (AuditMixin, Model):
"""
A group of associations, implemented as tagging.
"""
__tablename__ = 'tags'
## Properties:
id = Column (Integer, autoincrement=True, primary_key=True)
title = Column (String (64), nullable=False)
description = Column (Text())
associations = relationship ('Association', secondary=tag_membership_table, back_populates='tags')
def __repr__(self):
return utils.simple_repr (self, 'id', 'title', 'description')
class News (AuditMixin, Model):
"""
News items and updates for the website.
"""
__tablename__ = 'news'
## Properties:
id = Column (Integer, autoincrement=True, primary_key=True)
title = Column (String (64), nullable=False)
body = Column (Text(), nullable=False)
## Accessors:
## Utils:
def __repr__(self):
return utils.simple_repr (self, 'id', 'title', 'body')
### END ###
| mit | 7,409,194,600,156,728,000 | 24.425743 | 101 | 0.669782 | false |
klmitch/nova | nova/tests/functional/compute/test_live_migration.py | 1 | 9085 | # Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_notifier
class FakeCinderError(object):
"""Poor man's Mock because we're stubbing out and not mock.patching. Stubs
out attachment_delete. We keep a raise and call count to simulate a single
volume error while being able to assert that we still got called for all
of an instance's volumes.
"""
def __init__(self):
self.raise_count = 0
self.call_count = 0
def __call__(self, *args, **kwargs):
self.call_count += 1
if self.raise_count == 0:
self.raise_count += 1
raise exception.CinderConnectionFailed(reason='Fake Cinder error')
class LiveMigrationCinderFailure(integrated_helpers._IntegratedTestBase):
# Default self.api to the self.admin_api as live migration is admin only
ADMIN_API = True
api_major_version = 'v2.1'
microversion = 'latest'
def setUp(self):
super(LiveMigrationCinderFailure, self).setUp()
fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)
# Start a second compute node (the first one was started for us by
# _IntegratedTestBase. set_nodes() is needed to avoid duplicate
# nodenames. See comments in test_bug_1702454.py.
self.compute2 = self.start_service('compute', host='host2')
def test_live_migrate_attachment_delete_fails(self):
server = self.api.post_server({
'server': {
'flavorRef': 1,
'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'live-migrate-attachment-delete-fail-test',
'networks': 'none',
'block_device_mapping_v2': [
{'boot_index': 0,
'uuid': uuids.broken_volume,
'source_type': 'volume',
'destination_type': 'volume'},
{'boot_index': 1,
'uuid': uuids.working_volume,
'source_type': 'volume',
'destination_type': 'volume'}]}})
server = self._wait_for_state_change(server, 'ACTIVE')
source = server['OS-EXT-SRV-ATTR:host']
if source == self.compute.host:
dest = self.compute2.host
else:
dest = self.compute.host
post = {
'os-migrateLive': {
'host': dest,
'block_migration': False,
}
}
stub_attachment_delete = FakeCinderError()
self.stub_out('nova.volume.cinder.API.attachment_delete',
stub_attachment_delete)
self.api.post_server_action(server['id'], post)
self._wait_for_server_parameter(server,
{'OS-EXT-SRV-ATTR:host': dest,
'status': 'ACTIVE'})
self.assertEqual(2, stub_attachment_delete.call_count)
self.assertEqual(1, stub_attachment_delete.raise_count)
class TestVolAttachmentsDuringLiveMigration(
integrated_helpers._IntegratedTestBase
):
"""Assert the lifecycle of volume attachments during LM rollbacks
"""
# Default self.api to the self.admin_api as live migration is admin only
ADMIN_API = True
microversion = 'latest'
def _setup_compute_service(self):
self._start_compute('src')
self._start_compute('dest')
@mock.patch('nova.virt.fake.FakeDriver.live_migration')
def test_vol_attachments_during_driver_live_mig_failure(self, mock_lm):
"""Assert volume attachments during live migration rollback
* Mock live_migration to always rollback and raise a failure within the
fake virt driver
* Launch a boot from volume instance
* Assert that the volume is attached correctly to the instance
* Live migrate the instance to another host invoking the mocked
live_migration method
* Assert that the instance is still on the source host
* Assert that the original source host volume attachment remains
"""
# Mock out driver.live_migration so that we always rollback
def _fake_live_migration_with_rollback(
context, instance, dest, post_method, recover_method,
block_migration=False, migrate_data=None):
# Just call the recover_method to simulate a rollback
recover_method(context, instance, dest, migrate_data)
# raise test.TestingException here to imitate a virt driver
raise test.TestingException()
mock_lm.side_effect = _fake_live_migration_with_rollback
volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
server = self._build_server(
name='test_bfv_live_migration_failure', image_uuid='',
networks='none'
)
server['block_device_mapping_v2'] = [{
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0,
'uuid': volume_id
}]
server = self.api.post_server({'server': server})
self._wait_for_state_change(server, 'ACTIVE')
# Fetch the source host for use later
server = self.api.get_server(server['id'])
src_host = server['OS-EXT-SRV-ATTR:host']
# Assert that the volume is connected to the instance
self.assertIn(
volume_id, self.cinder.volume_ids_for_instance(server['id']))
# Assert that we have an active attachment in the fixture
attachments = self.cinder.volume_to_attachment.get(volume_id)
self.assertEqual(1, len(attachments))
# Fetch the attachment_id for use later once we have migrated
src_attachment_id = list(attachments.keys())[0]
# Migrate the instance and wait until the migration errors out thanks
# to our mocked version of live_migration raising TestingException
self._live_migrate(server, 'error', server_expected_state='ERROR')
# Assert that we called the fake live_migration method
mock_lm.assert_called_once()
# Assert that the instance is on the source
server = self.api.get_server(server['id'])
self.assertEqual(src_host, server['OS-EXT-SRV-ATTR:host'])
# Assert that the src attachment is still present
attachments = self.cinder.volume_to_attachment.get(volume_id)
self.assertIn(src_attachment_id, attachments.keys())
self.assertEqual(1, len(attachments))
class LiveMigrationNeutronInteractionsTest(
integrated_helpers._IntegratedTestBase):
# NOTE(artom) We need the admin API to force the host when booting the test
# server.
ADMIN_API = True
microversion = 'latest'
def _setup_compute_service(self):
self._start_compute('src')
self._start_compute('dest')
def test_live_migrate_vifs_from_info_cache(self):
"""Test that bug 1879787 can no longer manifest itself because we get
the network_info from the instance info cache, and not Neutron.
"""
def stub_notify(context, instance, event_suffix,
network_info=None, extra_usage_info=None, fault=None):
vif = network_info[0]
# Make sure we have the correct VIF (the NeutronFixture
# deterministically uses port_2 for networks=auto) and that the
# profile does not contain `migrating_to`, indicating that we did
# not obtain it from the Neutron API.
self.assertEqual(self.neutron.port_2['id'], vif['id'])
self.assertNotIn('migrating_to', vif['profile'])
server = self._create_server(networks='auto',
host=self.computes['src'].host)
with mock.patch.object(self.computes['src'].manager,
'_notify_about_instance_usage',
side_effect=stub_notify) as mock_notify:
self._live_migrate(server, 'completed')
server = self.api.get_server(server['id'])
self.assertEqual('dest', server['OS-EXT-SRV-ATTR:host'])
# We don't care about call arguments here, we just want to be sure
# our stub actually got called.
mock_notify.assert_called()
| apache-2.0 | 5,602,891,008,159,615,000 | 40.674312 | 79 | 0.623886 | false |
davidcandal/gr-tfg | python/qa_test_mac.py | 1 | 1206 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import tfg_swig as tfg
class qa_test_mac (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
if __name__ == '__main__':
gr_unittest.run(qa_test_mac, "qa_test_mac.xml")
| gpl-3.0 | -4,891,861,112,705,026,000 | 28.414634 | 70 | 0.686567 | false |
PalisadoesFoundation/switchmap-ng | switchmap/test/test_general.py | 1 | 15337 | #!/usr/bin/env python3
"""Test the general module."""
import getpass
import unittest
import random
import os
import sys
import string
import tempfile
import yaml
import shutil
# Try to create a working PYTHONPATH
TEST_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
SWITCHMAP_DIRECTORY = os.path.abspath(os.path.join(TEST_DIRECTORY, os.pardir))
ROOT_DIRECTORY = os.path.abspath(os.path.join(SWITCHMAP_DIRECTORY, os.pardir))
if TEST_DIRECTORY.endswith('/switchmap-ng/switchmap/test') is True:
sys.path.append(ROOT_DIRECTORY)
else:
print(
'This script is not installed in the "switchmap-ng/bin" directory. '
'Please fix.')
sys.exit(2)
from switchmap.utils import general
from switchmap import switchmap
class KnownValues(unittest.TestCase):
"""Checks all functions and methods."""
#########################################################################
# General object setup
#########################################################################
# Required
maxDiff = None
random_string = ''.join([random.choice(
string.ascii_letters + string.digits) for n in range(9)])
def test_cli_help(self):
"""Testing method / function cli_help."""
pass
def test_systemd_daemon(self):
"""Testing function systemd_daemon."""
pass
def test_systemd_exists(self):
"""Testing function systemd_exists."""
# Get result for unknown service
agent_name = self.random_string
result = general.systemd_exists(agent_name)
self.assertEqual(result, False)
def test_check_sudo(self):
"""Testing function check_sudo."""
        # Test without the sudo variable set
result = 'SUDO_UID' in os.environ
self.assertEqual(result, False)
# Test with sudo variable set
os.environ['SUDO_UID'] = getpass.getuser()
with self.assertRaises(SystemExit):
general.check_sudo()
def test_check_user(self):
"""Testing function check_user."""
pass
def test_root_directory(self):
"""Testing method / function root_directory."""
# Initializing key variables
# Determine root directory for switchmap
switchmap_dir = switchmap.__path__[0]
components = switchmap_dir.split(os.sep)
# Determine root directory 2 levels above
root_dir = os.sep.join(components[0:-2])
result = general.root_directory()
self.assertEqual(result, root_dir)
def test_get_hosts(self):
"""Testing method / function get_hosts."""
# Initializing key variables
pass
def test_read_yaml_file(self):
"""Testing method / function read_yaml_file."""
# Initializing key variables
dict_1 = {
'key1': 1,
'key2': 2,
'key3': 3,
'key4': 4,
}
# Create temp file with known data
directory = tempfile.mkdtemp()
file_data = [
(('{}/file_1.yaml').format(directory), dict_1)
]
for item in file_data:
filename = item[0]
data_dict = item[1]
with open(filename, 'w') as filehandle:
yaml.dump(data_dict, filehandle, default_flow_style=False)
# Get Results
result = general.read_yaml_file(filename)
# Test equivalence
for key in result.keys():
self.assertEqual(data_dict[key], result[key])
# Clean up
filelist = [
next_file for next_file in os.listdir(
directory) if next_file.endswith('.yaml')]
for delete_file in filelist:
delete_path = ('{}/{}').format(directory, delete_file)
os.remove(delete_path)
os.removedirs(directory)
def test_read_yaml_files(self):
"""Testing method / function read_yaml_files."""
# Initializing key variables
dict_1 = {
'key1': 1,
'key2': 2,
'key3': 3,
'key4': 4,
}
dict_2 = {
'key6': 6,
'key7': 7,
}
dict_3 = {}
# Populate a third dictionary with contents of other dictionaries.
for key, value in dict_1.items():
dict_3[key] = value
for key, value in dict_2.items():
dict_3[key] = value
# Create temp file with known data
directory = tempfile.mkdtemp()
filenames = {
('%s/file_1.yaml') % (directory): dict_1,
('%s/file_2.yaml') % (directory): dict_2
}
for filename, data_dict in filenames.items():
with open(filename, 'w') as filehandle:
yaml.dump(data_dict, filehandle, default_flow_style=False)
# Get Results
result = general.read_yaml_files([directory])
# Clean up
for key in result.keys():
self.assertEqual(dict_3[key], result[key])
filelist = [
next_file for next_file in os.listdir(
directory) if next_file.endswith('.yaml')]
for delete_file in filelist:
delete_path = ('%s/%s') % (directory, delete_file)
os.remove(delete_path)
os.removedirs(directory)
def test_run_script(self):
"""Testing method / function run_script."""
# Initializing key variables
pass
def test_delete_files(self):
"""Testing method / function delete_files."""
# Testing with a known invalid directory
directory = self.random_string
with self.assertRaises(SystemExit):
general.delete_files(directory)
# Creating temporary yaml and json files for testing
directory = tempfile.mkdtemp()
testfiles = ['test1.yaml', 'test2.yaml', 'test3.json']
for filename in testfiles:
filepath = '{}/{}'.format(directory, filename)
open(filepath, 'a').close()
# Testing if all yaml files were created
count = len([name for name in os.listdir(
directory) if name.endswith('.yaml')])
self.assertEqual(count, 2)
# Test if json file was created
jcount = len([name for name in os.listdir(
directory) if name.endswith('.json')])
self.assertEqual(jcount, 1)
# Deleting all yaml files using function
general.delete_files(directory)
# Test if all yaml files were deleted
result = len([name for name in os.listdir(
directory) if name.endswith('.yaml')])
self.assertEqual(result, 0)
# Test if json file was not deleted
jcount = len([name for name in os.listdir(
directory) if name.endswith('.json')])
self.assertEqual(jcount, 1)
# Delete json file
general.delete_files(directory, extension='.json')
# Test if json file was deleted
jcount = len([name for name in os.listdir(
directory) if name.endswith('.json')])
self.assertEqual(jcount, 0)
# Removing test directory
os.removedirs(directory)
# Test if directory has been deleted
self.assertEqual(os.path.isdir(directory), False)
def test_config_directories(self):
"""Testing method / function config_directories."""
        # Initialize key variables
save_directory = None
if 'SWITCHMAP_CONFIGDIR' in os.environ:
save_directory = os.environ['SWITCHMAP_CONFIGDIR']
# Try with no SWITCHMAP_CONFIGDIR
os.environ.pop('SWITCHMAP_CONFIGDIR', None)
directory = '{}/etc'.format(general.root_directory())
result = general.config_directories()
self.assertEqual(result, [directory])
# Test with SWITCHMAP_CONFIGDIR set
directory = tempfile.mkdtemp()
os.environ['SWITCHMAP_CONFIGDIR'] = directory
result = general.config_directories()
self.assertEqual(result, [directory])
# Restore state
if save_directory is not None:
os.environ['SWITCHMAP_CONFIGDIR'] = save_directory
def test_search_file(self):
"""Testing method / function search_file."""
# Initializing key variables
result = general.search_file('cp')
self.assertEqual(result, '/bin/cp')
def test_move_files(self):
"""Testing method / function move_files."""
# Initialize key variables
source_filenames = {}
target_filenames = {}
#################################################
# Test with invalid source directory
#################################################
invalid_path = ('/tmp/%s.%s') % (
self.random_string,
self.random_string)
with self.assertRaises(SystemExit):
general.move_files(invalid_path, '/tmp')
#################################################
# Test with invalid destination directory
#################################################
invalid_path = ('/tmp/%s.%s') % (
self.random_string,
self.random_string)
with self.assertRaises(SystemExit):
general.move_files('/tmp', invalid_path)
#################################################
# Test with valid directory
#################################################
# Create a source directory
source_dir = ('/tmp/%s.1') % (self.random_string)
if os.path.exists(source_dir) is False:
os.makedirs(source_dir)
# Create a target directory
target_dir = ('/tmp/%s.2') % (self.random_string)
if os.path.exists(target_dir) is False:
os.makedirs(target_dir)
# Place files in the directory
for count in range(0, 4):
filename = ''.join([random.choice(
string.ascii_letters + string.digits) for n in range(15)])
source_filenames[count] = ('%s/%s') % (source_dir, filename)
target_filenames[count] = ('%s/%s') % (target_dir, filename)
open(source_filenames[count], 'a').close()
# Check files in directory
self.assertEqual(os.path.isfile(source_filenames[count]), True)
# Delete files in directory
general.move_files(source_dir, target_dir)
# Check that files are not in source_dir
for filename in source_filenames.values():
self.assertEqual(os.path.isfile(filename), False)
# Check that files are in in target_dir
for filename in target_filenames.values():
self.assertEqual(os.path.isfile(filename), True)
# Delete directory
shutil.rmtree(source_dir)
# Delete directory
shutil.rmtree(target_dir)
def test_create_yaml_file(self):
"""Testing method / function create_yaml_file."""
# Initializing key variables
pass
def test_dict2yaml(self):
"""Testing method / function dict2yaml."""
# Initializing key variables
data_dict = {
'1': 'test 1',
'two': 'test 2'
}
data_yaml = """'1': test 1
two: test 2
"""
# Do test with good dict
yaml_result = general.dict2yaml(data_dict)
self.assertEqual(yaml_result, data_yaml)
def test_delete_file(self):
"""Test function delete_file."""
# Testing with a known invalid directory
directory = self.random_string
with self.assertRaises(SystemExit):
general.delete_files(directory)
# Creating temporary yaml and json files to test with
directory = tempfile.mkdtemp()
filenames = ['test1.yaml', 'test2.yaml', 'test3.json']
for filename in filenames:
filepath = '{}/{}'.format(directory, filename)
open(filepath, 'a').close()
# Testing if all files were created
yamlcount = len([name for name in os.listdir(
directory) if name.endswith('.yaml')])
self.assertEqual(yamlcount, 2)
jsoncount = len([name for name in os.listdir(
directory) if name.endswith('.json')])
self.assertEqual(jsoncount, 1)
# Testing if all json files are deleted
general.delete_files(directory, extension='.json')
result = len([name for name in os.listdir(
directory) if name.endswith('.json')])
self.assertEqual(result, 0)
# Testing if all yaml files are deleted
general.delete_files(directory, extension='.yaml')
result = len([name for name in os.listdir(
directory) if name.endswith('.yaml')])
self.assertEqual(result, 0)
# Removing test directory
os.removedirs(directory)
# Test if directory has been deleted
self.assertEqual(os.path.isdir(directory), False)
def test_delete_yaml_files(self):
"""Test function delete_yaml_files."""
# Testing with a known invalid directory
directory = self.random_string
with self.assertRaises(SystemExit):
general.delete_files(directory)
# Creating temporary yaml and json files for testing
directory = tempfile.mkdtemp()
testfiles = ['test1.yaml', 'test2.yaml', 'test3.json']
for filename in testfiles:
filepath = '{}/{}'.format(directory, filename)
open(filepath, 'a').close()
# Testing if all yaml files were created
count = len([name for name in os.listdir(
directory) if name.endswith('.yaml')])
self.assertEqual(count, 2)
# Test if json file was created
jcount = len([name for name in os.listdir(
directory) if name.endswith('.json')])
self.assertEqual(jcount, 1)
# Deleting all yaml files using function
general.delete_yaml_files(directory)
# Test if all yaml files were deleted
result = len([name for name in os.listdir(
directory) if name.endswith('.yaml')])
self.assertEqual(result, 0)
# Test if json file was not deleted
jcount = len([name for name in os.listdir(
directory) if name.endswith('.json')])
self.assertEqual(jcount, 1)
# Delete json file
general.delete_files(directory, extension='.json')
# Test if json file was deleted
jcount = len([name for name in os.listdir(
directory) if name.endswith('.json')])
self.assertEqual(jcount, 0)
# Removing test directory
os.removedirs(directory)
# Test if directory has been deleted
self.assertEqual(os.path.isdir(directory), False)
def test_cleanstring(self):
"""Testing method / function cleanstring."""
# Initializing key variables
dirty_string = (' %s\n \r %s \n %s ') % (
self.random_string, self.random_string, self.random_string)
clean_string = ('%s %s %s') % (
self.random_string, self.random_string, self.random_string)
# Test result
result = general.cleanstring(dirty_string)
self.assertEqual(result, clean_string)
if __name__ == '__main__':
# Do the unit test
unittest.main()
| apache-2.0 | -1,092,549,414,544,443,000 | 32.341304 | 78 | 0.569407 | false |
edonyM/toolkitem | fileprocess/emgui/filebrowser.py | 1 | 14274 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - [email protected]
#
# twitter : @edonyzpc
#
# Last modified: 2015-07-03 21:38
#
# Filename: project_file_browser.py
#
# Description: All Rights Are Reserved
#
"""
#import scipy as sp
#import math as m
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D as Ax3
#from scipy import stats as st
#from matplotlib import cm
#import numpy as np
import os
import sys
if sys.version.startswith('3'):
    import tkinter
    # tkMessageBox was renamed to tkinter.messagebox in Python 3
    from tkinter import messagebox as tkMessageBox
else:
    import Tkinter as tkinter
    import tkMessageBox
import shutil
from packages.filesline.getdirections import GetDirections as GD
class PyColor(object):
""" This class is for colored print in the python interpreter!
"F3" call Addpy() function to add this class which is defined
in the .vimrc for vim Editor."""
def __init__(self):
self.self_doc = r"""
STYLE: \033['display model';'foreground';'background'm
DETAILS:
FOREGROUND BACKGOUND COLOR
---------------------------------------
30 40 black
31 41 red
32 42 green
33 43 yellow
34 44 blue
35 45 purple
36 46 cyan
37 47 white
        DISPLAY MODE     DETAILS
-------------------------
0 default
1 highlight
4 underline
5 flicker
7 reverse
        8                non-visible
e.g:
\033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
\033[0m <!--set all into default-->
"""
self.warningcolor = '\033[0;31m'
self.tipcolor = '\033[0;32m'
self.endcolor = '\033[0m'
self._newcolor = ''
@property
def new(self):
"""
Customized Python Print Color.
"""
return self._newcolor
@new.setter
def new(self, color_str):
"""
New Color.
"""
self._newcolor = color_str
def disable(self):
"""
Disable Color Print.
"""
self.warningcolor = ''
self.endcolor = ''
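# Minimal usage sketch for PyColor (not wired into the GUI below): build the
# escape sequences documented in self_doc and reset with endcolor.
#   pc = PyColor()
#   print(pc.warningcolor + 'path does not exist' + pc.endcolor)
#   pc.new = '\033[1;34;40m'  # highlight; blue foreground; black background
#   print(pc.new + 'custom style' + pc.endcolor)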
class GUI(tkinter.Frame):
def __init__(self, root):
tkinter.Frame.__init__(self, root, background="white")
self.root = root
self.path = ""
self.files = {}
self.pc_file = ""
self.stl_file = ""
self.igs_file = ""
self.init_gui()
self.pack(fill=tkinter.BOTH, expand=1)
def get_files(self, path=None):
if path:
path = GD.normal_path(path)
files_list = GD(path)
else:
self.path = GD.normal_path(self.path)
files_list = GD(self.path)
files_list.get_dir()
files_list.all_files()
self.files = files_list.files
def update_files(self, path):
if self.path != path:
self.get_files(path)
else:
self.get_files(self.path)
def update_listbox(self):
self.lsbox_pc.delete(0, self.lsbox_pc.size())
self.lsbox_stl.delete(0, self.lsbox_stl.size())
self.lsbox_igs.delete(0, self.lsbox_igs.size())
for file in self.files:
for name in self.files[file]:
if name.endswith(".sp"):
self.lsbox_pc.insert(tkinter.END, name)
if name.endswith(".stl"):
self.lsbox_stl.insert(tkinter.END, name)
if name.endswith(".igs"):
self.lsbox_igs.insert(tkinter.END, name)
def update_listbox_search(self, search_txt):
self.lsbox_pc.delete(0, self.lsbox_pc.size())
for file in self.files:
for name in self.files[file]:
if name.endswith(".sp") and search_txt in name:
self.lsbox_pc.insert(tkinter.END, name)
def match_files(self, match_re):
self.lsbox_stl.delete(0, self.lsbox_stl.size())
self.lsbox_igs.delete(0, self.lsbox_igs.size())
for file in self.files:
for name in self.files[file]:
if name.startswith(match_re+".") and name.endswith("stl"):
self.lsbox_stl.insert(tkinter.END, name)
if name.startswith(match_re+".") and name.endswith("igs"):
self.lsbox_igs.insert(tkinter.END, name)
def full_path_file(self, name):
for path, item in self.files.items():
if name in item:
return path + "/" + name
def init_gui(self):
#self.get_files()
# main frame
self.frame_top = tkinter.Frame(self.root, height=400, width=800, background="black")
self.frame_top.pack(side=tkinter.TOP, fill=tkinter.BOTH)
self.frame_bottom = tkinter.Frame(self.root, height=100, width=400, background="black")
self.frame_bottom.pack(side=tkinter.RIGHT, fill=tkinter.BOTH)
self.frame_bottom_l = tkinter.Frame(self.root, height=100, width=400, background="black")
self.frame_bottom_l.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
# labelframe of bottom frame
self.labframe = tkinter.LabelFrame(self.frame_bottom, text="Console",
height=50, width=400, background="white")
self.labframe.pack(side=tkinter.RIGHT, fill=tkinter.BOTH, expand=1)
# labelframel of bottom frame
self.labframel = tkinter.LabelFrame(self.frame_bottom_l, text="Enter",
height=50, width=400, background="white")
self.labframel.pack(side=tkinter.LEFT, fill=tkinter.BOTH, expand=1)
# labelframe of top frame
self.labframe_bottom = tkinter.LabelFrame(self.frame_top, text="Point Cloud",
height=400, width=800, background="cyan")
self.labframe_bottom.pack(side=tkinter.BOTTOM, fill=tkinter.BOTH)
self.labframe_left = tkinter.LabelFrame(self.frame_top, text="STL",
height=400, width=400, background="cyan")
self.labframe_left.pack(side=tkinter.LEFT, fill=tkinter.BOTH, expand=1)
self.labframe_right = tkinter.LabelFrame(self.frame_top, text="IGS",
height=400, width=400, background="cyan")
self.labframe_right.pack(side=tkinter.RIGHT, fill=tkinter.BOTH, expand=1)
# message
# message of labframe
txt = tkinter.StringVar()
msm_status = tkinter.Message(self.labframe, textvariable=txt, width=200, background="white")
msm_status.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
txt.set("FILE MANAGEMENT START...")
# button
# quit button
quit_button = tkinter.Button(self.labframe, text="Browser", relief=tkinter.SUNKEN,
fg="blue", height=50,
activebackground="green", command=self.root.destroy)
quit_button.pack(side=tkinter.RIGHT, fill=tkinter.BOTH, expand=1)
# entry
# entry of labframe
## enter event handler
def getin(content):
content = enter.get()
self.path = content
self.update_files(content)
self.update_listbox()
txt.set(enter.get())
enter_str = tkinter.StringVar()
enter = tkinter.Entry(self.labframel, textvariable=enter_str, width=400, background="red")
enter.pack(side=tkinter.TOP, fill=tkinter.BOTH)
enter.bind("<Return>", getin)
def rmpcfile(event):
if tkMessageBox.askokcancel("Remove", "Are you sure to remove the file?"):
event = self.lsbox_pc.get(self.lsbox_pc.curselection())
remove_file = self.full_path_file(event)
os.remove(remove_file)
self.get_files()
self.update_listbox()
def rmstlfile(event):
if tkMessageBox.askokcancel("Remove", "Are you sure to remove the file?"):
event = self.lsbox_stl.get(self.lsbox_stl.curselection())
remove_file = self.full_path_file(event)
os.remove(remove_file)
self.get_files()
self.update_listbox()
def rmigsfile(event):
if tkMessageBox.askokcancel("Remove", "Are you sure to remove the file?"):
event = self.lsbox_igs.get(self.lsbox_igs.curselection())
remove_file = self.full_path_file(event)
os.remove(remove_file)
self.get_files()
self.update_listbox()
        def addfile(event):
topdlg = tkinter.Toplevel(self.root)
topdlg.title("Add Files")
topdlg.geometry("250x80+300+200")
def mvfile():
if self.path:
event = enter_add_file.get()
print(event)
                filename = event[event.rfind("/") + 1:]
shutil.move(event, self.path+"/"+filename)
self.get_files()
self.update_listbox()
topdlg.destroy()
else:
txt.set("Please Set The Root Path")
topdlg.destroy()
enter_add_file = tkinter.Entry(topdlg, width=250)
label = tkinter.Label(topdlg, text="New File Name With Path", width=250, anchor="w", justify="left")
label.pack(side=tkinter.TOP)
        #enter_add_file.bind("<Return>", mvfile)
enter_add_file.pack()
button_add_file = tkinter.Button(topdlg, text="Add Single File", command=mvfile)
button_add_file.pack(side=tkinter.LEFT)
button_add_file_list = tkinter.Button(topdlg, text="Add Multiple File", command=mvfile)
button_add_file_list.pack(side=tkinter.RIGHT)
# listbox
# listbox of point cloud labelframe
## lsbox_pc event handler
def selectpcfile(event):
event = self.lsbox_pc.get(self.lsbox_pc.curselection())
name_without = event.split(".")[0]
self.match_files(name_without)
self.pc_file = self.full_path_file(event)
txt.set(self.lsbox_pc.get(self.lsbox_pc.curselection()))
txt.set(self.pc_file)
self.lsbox_pc = tkinter.Listbox(self.labframe_bottom,
selectmode=tkinter.BROWSE, background="yellow")
self.lsbox_pc.bind("<Double-Button-1>", selectpcfile)
self.lsbox_pc.bind("<Double-Button-3>", rmpcfile)
self.lsbox_pc.bind("<Button-2>", addfile)
for file in self.files:
for name in self.files[file]:
if name.endswith(".sp"):
self.lsbox_pc.insert(tkinter.END, name)
self.lsbox_pc.pack(side=tkinter.LEFT, fill=tkinter.BOTH, expand=1)
## entry for lsbox_pc search
### enter for lsbox_pc event handler
def getsearch(content):
content = enter_pc.get()
self.update_listbox_search(content)
txt.set(enter_pc.get())
enter_str_pc = tkinter.StringVar()
enter_pc = tkinter.Entry(self.labframe, textvariable=enter_str_pc, background="cyan")
enter_pc.pack(side=tkinter.BOTTOM, fill=tkinter.BOTH, expand=1)
enter_pc.bind("<Return>", getsearch)
# listbox of STL labelframe
def selectstlfile(event):
event = self.lsbox_stl.get(self.lsbox_stl.curselection())
self.stl_file = self.full_path_file(event)
txt.set(self.stl_file)
self.lsbox_stl = tkinter.Listbox(self.labframe_left,
selectmode=tkinter.BROWSE, background="yellow")
self.lsbox_stl.bind("<Double-Button-1>", selectstlfile)
self.lsbox_stl.bind("<Double-Button-3>", rmstlfile)
for file in self.files:
for name in self.files[file]:
if name.endswith(".stl"):
self.lsbox_stl.insert(tkinter.END, name)
self.lsbox_stl.pack(side=tkinter.TOP, fill=tkinter.BOTH)
# listbox of IGS labelframe
def selectigsfile(event):
event = self.lsbox_igs.get(self.lsbox_igs.curselection())
self.igs_file = self.full_path_file(event)
txt.set(self.igs_file)
self.lsbox_igs = tkinter.Listbox(self.labframe_right,
selectmode=tkinter.BROWSE, background="yellow")
self.lsbox_igs.bind("<Double-Button-1>", selectigsfile)
self.lsbox_igs.bind("<Double-Button-3>", rmigsfile)
for file in self.files:
for name in self.files[file]:
if name.endswith(".igs"):
self.lsbox_igs.insert(tkinter.END, name)
self.lsbox_igs.pack(side=tkinter.TOP, fill=tkinter.BOTH)
if __name__ == "__main__":
WIN = tkinter.Tk()
WIN.geometry("800x450+200+300")
WIN.title("File Manager")
# root.resizable(width=False, height=False)
GUI = GUI(WIN)
WIN.mainloop()
STL = GUI.stl_file
IGS = GUI.igs_file
#print(STL)
#print(IGS)
| mit | -9,219,411,283,376,683,000 | 41.230769 | 112 | 0.519756 | false |
dropbox/changes | tests/changes/models/test_testresult.py | 1 | 14753 | from base64 import b64encode
import mock
from changes.constants import Result
from changes.lib.artifact_store_mock import ArtifactStoreMock
from changes.models.failurereason import FailureReason
from changes.models.itemstat import ItemStat
from changes.models.testresult import TestResult, TestResultManager, logger
from changes.testutils.cases import TestCase
def _stat(jobstep, name):
id = jobstep.id
return ItemStat.query.filter_by(name=name, item_id=id)[0].value
class TestResultManagerTestCase(TestCase):
@mock.patch('changes.models.testresult.ArtifactStoreClient', ArtifactStoreMock)
@mock.patch('changes.storage.artifactstore.ArtifactStoreClient', ArtifactStoreMock)
def test_simple(self):
from changes.models.test import TestCase
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
jobphase = self.create_jobphase(job)
jobstep = self.create_jobstep(jobphase)
artifact = self.create_artifact(jobstep, 'junit.xml')
results = [
TestResult(
step=jobstep,
name='test_bar',
package='tests.changes.handlers.test_xunit',
result=Result.failed,
message='collection failed',
duration=156,
artifacts=[{
'name': 'artifact_name',
'type': 'text',
'base64': b64encode('sample content')}]),
TestResult(
step=jobstep,
name='test_foo',
package='tests.changes.handlers.test_coverage',
result=Result.passed,
message='foobar passed',
duration=12,
reruns=1,
),
]
manager = TestResultManager(jobstep, artifact)
manager.save(results)
testcase_list = sorted(TestCase.query.all(), key=lambda x: x.name)
assert len(testcase_list) == 2
for test in testcase_list:
assert test.job_id == job.id
assert test.step_id == jobstep.id
assert test.project_id == project.id
assert testcase_list[0].name == 'tests.changes.handlers.test_coverage.test_foo'
assert testcase_list[0].result == Result.passed
assert testcase_list[0].message == 'foobar passed'
assert testcase_list[0].duration == 12
assert testcase_list[0].reruns == 1
assert testcase_list[1].name == 'tests.changes.handlers.test_xunit.test_bar'
assert testcase_list[1].result == Result.failed
assert testcase_list[1].message == 'collection failed'
assert testcase_list[1].duration == 156
        assert testcase_list[1].reruns == 0
testartifacts = testcase_list[1].artifacts
assert len(testartifacts) == 1
assert testartifacts[0].file.get_file().read() == 'sample content'
assert _stat(jobstep, 'test_count') == 2
assert _stat(jobstep, 'test_failures') == 1
assert _stat(jobstep, 'test_duration') == 168
assert _stat(jobstep, 'test_rerun_count') == 1
failures = FailureReason.query.filter_by(step_id=jobstep.id).all()
assert failures == []
@mock.patch('changes.models.testresult.ArtifactStoreClient', ArtifactStoreMock)
@mock.patch('changes.storage.artifactstore.ArtifactStoreClient', ArtifactStoreMock)
def test_bad_duration(self):
from changes.models.test import TestCase
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
jobphase = self.create_jobphase(job)
jobstep = self.create_jobstep(jobphase)
artifact = self.create_artifact(jobstep, 'junit.xml')
results = [
TestResult(
step=jobstep,
name='test_bar',
package='tests.changes.handlers.test_xunit',
result=Result.failed,
message='collection failed',
duration=2147483647 * 2,
artifacts=[{
'name': 'artifact_name',
'type': 'text',
'base64': b64encode('sample content')}]),
]
manager = TestResultManager(jobstep, artifact)
with mock.patch.object(logger, 'warning') as warn:
manager.save(results)
assert warn.called
testcase_list = TestCase.query.all()
assert len(testcase_list) == 1
for test in testcase_list:
assert test.job_id == job.id
assert test.step_id == jobstep.id
assert test.project_id == project.id
assert testcase_list[0].name == 'tests.changes.handlers.test_xunit.test_bar'
assert testcase_list[0].result == Result.failed
assert testcase_list[0].message == 'collection failed'
assert testcase_list[0].duration == 0
@mock.patch('changes.models.testresult.ArtifactStoreClient', ArtifactStoreMock)
@mock.patch('changes.storage.artifactstore.ArtifactStoreClient', ArtifactStoreMock)
def test_duplicate_tests_in_same_result_list(self):
from changes.models.test import TestCase
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
jobphase = self.create_jobphase(job)
jobstep = self.create_jobstep(jobphase, label='STEP1')
artifact = self.create_artifact(jobstep, 'junit.xml')
results = [
TestResult(
step=jobstep,
name='test_foo',
package='project.tests',
result=Result.passed,
duration=12,
reruns=0,
artifacts=[{
'name': 'artifact_name',
'type': 'text',
'base64': b64encode('first artifact')}],
message_offsets=[('system-out', 123, 10)],
),
TestResult(
step=jobstep,
name='test_bar',
package='project.tests',
result=Result.passed,
duration=13,
reruns=0,
),
TestResult(
step=jobstep,
name='test_foo',
package='project.tests',
result=Result.passed,
duration=11,
reruns=0,
artifacts=[{
'name': 'artifact_name',
'type': 'text',
'base64': b64encode('second artifact')}],
message_offsets=[('system-err', 555, 25)],
),
]
manager = TestResultManager(jobstep, artifact)
manager.save(results)
testcase_list = sorted(TestCase.query.all(), key=lambda x: x.name)
assert len(testcase_list) == 2
for test in testcase_list:
assert test.job_id == job.id
assert test.step_id == jobstep.id
assert test.project_id == project.id
assert testcase_list[0].name == 'project.tests.test_bar'
assert testcase_list[0].result == Result.passed
assert testcase_list[0].message is None
assert testcase_list[0].duration == 13
assert testcase_list[0].reruns == 0
assert len(testcase_list[0].artifacts) == 0
assert len(testcase_list[0].messages) == 0
assert testcase_list[1].name == 'project.tests.test_foo'
assert testcase_list[1].result == Result.failed
assert testcase_list[1].message.startswith('Error: Duplicate Test')
assert testcase_list[1].message.endswith('\nSTEP1\n')
assert testcase_list[1].duration == 12
assert testcase_list[1].reruns == 0
testartifacts = testcase_list[1].artifacts
assert len(testartifacts) == 2
a1 = testartifacts[0].file.get_file().read()
a2 = testartifacts[1].file.get_file().read()
assert {a1, a2} == {'first artifact', 'second artifact'}
testmessages = testcase_list[1].messages
assert len(testmessages) == 2
assert testmessages[0].artifact == artifact
assert testmessages[0].label == 'system-out'
assert testmessages[0].start_offset == 123
assert testmessages[0].length == 10
assert testmessages[1].artifact == artifact
assert testmessages[1].label == 'system-err'
assert testmessages[1].start_offset == 555
assert testmessages[1].length == 25
assert _stat(jobstep, 'test_count') == 2
assert _stat(jobstep, 'test_failures') == 1
assert _stat(jobstep, 'test_duration') == 25
assert _stat(jobstep, 'test_rerun_count') == 0
failures = FailureReason.query.filter_by(step_id=jobstep.id).all()
assert len(failures) == 1
assert failures[0].reason == 'duplicate_test_name'
@mock.patch('changes.models.testresult.ArtifactStoreClient', ArtifactStoreMock)
@mock.patch('changes.storage.artifactstore.ArtifactStoreClient', ArtifactStoreMock)
def test_duplicate_tests_in_different_result_lists(self):
from changes.models.test import TestCase
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
jobphase = self.create_jobphase(job)
jobstep = self.create_jobstep(jobphase, label='STEP1')
artifact = self.create_artifact(jobstep, 'junit.xml')
results = [
TestResult(
step=jobstep,
name='test_foo',
package='project.tests',
result=Result.passed,
duration=12,
reruns=0,
artifacts=[{
'name': 'one_artifact',
'type': 'text',
'base64': b64encode('first artifact')}]
),
TestResult(
step=jobstep,
name='test_bar',
package='project.tests',
result=Result.passed,
duration=13,
reruns=0,
),
]
manager = TestResultManager(jobstep, artifact)
manager.save(results)
testcase_list = sorted(TestCase.query.all(), key=lambda x: x.name)
assert len(testcase_list) == 2
for test in testcase_list:
assert test.job_id == job.id
assert test.step_id == jobstep.id
assert test.project_id == project.id
assert testcase_list[0].name == 'project.tests.test_bar'
assert testcase_list[0].result == Result.passed
assert testcase_list[0].message is None
assert testcase_list[0].duration == 13
assert testcase_list[0].reruns == 0
assert len(testcase_list[0].artifacts) == 0
assert testcase_list[1].name == 'project.tests.test_foo'
assert testcase_list[1].result == Result.passed
assert testcase_list[1].message is None
assert testcase_list[1].duration == 12
assert testcase_list[1].reruns == 0
testartifacts = testcase_list[1].artifacts
assert len(testartifacts) == 1
a1 = testartifacts[0].file.get_file().read()
assert a1 == 'first artifact'
assert _stat(jobstep, 'test_count') == 2
assert _stat(jobstep, 'test_failures') == 0
assert _stat(jobstep, 'test_duration') == 25
assert _stat(jobstep, 'test_rerun_count') == 0
jobstep2 = self.create_jobstep(jobphase, label='STEP2')
artifact2 = self.create_artifact(jobstep2, 'junit.xml')
results = [
TestResult(
step=jobstep2,
name='test_foo',
package='project.tests',
result=Result.passed,
duration=11,
reruns=0,
artifacts=[{
'name': 'another_artifact',
'type': 'text',
'base64': b64encode('second artifact')}]
),
TestResult(
step=jobstep2,
name='test_baz',
package='project.tests',
result=Result.passed,
duration=18,
reruns=2,
),
]
manager = TestResultManager(jobstep2, artifact2)
manager.save(results)
testcase_list = sorted(TestCase.query.all(), key=lambda x: x.name)
assert len(testcase_list) == 3
for test in testcase_list:
assert test.job_id == job.id
assert test.project_id == project.id
assert testcase_list[0].step_id == jobstep.id
assert testcase_list[0].name == 'project.tests.test_bar'
assert testcase_list[0].result == Result.passed
assert testcase_list[0].message is None
assert testcase_list[0].duration == 13
assert testcase_list[0].reruns == 0
assert testcase_list[1].step_id == jobstep2.id
assert testcase_list[1].name == 'project.tests.test_baz'
assert testcase_list[1].result == Result.passed
assert testcase_list[1].message is None
assert testcase_list[1].duration == 18
assert testcase_list[1].reruns == 2
assert testcase_list[2].step_id == jobstep.id
assert testcase_list[2].name == 'project.tests.test_foo'
assert testcase_list[2].result == Result.failed
assert testcase_list[2].message.startswith('Error: Duplicate Test')
assert testcase_list[2].message.endswith('\nSTEP1\nSTEP2\n')
assert testcase_list[2].duration == 12
assert testcase_list[2].reruns == 0
testartifacts = testcase_list[2].artifacts
assert len(testartifacts) == 2
a1 = testartifacts[0].file.get_file().read()
a2 = testartifacts[1].file.get_file().read()
assert {a1, a2} == {'first artifact', 'second artifact'}
# Stats for original step are unharmed:
assert _stat(jobstep, 'test_count') == 2
assert _stat(jobstep, 'test_failures') == 1
assert _stat(jobstep, 'test_duration') == 25
assert _stat(jobstep, 'test_rerun_count') == 0
# Stats for new step:
assert _stat(jobstep2, 'test_count') == 1
assert _stat(jobstep2, 'test_failures') == 0
assert _stat(jobstep2, 'test_duration') == 18
assert _stat(jobstep2, 'test_rerun_count') == 1
failures = FailureReason.query.filter_by(step_id=jobstep.id).all()
assert len(failures) == 0
failures = FailureReason.query.filter_by(step_id=jobstep2.id).all()
assert len(failures) == 1
assert failures[0].reason == 'duplicate_test_name'
| apache-2.0 | 8,390,092,016,119,533,000 | 37.121447 | 87 | 0.576696 | false |
DoraemonShare/yuqing | src/utils/utils.py | 1 | 4066 | #-*-coding:utf-8 -*-
cartypeMap = {
'"******************"': '"******************"',
'"******************"': '"******************"',
'"******************"': '"******************"',
'"******************"': '"******************"'
}
#Columns of interest for the yuqing (public-opinion) analysis; column names come from the PostgreSQL table
concern_colcumns = ['topic', 'publicationDate', 'replyNum',
'clicks', 'postUrl', 'postContent', 'qaClassification', 'postType']
#header row for the exported Excel sheet
EXCEL_HEADER = [u'贴名', u'发帖日期', u'回复数', u'点击数', u'贴URL', u'正文', u'算法标注结果']
not_concern_postType = ['icon_01', 'icon_02', 'icon_03', 'icon_04', 'icon_05',
'read-box', 'icon_04', 'icon-jian-grey', 'icon_business',
'icon-piao-grey', 'icon_buy', 'icon_video', 'icon_official',
'icon_zuan', 'icon_zhibo', 'icon_jing']
concern_postType = ['','icon_tu icon-tu-grey', 'icon_hot', 'icon_new', 'icon-wen-grey', None, 'None']
# -*- coding:utf-8 -*-
import csv
import codecs
# import pandas
import sys
# try to fix '_csv.Error: field larger than field limit (131072)'
csv.field_size_limit(sys.maxint) #field larger than field limit (131072)
# from sqlalchemy import create_engine
# from sqlalchemy.orm import sessionmaker
# # from sqlalchemy.exc import OperationalError
# import psycopg2
# engine = create_engine('postgresql+psycopg2://postgres:[email protected]:5432/postgres')
# engine = create_engine('postgresql+psycopg2://postgres:909@localhost:5432/postgres')
# session = sessionmaker(bind = engine)
sentence_delimiters = ['?', '!', ';', '?', '!', '。', ';', '……', '…', '\n']
def as_text(v):  ## return a str/unicode string
if v is None:
return None
elif isinstance(v, bytes):
return v.decode('utf-8', errors='ignore')
elif isinstance(v, str):
return v
elif isinstance(v, unicode):
return v
else:
raise ValueError('Unknown type %r' % type(v))
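# Quick examples of as_text (Python 2 semantics): bytes are decoded as UTF-8,
# unicode passes through, None stays None.
#   as_text(b'caf\xc3\xa9')  -> u'caf\xe9'
#   as_text(u'abc')          -> u'abc'
#   as_text(None)            -> None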
def read_by_line(filename=None, delimiter=',', field=None):
    '''
    Read a csv file row by row (as a generator), splitting on the given
    delimiter, and yield either selected fields or the whole csv line.
    params:
        delimiter=',': csv delimiter, ',' by default
        field=None: if None, yield the whole csv line; if a list, its length
            must be 1 or 2 -- one index yields that single field, two indices
            yield the slice field[0]..field[1] (inclusive of the end field)
    '''
with codecs.open(filename, mode='rb', encoding='utf-8') as f:
try:
for row in f:
row = row.split(delimiter)
if field:
if len(field) == 1:
yield row[field[0]]
elif len(field) == 2:
yield row[field[0]:(field[1]+1)] #include the end field
else:
yield row
except Exception, e:
raise
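# Usage sketch ('posts.csv' is a hypothetical file): yield the first two
# columns (indices 0 and 1, inclusive slice) from each row.
#   for row_slice in read_by_line('posts.csv', delimiter=',', field=[0, 1]):
#       print row_slice[0], row_slice[1]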
TOPICS = ['油耗', '操控', '最满意的一点', '最不满意的一点',
'舒适性', '性价比', '内饰', '外观', '动力', '空间', '故障']
import pinyin
TOPICS_PINYIN = [pinyin.get(item, format='strip') for item in TOPICS]
import re
def cleaner(text):
    '''
    Strip boilerplate tags and URLs from text; return the cleaned text when it
    is long enough and contains Chinese characters, otherwise None.
    '''
    # '&amp;' must precede '&' in the alternation, otherwise only the '&' part
    # is ever removed; the Postgres NULL marker is written r'\\N' so that the
    # joined pattern compiles as a valid regex.
    dirty_tag = [' ', '\n', '=', ' ', '&amp;', '&', 'pos108', r'\\N',
            'http://club.autohome.com.cn/bbs/thread-c-.+-.+.html;',
            'http://club.autohome.com.cn/bbs/thread-c-.+-.+.html', '-', '\r']
text = re.sub('|'.join(dirty_tag), '', text)
if len(text) > 10:
        #keep only text that contains Chinese characters
if re.findall(u'[\u4e00-\u9fa5]+', text):
return text
else:
return None
# if text == '\n' or text == ' ' or text == ' \n':
# return
# else:
# text = re.sub('|'.join(dirty_tag), '', text)
# return text
def str2unicode(s):
'''
    Convert str to unicode using UTF-8 encoding.
'''
if isinstance(s, str):
return unicode(s, 'utf-8')
else:
return s | bsd-3-clause | 7,075,182,384,411,835,000 | 29.936 | 189 | 0.528712 | false |
borjaayerdi/pyjokes | pyjokes/jokes_en.py | 1 | 15994 | # -*- coding: utf-8 -*-
"""
Jokes from stackoverflow - provided under CC BY-SA 3.0
http://stackoverflow.com/questions/234075/what-is-your-best-programmer-joke?page=4&tab=votes#tab-top
"""
neutral = [
"Complaining about the lack of smoking shelters, the nicotine addicted Python programmers said there ought to be 'spaces for tabs'.",
"Ubuntu users are apt to get this joke.",
"Obfuscated Reality Mappers (ORMs) can be useful database tools.",
"I wonder if the MPs voting on the Digital Economy Bill think that Creative Commons is a Parliamentary initiative.",
"Asked to explain Unicode during an interview, Geoff went into detail about his final year university project. He was not hired.",
"Triumphantly, Beth removed Python 2.7 from her server in 2020. 'Finally!' she said with glee, only to see the announcement for Python 4.4.",
"An SQL query goes into a bar, walks up to two tables and asks, 'Can I join you?'",
"When your hammer is C++, everything begins to look like a thumb.",
"If you put a million monkeys at a million keyboards, one of them will eventually write a Java program. The rest of them will write Perl.",
"To understand recursion you must first understand recursion.",
"Friends don't let friends use Python 2.7",
"I suggested holding a 'Python Object Oriented Programming Seminar', but the acronym was unpopular.",
"'Knock, knock.' 'Who's there?' ... very long pause ... 'Java.'",
"How many programmers does it take to change a light bulb? None, that's a hardware problem.",
"What's the object-oriented way to become wealthy? Inheritance",
"Why don't jokes work in octal? Because 7 10 11.",
"How many programmers does it take to change a light bulb? None, they just make darkness a standard.",
"Two bytes meet. The first byte asks, 'Are you ill?' The second byte replies, 'No, just feeling a bit off.'",
"Two threads walk into a bar. The barkeeper looks up and yells, 'Hey, I want don't any conditions race like time last!'",
"Old C programmers don't die, they're just cast into void.",
"Eight bytes walk into a bar. The bartender asks, 'Can I get you anything?' 'Yeah,' reply the bytes. 'Make us a double.'",
"Why did the programmer quit his job? Because they didn't get arrays.",
"Why do Java programmers have to wear glasses? Because they don't see sharp.",
"Software developers like to solve problems. If there are no problems handily available, they will create their own problems.",
".NET was named .NET so that it wouldn't show up in a Unix directory listing.",
"Hardware: The part of a computer that you can kick.",
"A programmer was found dead in the shower. Next to their body was a bottle of shampoo with the instructions 'Lather, Rinse and Repeat'.",
"Optimist: The glass is half full. Pessimist: The glass is half empty. Programmer: The glass is twice as large as necessary.",
"In C we had to code our own bugs. In C++ we can inherit them.",
"How come there is not obfuscated Perl contest? Because everyone would win.",
"If you play a Windows CD backwards, you'll hear satanic chanting ... worse still, if you play it forwards, it installs Windows.",
"How many programmers does it take to kill a cockroach? Two: one holds, the other installs Windows on it.",
"What do you call a programmer from Finland? Nerdic.",
"What did the Java code say to the C code? A: You've got no class.",
"Why did Microsoft name their search engine BING? Because It's Not Google.",
"Pirates go 'arg!', computer pirates go 'argv!'",
"Software salesmen and used-car salesmen differ in that the latter know when they are lying.",
"Child: Dad, why does the sun rise in the east and set in the west? Dad: Son, it's working, don't touch",
"Why do programmers confuse Halloween with Christmas? Because OCT 31 == DEC 25",
"How many Prolog programmers does it take to change a lightbulb? false.",
"Real programmers can write assembly code in any language",
"Waiter: Would you like coffee or tea? Programmer: Yes.",
"What do you get when you cross a cat and a dog? Cat dog sin theta.",
"If loving you is ROM I don't wanna read write.",
"A programmer walks into a foo...",
"A programmer walks into a bar and orders 1.38 root beers. The bartender informs her it's a root beer float. She says 'Make it a double!'",
"What is Benoit B. Mandelbrot's middle name? Benoit B. Mandelbrot.",
"Why are you always smiling? That's just my... regular expression.",
"ASCII stupid question, get a stupid ANSI.",
"A programmer had a problem. He thought to himself, 'I know, I'll solve it with threads!'. has Now problems. two he",
"Why do sin and tan work? Just cos.",
"Java: Write once, run away.",
"I would tell you a joke about UDP, but you would never get it.",
"A QA engineer walks into a bar. Runs into a bar. Crawls into a bar. Dances into a bar. Tiptoes into a bar. Rams a bar. Jumps into a bar.",
"My friend's in a band called '1023 Megabytes'... They haven't got a gig yet!",
"I had a problem so I thought I'd use Java. Now I have a ProblemFactory.",
"QA Engineer walks into a bar. Orders a beer. Orders 0 beers. Orders 999999999 beers. Orders a lizard. Orders -1 beers. Orders a sfdeljknesv.",
"A product manager walks into a bar, asks for drink. Bartender says no, but will consider adding later.",
"How do you generate a random string? Put a first year Computer Science student in Vim and ask them to save and exit.",
"I've been using Vim for a long time now, mainly because I can't figure out how to exit.",
"How do you know whether a person is a Vim user? Don't worry, they'll tell you.",
"Waiter: He's choking! Is anyone a doctor? Programmer: I'm a Vim user.",
"3 Database Admins walked into a NoSQL bar. A little while later they walked out because they couldn't find a table.",
"How to explain the movie Inception to a programmer? When you run a VM inside another VM, inside another VM ... everything runs real slow!",
"What do you call a parrot that says \"Squawk! Pieces of nine! Pieces of nine!\"? A parrot-ey error.",
"There are only two hard problems in Computer Science: cache invalidation, naming things and off-by-one-errors.",
"There are 10 types of people: those who understand binary and those who don't",
"There are 2 types of people: those who can interpret incomplete data sets...",
"There are II types of people: Those who understand Roman Numerals and those who don't.",
"There are 10 types of people: those who understand hexadecimal and 15 others",
"There are 10 types of people: those who understand binary, those who don't, and those who were expecting this joke to be in trinary.",
"There are 10 types of people: those who understand trinary, those who don't, and those who have never heard of it.",
"What do you call eight hobbits? A hobbyte.",
"The best thing about a Boolean is even if you are wrong, you are only off by a bit.",
"A good programmer is someone who always looks both ways before crossing a one-way street.",
"There are two ways to write error-free programs; only the third one works.",
]
adult = [
"Programming is like sex: One mistake and you have to support it for the rest of your life.",
"Software is like sex: It's better when it's free.",
"Software is like sex: It's never REALLY free.",
"There are 10 types of people: those who understand binary, and those who get laid.",
"Why programmers like UNIX: unzip, strip, touch, finger, grep, mount, fsck, more, yes, fsck, fsck, fsck, umount, sleep",
"If your mom was a collection class, her insert method would be public.",
"Your momma's so fat that not even Dijkstra is able to find a shortest path around her.",
"C++ - where your friends have access to your private members.",
"The only intuitive user interface is the nipple. After that, it's all learned.",
"What's the difference between software development and sex? In sex, you don't get a bonus for releasing early.",
"Your momma's so fat, the recursive function calculating her mass causes a stack overflow.",
]
"""
Jokes from The Internet Chuck Norris DB (ICNDB) (http://www.icndb.com/) - provided under CC BY-SA 3.0
http://api.icndb.com/jokes/
"""
chuck = [
"When Chuck Norris throws exceptions, it's across the room.",
"All arrays Chuck Norris declares are of infinite size, because Chuck Norris knows no bounds.",
"Chuck Norris doesn't have disk latency because the hard drive knows to hurry the hell up, or else.",
"Chuck Norris writes code that optimises itself.",
"Chuck Norris can't test for equality because he has no equal.",
"Chuck Norris doesn't need garbage collection because he doesn't call .Dispose(), he calls .DropKick().",
"Chuck Norris's first program was kill -9.",
"Chuck Norris burst the dot com bubble.",
"All browsers support the hex definitions #chuck and #norris for the colours black and blue.",
"MySpace isn't really your space, it's Chuck's (he just lets you use it).",
"Chuck Norris can write infinitely recursive functions and have them return.",
"Chuck Norris can solve the Towers of Hanoi in one move.",
"The only design pattern Chuck Norris knows is the God Object Pattern.",
"Chuck Norris finished World of Warcraft.",
"Project managers never ask Chuck Norris for estimations.",
"Chuck Norris doesn't use web standards as the web will conform to him.",
"'It works on my machine' always holds true for Chuck Norris.",
"Chuck Norris doesn't do Burn Down charts, he does Smack Down charts.",
"Chuck Norris can delete the Recycling Bin.",
"Chuck Norris's beard can type 140 words per minute.",
"Chuck Norris can unit test entire applications with a single assertion, 'it works'.",
"Chuck Norris doesn't bug hunt as that signifies a probability of failure, he goes bug killing.",
"Chuck Norris's keyboard doesn't have a Ctrl key because nothing controls Chuck Norris.",
"Chuck Norris can overflow your stack just by looking at it.",
"To Chuck Norris, everything contains a vulnerability.",
"Chuck Norris doesn't sudo, the shell just knows it's him and does what it's told.",
"Chuck Norris doesn't need a debugger, he just stares at the code until it confesses.",
"Chuck Norris can access private methods.",
"Chuck Norris can instantiate an abstract class.",
"Chuck Norris does not need to know about Class Factory Pattern. He can instantiate interfaces.",
"The class object inherits from Chuck Norris",
"For Chuck Norris, NP-Hard = O(1).",
"Chuck Norris knows the last digit of Pi.",
"Chuck Norris's Internet connection is faster upstream than downstream because even data has more incentive to run from him than to him.",
"Chuck Norris solved the Travelling Salesman problem in O(1) time: break salesman into N pieces; kick each piece to a different city.",
"No statement can catch the ChuckNorrisException.",
"Chuck Norris doesn't pair program.",
"Chuck Norris can write multi-threaded applications with a single thread.",
"Chuck Norris doesn't need to use AJAX because pages are too afraid to postback anyways.",
"Chuck Norris doesn't use reflection, reflection asks politely for his help.",
"There is no Esc key on Chuck Norris' keyboard, because no one escapes Chuck Norris.",
"Chuck Norris can binary search unsorted data.",
"Chuck Norris doesn't needs try-catch, exceptions are too afraid to raise.",
"Chuck Norris went out of an infinite loop.",
"If Chuck Norris writes code with bugs, the bugs fix themselves.",
"Chuck Norris hosting is 101% uptime guaranteed.",
"Chuck Norris's keyboard has the Any key.",
"Chuck Norris can access the database from the UI.",
"Chuck Norris's programs never exit, they are terminated.",
"Chuck Norris insists on strongly-typed programming languages.",
"The Chuck Norris protocol design method has no status, requests or responses, only commands.",
"Chuck Norris's programs occupy 150% of CPU, even when they are not running.",
"Chuck Norris can spawn threads that complete before they are started.",
"Chuck Norris's programs do not accept input.",
"Chuck Norris can install iTunes without installing Quicktime.",
"Chuck Norris doesn't need an OS.",
"Chuck Norris's OSI network model has only one layer - Physical.",
"Chuck Norris can compile syntax errors.",
"Every SQL statement that Chuck Norris codes has an implicit 'COMMIT' in its end.",
"Chuck Norris does not need to type-cast. The Chuck-Norris Compiler (CNC) sees through things. All the way down. Always.",
"Chuck Norris does not code in cycles, he codes in strikes.",
"Chuck Norris compresses his files by doing a flying round house kick to the hard drive.",
"Chick Norris solved the halting problem.",
"With Chuck Norris P = NP. There's no nondeterminism with Chuck Norris decisions.",
"Chuck Norris can retrieve anything from /dev/null.",
"No one has ever pair-programmed with Chuck Norris and lived to tell the tale.",
"No one has ever spoken during review of Chuck Norris' code and lived to tell the tale.",
"Chuck Norris doesn't use a GUI, he prefers COMMAND line.",
"Chuck Norris doesn't use Oracle, he is the Oracle.",
"Chuck Norris can dereference NULL.",
"A diff between your code and Chuck Norris's is infinite.",
"The Chuck Norris Eclipse plugin made alien contact.",
"Chuck Norris is the ultimate mutex, all threads fear him.",
"Don't worry about tests, Chuck Norris's test cases cover your code too.",
"Each hair in Chuck Norris's beard contributes to make the world's largest DDOS.",
"Chuck Norris's log statements are always at the FATAL level.",
"Chuck Norris's database has only one table, 'Kick', which he drops frequently.",
"Chuck Norris completed World of Warcraft.",
"When Chuck Norris breaks the build, you can't fix it, because there is not a single line of code left.",
"Chuck Norris types with one finger. He points it at the keyboard and the keyboard does the rest.",
"Chuck Norris's programs can pass the Turing Test by staring at the interrogator.",
"If you try to kill -9 Chuck Norris's programs, it backfires.",
"Chuck Norris performs infinite loops in under 4 seconds.",
"Chuck Norris can overwrite a locked variable.",
"Chuck Norris knows the value of NULL, and he can sort by it too.",
"Chuck Norris can install a 64-bit operating system on 32-bit machines.",
"Chuck Norris can write to an output stream.",
"Chuck Norris can read from an input stream.",
"Chuck Norris never has to build his program to machine code. Machines have learnt to interpret Chuck Norris's code.",
"Chuck Norris's unit tests don't run. They die.",
"Chuck Norris causes the Blue Screen of Death.",
"Chuck Norris can make a class that is both abstract and final.",
"Chuck Norris could use anything in java.util.* to kill you, including the javadocs.",
"Code runs faster when Chuck Norris watches it.",
"Chuck Norris doesn't use REST, he waits.",
"Everyone likes Chuck Norris on Facebook, whether they choose to or not",
"You can't follow Chuck Norris on Twitter, because he follows you",
"Chuck Norris's calculator has only 3 keys: 0, 1, and NAND.",
"Chuck Norris only uses global variables. He has nothing to hide.",
"Chuck Norris once implemented an HTTP server in a single printf call. It is now the heart of Apache webserver.",
"Chuck Norris writes directly in binary. He then writes the source code as documentation for other programers.",
"Chuck Norris once shifted a bit so hard, it ended up on a different computer.",
]
jokes_en = {
'neutral': neutral,
'adult': adult,
'chuck': chuck,
'all': neutral + adult + chuck,
}
| bsd-3-clause | 117,854,388,900,222,850 | 73.046296 | 147 | 0.714768 | false |
hjlbs/ctf | 2021/bcactf/honorsabcs/exploit.py | 1 | 1158 | from pwn import *
#p = remote('bin.bcactf.com', 49155)
p = process('./honors-abcs')
e = ELF('./honors-abcs')
## Read until the prompt
p.readuntil('1: ')
'''
gets() reads until it encounters a newline which allows you to overflow the 50-byte response buffer.
Here is the stack layout
response {Frame offset -0x58} This is the buffer that we fill
character {Frame offset -0x1c}
FILE * {Frame offset -0x18}
gets_result {Frame offset -0x10}
grade {Frame offset -0x0c} We want to overwrite this so that grade * 4 > 100
saved_rbp {Frame offset -0x08}
return_address {Frame offset 0x00}
'''
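# Offset arithmetic for the payload below: response sits at frame offset -0x58
# and grade at -0xc, so 0x58 - 0xc = 0x4c = 76 filler bytes reach grade; the
# two 'z' bytes that follow land in grade, making grade * 4 comfortably > 100.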
'''
One thing to be careful of: the overwrite occurs during the call to the gets() function. If your
string starts with an 'a', the grade variable will be overwritten with '0 * 4', which you don't
want, so start with any other character.
'''
## add enough bytes to reach grade 0x58 - 0xc
response = 'b'*(0x58 - 0xc)
# The next bytes overflow grade. We just need a few
response += 'zz'
p.send(response + '\n')
p.readline()
p.readline()
p.readline()
flag = p.readline()
print('Flag: ', flag)
p.close()
| gpl-3.0 | 448,063,655,555,177,340 | 24.173913 | 106 | 0.668394 | false |
aewallin/openvoronoi | python_examples/offset/offset_2_ttt.py | 1 | 8593 | import openvoronoi as ovd # https://github.com/aewallin/openvoronoi
import ovdvtk # for VTK visualization, https://github.com/aewallin/openvoronoi
import truetypetracer as ttt # https://github.com/aewallin/truetype-tracer
import offset2vtk # vtk visualization helper https://github.com/aewallin/openvoronoi
import time
import vtk
import math
# insert points into VoronoiDiagram, return list of point IDs
# polygon = [p1,p2,p3,p4,p5]
# where we assume the polygon is closed, e.g. p5 connects to p1
# each point is a 2D point (p[0], p[1])
def insert_polygon_points(vd, polygon):
pts = []
for p in polygon:
pts.append(ovd.Point(p[0], p[1])) # this just converts (p[0], p[1]) format points into ovd.Point
id_list = []
print "inserting ", len(pts), " point-sites:"
m = 0
# pts = [pt1, pt2, pt3, pt4, pt5] where each pt is of class ovd.Point
for p in pts:
id_list.append(vd.addVertexSite(p)) # note we store and return the Point ID returned here!
print " ", m, " added vertex ", id_list[len(id_list) - 1]
m = m + 1
return id_list
# insert polygon line-segments based on a list of IDs returned by insert_polygon_points()
# id_list = [0, 1, 2, 3, 4, 5] defines a closed polygon. the
# 0->1->2->3->4->5->0
# the IDs _must_ refer to points that have been previously inserted with vd.addVertexSite()
#
def insert_polygon_segments(vd, id_list):
j = 0
print "inserting ", len(id_list), " line-segments:"
for n in range(len(id_list)):
n_nxt = n + 1
if n == (len(id_list) - 1):
n_nxt = 0
print " ", j, "inserting segment ", id_list[n], " - ", id_list[n_nxt]
# this inserts a line-segment id_list[n] -> id_list[n_nxt] into the VoronoiDiagram
vd.addLineSite(id_list[n], id_list[n_nxt])
j = j + 1
# insert many polygons into vd
# segs is a list of polygons:
# segs = [poly1, poly2, poly3, ...]
# poly defines a closed polygon as a list of points
# poly1 = [ [x1,y1], [x2,y2], [x3,y3], ..., [xN,yN] ]
# where the last point [xN,yN] in the list connects to the first [x1,y1]
#
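# For example (coordinates invented for illustration), two closed triangles:
# segs = [ [[0.0, 0.0], [0.1, 0.0], [0.1, 0.1]],
#          [[0.2, 0.2], [0.3, 0.2], [0.3, 0.3]] ]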
def insert_many_polygons(vd, segs):
polygon_ids = []
t_before = time.time()
# all points must be inserted into the vd first!
for poly in segs:
poly_id = insert_polygon_points(vd, poly)
polygon_ids.append(poly_id)
t_after = time.time()
pt_time = t_after - t_before
# all line-segments are inserted then
t_before = time.time()
for ids in polygon_ids:
insert_polygon_segments(vd, ids)
t_after = time.time()
seg_time = t_after - t_before
return [pt_time, seg_time] # return timing-info, for benchmarking
# translate all segments by x,y
def translate(segs, x, y):
out = []
for seg in segs:
seg2 = []
for p in seg:
p2 = []
p2.append(p[0] + x)
p2.append(p[1] + y)
seg2.append(p2)
out.append(seg2)
return out
# call truetype-tracer to get font input geometry
# text = the text-string we want
# scale = used to scale the geometry to fit within a unit-circle
#
# output is a list of lists:
# [ [p1,p2,p3,p4,p5,p1] ,
# [p6,p7,p8,p9,p10, ... ,pN, p6],
# ...
# ]
# each point is a 2D point ( p[0], p[1] )
# each sub-list corresponds to a closed loop of line-segments
# e.g. p1->p2->p3->p4->p5->p1
#
# If the main(outer) geometry is given in e.g. CW orientation, then
# islands (closed loops within the main geometry) are given in CCW orientation
def ttt_segments(text, scale):
wr = ttt.SEG_Writer()
wr.arc = False
wr.conic = False
wr.cubic = False
wr.conic_biarc_subdivision = 10 # this has no effect?
wr.conic_line_subdivision = 50 # this increases nr of points
wr.cubic_biarc_subdivision = 10 # no effect?
wr.cubic_line_subdivision = 10 # no effect?
wr.setFont(3)
wr.scale = float(1) / float(scale)
ttt.ttt(text, wr)
segs = wr.get_segments()
return segs
# the segments come out of truetype-tracer in a slightly wrong format
# truetype-tracer outputs closed polygons with identical points
# at the start and end of the point-list. here we get rid of this repetition.
# input:
# [ [p1,p2,p3,p4,p5,p1] ,
# [p6,p7,p8,p9,p10, ... ,pN, p6],
# ...
# ]
# this functions simply removes the repeated end-point from each segment
# output:
# [ [p1,p2,p3,p4,p5] ,
# [p6,p7,p8,p9,p10, ... ,pN],
# ...
# ]
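# e.g. [ [p1, p2, p3, p1] ] becomes [ [p3, p2, p1] ]: the repeated end-point
# is popped and the loop is reversed (seg.reverse() below) to flip between
# interior and exterior offsets.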
def modify_segments(segs):
segs_mod = []
for seg in segs:
first = seg[0]
last = seg[len(seg) - 1]
assert (first[0] == last[0] and first[1] == last[1])
seg.pop()
seg.reverse() # to get interior or exterior offsets
segs_mod.append(seg)
# drawSegment(myscreen, seg)
return segs_mod
if __name__ == "__main__":
# this sets up a VTK viewport where we can draw in 3D
w = 1920 # width and height of VTK viewport
h = 1080
# w=1024
# h=1024
myscreen = ovdvtk.VTKScreen(width=w, height=h)
ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version())
myscreen.render()
scale = 1
far = 1
camPos = far
zmult = 3
myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
myscreen.camera.SetFocalPoint(0.0, 0, 0)
# create a VoronoiDiagram
# use far=1.0 for now. This means all input geometry should fit within a unit circle!
# 120 is a binning-parameter for nearest neighbor search. sqrt(n) where we have n points should be optimal
vd = ovd.VoronoiDiagram(far, 120)
# for vtk visualization of the VoronoiDiagram
# (not required for offsetting or drawing offsets)
vod = ovdvtk.VD(myscreen, vd, float(scale), textscale=0.01, vertexradius=0.003)
vod.drawFarCircle()
vod.textScale = 0.02
vod.vertexRadius = 0.0031
vod.drawVertices = 0
vod.drawVertexIndex = 0
vod.drawGenerators = 0
vod.offsetEdges = 0
vd.setEdgeOffset(0.05) # for visualization only. NOT offsetting!
# get segments from ttt
# this is the input geometry to VoronoiDiagram. It could also come from a text-file
# see the description of each function for details on the format
segs = ttt_segments("LinuxCNC", 40000)
segs = translate(segs, -0.06, 0.05)
segs = modify_segments(segs)
# build a VD from the input geometry
times = insert_many_polygons(vd, segs)
print "all sites inserted. "
print "VD check: ", vd.check() # sanity check
# this filters the diagram so we are left with only the interior or the exterior
# of the polygon. If the filtering is omitted we get offsets on both sides of the input geometry.
# try True/False here and see what happens
pi = ovd.PolygonInterior(False)
vd.filter_graph(pi)
# Create an Offset class, for offsetting.
of = ovd.Offset(vd.getGraph()) # pass the created graph to the Offset class
ofs_list = []
t_before = time.time()
for t in [0.002 * x for x in range(1, 20)]:
ofs = of.offset(t) # produce offsets at distance t
ofs_list.append(ofs)
t_after = time.time()
oftime = t_after - t_before
# offset output format
# ofs will be a list of offset-loops.
# [loop1, loop2, loop3, ...]
# each offset-loop contains offset-elements
# loop1 = [ofs1, ofs2, ofs3, ...]
# offset elements can be either lines or arcs
# an offset element is a list:
# ofs1 = [p, r, cen, cw]
# p = the end-point of the offset-element
# r = the radius if it is an arc, -1 for lines
# cen = the center-point if it is an arc
# cw = clockwise/anticlockwise True/False flag for arcs
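    # For instance (numbers invented for illustration):
    #   a line element: [p, -1, cen, cw]   (r == -1 marks a line)
    #   an arc element: [p, 0.05, ovd.Point(0.3, 0.15), False]   (radius 0.05)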
# now we draw the offsets in VTK
print len(ofs_list), " offsets to draw:"
m = 0
for ofs in ofs_list:
print m, " / ", len(ofs_list)
offset2vtk.drawOffsets2(myscreen, ofs)
m = m + 1
# draw some text on how long Offset ran
oftext = ovdvtk.Text()
oftext.SetPos((50, 100))
oftext_text = "Offset in {0:.3f} s CPU time.".format(oftime)
oftext.SetText(oftext_text)
myscreen.addActor(oftext)
# turn off the whole VD so we can more clearly see the offsets
# a VD filtered with both True and False is essentially invisible (both the interior and exterior of a polygon removed)
pi = ovd.PolygonInterior(True)
vd.filter_graph(pi)
# display timing-info on how long the VD build took
vod.setVDText2(times)
vod.setAll()
print "PYTHON All DONE."
myscreen.render()
myscreen.iren.Start()
| lgpl-2.1 | -6,805,446,223,300,510,000 | 33.23506 | 123 | 0.635285 | false |
apmechev/GRID_LRT | docs/conf.py | 1 | 5577 | # -*- coding: utf-8 -*-
#
# GRID_LRT documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 5 09:40:38 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GRID_LRT'
copyright = u'2019, Alexandar Mechev'
author = u'Alexandar Mechev'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import GRID_LRT
version = GRID_LRT.__version__
# The full version, including alpha/beta/rc tags.
release = GRID_LRT.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = "classic"
html_theme = "sphinx_rtd_theme"
#html_theme_options = {
# "rightsidebar": "false",
# "sidebarwidth": 300,
# "relbarbgcolor": "black"
#}
def skip(app, what, name, obj, skip, options):
if name == "__init__":
return False
return skip
def setup(app):
app.connect("autodoc-skip-member", skip)
app.add_javascript('copybutton.js')
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'GRID_LRTdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GRID_LRT.tex', u'GRID\\_LRT Documentation',
u'Alexandar Mechev', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'grid_lrt', u'GRID_LRT Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GRID_LRT', u'GRID_LRT Documentation',
author, 'GRID_LRT', 'Distributing LOFAR processing on the Dutch Grid infrastructure',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None,
'couchdb':('http://couchdb-python.readthedocs.io/en/latest/',None)}
| gpl-3.0 | 2,674,878,722,036,451,300 | 29.47541 | 90 | 0.671149 | false |
xclaeys/ElastoPhi | postprocessing/graphes_output_err_decrease.py | 1 | 2917 | # -*-coding:Utf-8 -*
import Vue.Figure as figure
import Vue.Donnee as donnee
import Lecture.FonctionLectureClassique as classique
import numpy as np
import sys
########################################################################################
#------------------------------- Input -----------------------------
########################################################################################
if (len(sys.argv)==2):
filename=sys.argv[1]
print(filename+" will be printed")
else:
print("You must give the name of the output file")
sys.exit(1)
outputname_err="/".join(filename.split("/")[0:-1])+"/graphe_"+(filename.split("/")[-1]).split(".")[0]
########################################################################################
#------------------------------- Figure -----------------------------
########################################################################################
colors=["m","b","c","r","g","y","k","firebrick","purple"]
markers=["^","o",".","v"]
(dist,rank) = classique.lecture(filename,0,1)
(err1,err2) = classique.lecture(filename,2,3)
Dist = []
Rank = []
Err1 = []
Err2 = []
Courbes_dist = []
Courbes_rank = []
Courbes_err1 = []
Courbes_err2 = []
compt=0
ymax_err=0
ymin_err=1e30
offset = 49
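# The input file is assumed to hold 5 blocks of 49 consecutive rows (one block
# per distance); each pass of the loop below slices out one block.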
for i in range(1,6):
Rank.append(rank[compt+0:compt+offset])
Dist.append(dist[compt+0])
Err1.append(err1[compt+0:compt+offset])
Err2.append(err2[compt+0:compt+offset])
ymax_err=max(ymax_err,max(err1[compt+0:compt+offset]))
ymax_err=max(ymax_err,max(err2[compt+0:compt+offset]))
ymin_err=min(ymin_err,min(err1[compt+0:compt+offset]))
ymin_err=min(ymin_err,min(err2[compt+0:compt+offset]))
compt+=offset
ncolor=0
for i in range(0,len(Dist)):
line1={"linestyle":"-","linewidth":3,"linecolor":colors[ncolor]}
line2={"linestyle":"--","linewidth":3,"linecolor":colors[ncolor]}
marker={"markerstyle":"None","markersize":10,"fillstyle":"full"}
Courbes_err1.append(donnee.Ligne(nom=r"ACA - distance="+str(Dist[i]),ordonnee=Err1[i],abscisse=Rank[i],line=line1,marker=marker))
Courbes_err2.append(donnee.Ligne(nom=r"SVD - distance="+str(Dist[i]),ordonnee=Err2[i],abscisse=Rank[i],line=line2,marker=marker))
ncolor+=1
xlim=[min(Rank[0])*0.75,max(Rank[0])*1.01]
ylim_erro=[ymin_err*0.75,ymax_err*1.25]
xlabel={"label":"Rank","fontsize":20}
ylabel_erro={"label":"Relative error","fontsize":20}
# titre={"titre":"Test","fontsize":20,"loc":"center"}
legende={"loc":"upper left","bbox_to_anchor":(1.01,1),"ncol":1,"fontsize":12}
Figure_erro=figure.Graphe1D(id=0,legende=legende,xlim=xlim,ylim=ylim_erro,xlabel=xlabel,ylabel=ylabel_erro,yscale="log",axis="off",format="pdf")
for courbe in Courbes_err1:
Figure_erro.AjoutCourbe(courbe)
for courbe in Courbes_err2:
Figure_erro.AjoutCourbe(courbe)
Figure_erro.TraceGraphe1D()
Figure_erro.EnregistreFigure(outputname_err)
Figure_erro.FermeFigure()
| lgpl-3.0 | -2,422,340,530,487,132,000 | 27.598039 | 144 | 0.569421 | false |
diffeo/Datawake | memex-datawake-stream/src/datawakeio/HBASEEntityDataConnector.py | 1 | 1852 | import happybase
from datawakeio.data_connector import ExtractedDataConnector
class HBASEDataConnector(ExtractedDataConnector):
def __init__(self, hbase_host):
ExtractedDataConnector.__init__(self)
self.hbase_host = hbase_host
self.hbase_conn = None
def open(self):
self.hbase_conn = happybase.Connection(self.hbase_host)
def close(self):
if self.hbase_conn is not None:
self.hbase_conn.close()
def _checkConn(self):
self.open()
def insertHBASE(self, rowkey_prefix, items, table):
try:
self._checkConn()
hbase_table = self.hbase_conn.table(table)
batch_put = hbase_table.batch(batch_size=len(items))
for i in items:
batch_put.put(row="%s%s" % (rowkey_prefix, i), data={"colFam:c": ""})
batch_put.send()
finally:
self.close()
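    # The resulting HBase row keys are "<rowkey_prefix><item>" with a single
    # empty cell in column family "colFam". For instance (entity invented for
    # illustration): "http://example.com\x00phone\x00555-1234".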
def insert_entities(self, url, entity_type, entity_values):
rowkey_prefix = "%s\0%s\0" % (url, entity_type)
self.insertHBASE(rowkey_prefix, entity_values, "general_extractor_web_index_hbase")
def insert_domain_entities(self, domain, url, entity_type, entity_values):
rowkey_prefix = "%s\0%s\0%s\0" % (domain, url, entity_type)
self.insertHBASE(rowkey_prefix, entity_values, "domain_extractor_web_index_hbase")
def get_domain_entity_matches(self, domain, type, values):
try:
self._checkConn()
hbase_table = self.hbase_conn.table("datawake_domain_entities_hbase")
rowkey = "%s\0%s\0" % (domain, type)
found = []
for value in values:
for item in hbase_table.scan(row_prefix="%s%s" % (rowkey, value)):
found.append(value)
return found
finally:
self.close() | apache-2.0 | -8,978,908,006,801,545,000 | 33.962264 | 91 | 0.591793 | false |
FrodeSolheim/fs-uae-launcher | amitools/vamos/lib/lexec/Pool.py | 1 | 1784 | from amitools.vamos.log import log_exec
from amitools.vamos.error import *
from .Puddle import Puddle
class Pool:
def __init__(self, mem, alloc, flags, size, thresh, poolid):
self.alloc = alloc
self.mem = mem
self.minsize = size
self.flags = flags
self.thresh = thresh
self.name = " in Pool %x" % poolid
self.puddles = []
def __del__(self):
while len(self.puddles) > 0:
puddle = self.puddles.pop()
puddle.__del__()
def __str__(self):
poolstr = ""
for puddle in self.puddles:
if poolstr == "":
poolstr = "{%s}" % puddle
else:
poolstr = "%s,{%s}" % (poolstr,puddle)
return poolstr
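  # Allocation strategy of AllocPooled below (descriptive note): requests of
  # self.thresh bytes or more get a dedicated puddle sized to the request;
  # smaller requests are served from an existing puddle, or from a fresh
  # puddle of self.minsize bytes if none has room.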
def AllocPooled(self, label_mgr, name, size):
result = None
if size >= self.thresh:
puddle = Puddle(self.mem, self.alloc, label_mgr, name, size)
if puddle != None:
self.puddles.append(puddle)
result = puddle.AllocPooled(name + self.name, size)
else:
for puddle in self.puddles:
result = puddle.AllocPooled(name + self.name, size)
if result != None:
break
# none of the puddles had enough memory
if result == None:
puddle = Puddle(self.mem, self.alloc, label_mgr, name, self.minsize)
if puddle != None:
self.puddles.append(puddle)
result = puddle.AllocPooled(name + self.name, size)
if result == None:
log_exec.info("AllocPooled: Unable to allocate memory (%x)", size)
return result
def FreePooled(self, mem, size):
if mem != 0:
for puddle in self.puddles:
if puddle.contains(mem,size):
puddle.FreePooled(mem,size)
return
raise VamosInternalError("FreePooled: invalid memory, not in any puddle : ptr=%06x" % mem)
| gpl-2.0 | -7,964,768,728,106,835,000 | 29.237288 | 96 | 0.598094 | false |
mfalesni/cfme_tests | cfme/tests/containers/test_table_views.py | 1 | 3237 | from random import choice
from collections import OrderedDict
import pytest
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.containers.provider import ContainersProvider
pytestmark = [
pytest.mark.tier(2),
pytest.mark.usefixtures('setup_provider'),
pytest.mark.provider([ContainersProvider], scope='function')
]
VIEWS = ('Grid View', 'Tile View', 'List View')
# We're using OrderedDict in order to be able to set keys and values
# to DefaultView and keep the order of the LUT
objects_mapping = OrderedDict({ # <object> : <ui name>
ContainersProvider: 'Containers Providers',
'container_images': 'Container Images',
'container_image_registries': 'Image Registries',
'container_projects': 'Projects',
'container_routes': 'Routes',
'container_nodes': 'Nodes',
'container_pods': 'Pods',
'container_services': 'Services',
'containers': 'Containers',
'container_replicators': 'Replicators'
})
@pytest.yield_fixture(scope='function')
def random_default_views(appliance):
"""This fixture setup random default views for container objects.
Revert the default views to the original on exit"""
# Collecting the original default views and Generating random views LUT for test:
original_default_views, tested_default_views = OrderedDict(), OrderedDict()
for collection_name, ui_name in objects_mapping.items():
original_default_views[collection_name] = (
appliance.user.my_settings.default_views.get_default_view(ui_name))
tested_default_views[collection_name] = choice(VIEWS)
appliance.user.my_settings.default_views.set_default_view(objects_mapping.values(),
tested_default_views.values())
yield tested_default_views
# setting back the default views to the original state:
appliance.user.my_settings.default_views.set_default_view(objects_mapping.values(),
original_default_views.values())
@pytest.mark.polarion('CMP-10568')
def test_default_views(appliance, random_default_views):
for collection_name in objects_mapping.keys():
obj = (ContainersProvider if collection_name is ContainersProvider
else getattr(appliance.collections, collection_name))
view = navigate_to(obj, 'All', use_resetter=False)
assert (random_default_views[collection_name].lower() ==
view.toolbar.view_selector.selected.lower()), (
"Failed to setup default view \"{}\" for {}".format(
view, objects_mapping[collection_name])
)
@pytest.mark.polarion('CMP-10570')
def test_table_views(appliance):
for collection_name in objects_mapping.keys():
obj = (ContainersProvider if collection_name is ContainersProvider
else getattr(appliance.collections, collection_name))
view = navigate_to(obj, 'All')
view_to_select = choice(VIEWS)
view.toolbar.view_selector.select(view_to_select)
assert view_to_select.lower() == view.toolbar.view_selector.selected.lower(), (
"Failed to set view \"{}\" For {}".format(view, collection_name)
)
| gpl-2.0 | 5,295,304,683,553,151,000 | 42.16 | 94 | 0.672536 | false |
eaplatanios/tensorflow | tensorflow/python/kernel_tests/manip_ops_test.py | 1 | 7281 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for manip_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import manip_ops
from tensorflow.python.platform import test as test_lib
# pylint: disable=g-import-not-at-top
try:
from distutils.version import StrictVersion as Version
# numpy.roll for multiple shifts was introduced in numpy version 1.12.0
NP_ROLL_CAN_MULTISHIFT = Version(np.version.version) >= Version("1.12.0")
except ImportError:
NP_ROLL_CAN_MULTISHIFT = False
# pylint: enable=g-import-not-at-top
class RollTest(test_util.TensorFlowTestCase):
def _testRoll(self, np_input, shift, axis):
expected_roll = np.roll(np_input, shift, axis)
with self.test_session():
roll = manip_ops.roll(np_input, shift, axis)
self.assertAllEqual(roll.eval(), expected_roll)
def _testGradient(self, np_input, shift, axis):
with self.test_session():
inx = constant_op.constant(np_input.tolist())
xs = list(np_input.shape)
y = manip_ops.roll(inx, shift, axis)
# Expected y's shape to be the same
ys = xs
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, y, ys, x_init_value=np_input)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testAll(self, np_input, shift, axis):
self._testRoll(np_input, shift, axis)
if np_input.dtype == np.float32:
self._testGradient(np_input, shift, axis)
def testIntTypes(self):
for t in [np.int32, np.int64]:
self._testAll(np.random.randint(-100, 100, (5)).astype(t), 3, 0)
if NP_ROLL_CAN_MULTISHIFT:
self._testAll(
np.random.randint(-100, 100, (4, 4, 3)).astype(t), [1, -2, 3],
[0, 1, 2])
self._testAll(
np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t), [0, 1, -2],
[1, 2, 3])
def testFloatTypes(self):
for t in [np.float32, np.float64]:
self._testAll(np.random.rand(5).astype(t), 2, 0)
if NP_ROLL_CAN_MULTISHIFT:
self._testAll(np.random.rand(3, 4).astype(t), [1, 2], [1, 0])
self._testAll(np.random.rand(1, 3, 4).astype(t), [1, 0, -3], [0, 1, 2])
def testComplexTypes(self):
for t in [np.complex64, np.complex128]:
x = np.random.rand(4, 4).astype(t)
self._testAll(x + 1j * x, 2, 0)
if NP_ROLL_CAN_MULTISHIFT:
x = np.random.rand(2, 5).astype(t)
self._testAll(x + 1j * x, [1, 2], [1, 0])
x = np.random.rand(3, 2, 1, 1).astype(t)
self._testAll(x + 1j * x, [2, 1, 1, 0], [0, 3, 1, 2])
def testNegativeAxis(self):
self._testAll(np.random.randint(-100, 100, (5)).astype(np.int32), 3, -1)
self._testAll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), 3, -2)
    # Make sure a negative axis satisfies 0 <= axis + dims < dims
with self.test_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of range"):
manip_ops.roll(np.random.randint(-100, 100, (4, 4)).astype(np.int32),
3, -10).eval()
def testInvalidInputShape(self):
# The input should be 1-D or higher, checked in shape function.
with self.assertRaisesRegexp(
ValueError, "Shape must be at least rank 1 but is rank 0"):
manip_ops.roll(7, 1, 0)
def testRollInputMustVectorHigherRaises(self):
# The input should be 1-D or higher, checked in kernel.
tensor = array_ops.placeholder(dtype=dtypes.int32)
shift = 1
axis = 0
with self.test_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"input must be 1-D or higher"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={tensor: 7})
def testInvalidAxisShape(self):
# The axis should be a scalar or 1-D, checked in shape function.
with self.assertRaisesRegexp(
ValueError, "Shape must be at most rank 1 but is rank 2"):
manip_ops.roll([[1, 2], [3, 4]], 1, [[0, 1]])
def testRollAxisMustBeScalarOrVectorRaises(self):
# The axis should be a scalar or 1-D, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = 1
axis = array_ops.placeholder(dtype=dtypes.int32)
with self.test_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"axis must be a scalar or a 1-D vector"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={axis: [[0, 1]]})
def testInvalidShiftShape(self):
# The shift should be a scalar or 1-D, checked in shape function.
with self.assertRaisesRegexp(
ValueError, "Shape must be at most rank 1 but is rank 2"):
manip_ops.roll([[1, 2], [3, 4]], [[0, 1]], 1)
def testRollShiftMustBeScalarOrVectorRaises(self):
# The shift should be a scalar or 1-D, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = array_ops.placeholder(dtype=dtypes.int32)
axis = 1
with self.test_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"shift must be a scalar or a 1-D vector"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [[0, 1]]})
def testInvalidShiftAndAxisNotEqualShape(self):
# The shift and axis must be same size, checked in shape function.
with self.assertRaisesRegexp(ValueError, "both shapes must be equal"):
manip_ops.roll([[1, 2], [3, 4]], [1], [0, 1])
def testRollShiftAndAxisMustBeSameSizeRaises(self):
# The shift and axis must be same size, checked in kernel.
tensor = [[1, 2], [3, 4]]
shift = array_ops.placeholder(dtype=dtypes.int32)
axis = [0, 1]
with self.test_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"shift and axis must have the same size"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [1]})
def testRollAxisOutOfRangeRaises(self):
tensor = [1, 2]
shift = 1
axis = 1
with self.test_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of range"):
manip_ops.roll(tensor, shift, axis).eval()
if __name__ == "__main__":
test_lib.main()
| apache-2.0 | 4,231,725,798,294,550,000 | 40.135593 | 80 | 0.636726 | false |
Kagami/shitsu | shitsu/utils/__init__.py | 1 | 4409 | ##################################################
# shitsu - tiny and flexible xmpp bot framework
# Copyright (C) 2008-2012 Kagami Hiiragi <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##################################################
# unescape function by Fredrik Lundh
# <http://effbot.org/zone/re-sub.htm#unescape-html>
##################################################
import re
import random
import urllib2
import urlparse
import traceback
import htmlentitydefs
from shitsu.utils import fix_socket
reload(fix_socket)
def trim(docstring):
docstring = docstring.strip()
return "\n".join([line.strip() for line in docstring.splitlines()])
def sandbox(fn):
def new(*args, **kwargs):
try:
fn(*args, **kwargs)
except Exception:
traceback.print_exc()
return new
host_rec = re.compile(r"^([-A-Za-z0-9]{1,63}\.)*[-A-Za-z0-9]{1,63}\.?$")
private_hosts_rec = re.compile(
r"^("
r"127\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}|localhost(\.localdomain)?\.?|"
r"192\.168\.[0-9]{1,3}\.[0-9]{1,3}|10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}|"
r"172\.(1[6-9]|2[0-9]|3[0-1])\.[0-9]{1,3}\.[0-9]{1,3}"
r")$", re.I)
def fix_host(host, forbid_private=True):
"""Check validness of hostname and fix idna hosts.
Optionally forbid private hosts.
"""
if len(host) > 255:
return
try:
host = host.encode("idna")
except UnicodeError:
return
if not host_rec.match(host):
return
if forbid_private and private_hosts_rec.match(host):
return
return host
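# Illustrative behaviour (example hostnames, not from the original source):
# fix_host(u"example.com") -> "example.com"
# fix_host(u"localhost") -> None (private hosts are rejected by default)
# fix_host(u"not a host!") -> None (fails IDNA encoding / the hostname regex)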
def fix_url(url, forbid_private=True):
"""Check and fix url's hostname via fix_host."""
p = urlparse.urlsplit(url)
userpass, at, hostport = p.netloc.partition("@")
if not at: userpass, hostport = "", userpass
host, colon, port = hostport.partition(":")
host = fix_host(host, forbid_private)
if not host:
return
netloc = "".join([userpass, at, host, colon, port])
url_out = urlparse.urlunsplit(
(p.scheme, netloc, p.path, p.query, p.fragment))
return url_out.encode("utf-8")
default_url_timeout = 4
default_max_page_size = 1 * 1024 * 1024
request_headers = {
"User-Agent": ("Mozilla/5.0 (Windows NT 6.1; rv:9.0) "
"Gecko/20100101 Firefox/9.0")
}
def get_url(url, max_page_size=default_max_page_size, return_headers=False,
timeout=default_url_timeout, forbid_private=True):
url = fix_url(url, forbid_private)
if not url:
return ""
request = urllib2.Request(url, None, request_headers)
try:
f = urllib2.urlopen(request, timeout=timeout)
data = f.read(max_page_size)
except Exception:
return ""
else:
if return_headers:
return data, f.info()
else:
return data
def unescape(text):
"""Removes HTML or XML character references and
entities from a text string.
@param text The HTML (or XML) source text.
@return The plain text, as a Unicode string, if necessary.
Source: http://effbot.org/zone/re-sub.htm#unescape-html
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
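# Illustrative examples (Python 2 unicode strings):
# unescape(u"&lt;b&gt;") -> u"<b>" (named entities)
# unescape(u"&#65;") -> u"A" (decimal character reference)
# unescape(u"&#x41;") -> u"A" (hexadecimal character reference)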
def get_id():
return "".join(map(lambda _: str(random.randint(0, 9)), xrange(10)))
| gpl-3.0 | -3,346,007,296,900,773,400 | 30.492857 | 79 | 0.585167 | false |
kevinconway/require.py | require/__init__.py | 1 | 7262 | """Alternate import logic that provides for multiple dependency versions."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import contextlib
import inspect
import os
import sys
from os import path
# If this is the first import of this module then store a reference to the
# original, builtin import statement. This is used later for the optional
# patching, and restoration, of the import command.
BUILTINS_NAME = '__builtin__' if '__builtin__' in sys.modules else 'builtins'
if '__original__import' not in sys.modules:
sys.modules['__original__import'] = sys.modules[BUILTINS_NAME].__import__
class ModuleCache(object):
"""Replacment for sys.modules that respects the physical path of an import.
The standard sys.modules cache can only cache on version of a module that
has been imported. This replacement uses the file path of the requesting
module (the one performing the import) as a secondary key when drawing
from the cache.
"""
def __init__(self):
"""Initialize the module cache."""
self._cache = defaultdict(dict)
def set(self, name, path, module):
"""Store a module in the cache with the given path key.
Args:
name (str): The name of the import.
path (str): The absolute path of the requesting module directory.
module (object): The Python module object to store.
"""
self._cache[name][path] = module
def cached(self, name, path):
"""Determine if an import is already cached.
Args:
name (str): The name of the import.
path (str): The absolute path of the requesting module directory.
Returns:
Bool: True if cached else False.
"""
return name in self._cache and path in self._cache[name]
def get(self, name, path, default=None):
"""Fetch a module from the cache with a given path key.
Args:
name (str): The name of the import.
path (str): The absolute path of the requesting module directory.
default: The value to return if not found. Defaults to None.
"""
return self._cache[name].get(path, default)
def get_nearest(self, name, path, default=None):
"""Fetch the module from the cache nearest the given path key.
Args:
name (str): The name of the import.
path (str): The absolute path of the requesting module directory.
default: The value to return if not found. Defaults to None.
If the specific path key is not present in the cache, this method will
search the cache for the nearest parent path with a cached value. If
a parent cache is found it is returned. Otherwise the given default
value is returned.
"""
if self.cached(name, path):
return self.get(name, path, default)
for parent in sorted(self._cache[name], key=len, reverse=True):
if path.startswith(parent):
# Set the cache for quicker lookups later.
self.set(name, path, self.get(name, parent))
return self.get(name, path, default)
return default
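# Minimal usage sketch of ModuleCache (paths and the 'dep' name are invented
# for illustration):
#   cache = ModuleCache()
#   cache.set('dep', '/project', module_object)
#   cache.get('dep', '/project')              # exact hit -> module_object
#   cache.get_nearest('dep', '/project/sub')  # nearest-parent hit, then cached
#   cache.get('dep', '/elsewhere')            # miss -> None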
@contextlib.contextmanager
def local_modules(path, pymodules='.pymodules'):
"""Set the nearest pymodules directory to the first sys.path element.
Args:
path (str): The path to start the search in.
pymodules (str): The name of the pymodules directory to search for.
The default value is .pymodules.
If no valid pymodules directory is found in the path no sys.path
manipulation will take place.
"""
path = os.path.abspath(path)
previous_path = None
target_path = None
while previous_path != path:
if os.path.isdir(os.path.join(path, pymodules)):
target_path = path
break
previous_path, path = path, os.path.dirname(path)
if target_path:
sys.path.insert(1, os.path.join(target_path, pymodules))
try:
yield target_path
finally:
if target_path:
sys.path.pop(1)
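# Usage sketch (the path below is hypothetical): while the block runs, the
# nearest '.pymodules' directory found walking up from the given path sits at
# sys.path[1]; it is popped again when the block exits.
#   with local_modules('/home/user/project/pkg') as root:
#       pass  # imports here resolve against root + '/.pymodules'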
class Importer(object):
"""An import statement replacement.
This import statement alternative uses a custom module cache and path
manipulation to override the default Python import behaviour.
"""
def __init__(self, cache=None, pymodules='.pymodules'):
"""Initialize the importer with a custom cache.
Args:
cache (ModuleCache): An instance of ModuleCache.
pymodules (str): The name to use when searching for pymodules.
"""
self._cache = cache or ModuleCache()
self._pymodules = pymodules or '.pymodules'
@staticmethod
def _calling_dir():
"""Get the directory containing the code that called require.
This function will look 2 or 3 frames up from the stack in order to
resolve the directory depending on whether require was called
directly or proxied through __call__.
"""
stack = inspect.stack()
current_file = __file__
if not current_file.endswith('.py'):
current_file = current_file[:-1]
calling_file = inspect.getfile(stack[2][0])
if calling_file == current_file:
calling_file = inspect.getfile(stack[3][0])
return path.dirname(path.abspath(calling_file))
def require(
self,
name,
locals=None,
globals=None,
fromlist=None,
level=None,
):
"""Import modules using the custom cache and path manipulations."""
# Default and allowed values change after 3.3.
level = -1 if sys.version_info[:2] < (3, 3) else 0
calling_dir = self._calling_dir()
module = self._cache.get_nearest(name, calling_dir)
if module:
return module
with local_modules(calling_dir, self._pymodules) as pymodules:
module = sys.modules['__original__import'](
name,
locals,
globals,
fromlist,
level,
)
if self._pymodules in repr(module):
del sys.modules[name]
# Create the module cache key if it doesn't already exist.
self._cache.set(name, pymodules, module)
# Enjoy your fresh new module object.
return module
def __call__(self, *args, **kwargs):
"""Proxy functions for require."""
return self.require(*args, **kwargs)
require = Importer()
def patch_import(importer=require):
"""Replace the builtin import statement with the wrapped version.
This function may be called multiple times without having negative side
effects.
"""
sys.modules[BUILTINS_NAME].__import__ = importer
def unpatch_import():
"""Restore the builtin import statement to the original version.
    This function may be called multiple times without having negative side
effects.
"""
sys.modules[BUILTINS_NAME].__import__ = sys.modules['__original__import']
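# Example usage (the dependency name 'mydep' is invented for illustration):
#   from require import require, patch_import, unpatch_import
#   mydep = require('mydep')   # resolved against the nearest .pymodules
#   patch_import()             # route the import statement through require
#   import mydep               # now served from the path-aware cache
#   unpatch_import()           # restore the builtin import statement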
| apache-2.0 | 8,249,302,086,919,257,000 | 29.512605 | 79 | 0.623382 | false |
Vrekrer/magdynlab | instruments/srs_ds335.py | 1 | 4144 | # coding=utf-8
# Author: Diego González Chávez
# email : [email protected] / [email protected]
#
# This class controls the:
# Signal Generator
# Stanford Research Systems : DS335
#
# TODO:
# Make documentation
from .instruments_base import InstrumentBase as _InstrumentBase
__all__ = ['SRS_DS335']
class SRS_DS335(_InstrumentBase):
def __init__(self,
GPIB_Address=15, GPIB_Device=0,
ResourceName=None, logFile=None):
if ResourceName is None:
ResourceName = 'GPIB%d::%d::INSTR' % (GPIB_Device, GPIB_Address)
super().__init__(ResourceName, logFile)
        self._IDN = 'SRS_DS335'
self.VI.write_termination = self.VI.LF
self.VI.read_termination = self.VI.LF
self._unit = 'VP' # Volts peak to peak
@property
def amplitude(self):
'''
        Sets or returns the output voltage amplitude.
        Use the "unit" property to set the units used (Vpp or Vrms).
        '''
        amp_str = self.query('AMPL?')
self._unit = amp_str[-2:]
return float(amp_str[:4])
@amplitude.setter
def amplitude(self, vAmp):
self.write('AMPL %0.2f%s' % (vAmp, self._unit))
@property
def unit(self):
'''
        Sets or returns the voltage units (Vpp or Vrms).
        Changing the unit corrects the output voltage value
        to keep it at the same physical value.
'''
self.amplitude # read unit from hardware
return {'VP': 'Vpp', 'VR': 'Vrms'}[self._unit]
@unit.setter
def unit(self, vUnit):
newUnit = {'Vpp': 'VP', 'Vrms': 'VR'}.get(vUnit, 'VP')
amp = self.amplitude # read amplitude and unit from hardware
oldUnit = self._unit
self._unit = newUnit
unitChange_str = '%sto%s' % (oldUnit, newUnit)
unitChange_factors = {'VPtoVR': 0.5**0.5, 'VRtoVP': 2**0.5}
if unitChange_str in unitChange_factors:
self.amplitude = amp * unitChange_factors[unitChange_str]
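        # e.g. (values illustrative, following the factors above): switching
        # a 1.00 Vpp setting to Vrms rescales the stored amplitude by
        # 1/sqrt(2) to roughly 0.71, so the physical level is preserved.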
@property
def frequency(self):
        '''Sets or returns the output frequency in Hz'''
return self.query_float('FREQ?')
@frequency.setter
def frequency(self, vFreq):
self.write('FREQ %0.6f' % vFreq)
@property
def offset(self):
        '''Sets or returns the output offset in volts'''
return self.query_float('OFFS?')
@offset.setter
def offset(self, vOffset):
self.write('OFFS %0.2f' % vOffset)
@property
def loadImpedance(self):
'''
        Sets or returns the output source impedance mode
"HighZ" or "50 Ohm"
'''
val = self.query('TERM?')
return {'1': 'HighZ', '0': '50 Ohm'}[val]
@loadImpedance.setter
def loadImpedance(self, vTerm):
term_str = {'HighZ': '1', '50 Ohm': '0'}.get(vTerm, '1')
self.write('TERM %s' % term_str)
@property
def syncOutput(self):
'''
        Returns the sync output state or sets it to "ON" or "OFF"
'''
val = self.query('SYNC?')
return {'1': 'ON', '0': 'OFF'}[val]
@syncOutput.setter
def syncOutput(self, vSync):
sync_str = {'ON': '1', 'OFF': '0'}.get(vSync, '1')
self.write('SYNC %s' % sync_str)
    @property
    def function(self):
        '''
        Sets or returns the output function:
        "Sine", "Square", "Triangle", "Ramp" or "Noise"
        '''
        val = self.query('FUNC?')
        return {'0': 'Sine', '1': 'Square', '2': 'Triangle',
                '3': 'Ramp', '4': 'Noise'}[val]
    @function.setter
    def function(self, vFunct):
        funct_str = {'Sine': '0', 'Square': '1', 'Triangle': '2',
                     'Ramp': '3', 'Noise': '4'}.get(vFunct, '0')
        self.write('FUNC %s' % funct_str)
def Display(self, show_funct='Amp'):
'''
        Changes the hardware display to show:
        "Amplitude" ('Amp'), "Frequency" ('Freq') or "Offset" ('Offs')
'''
dps_str = {'Amplitude': '2', 'Frequency': '1', 'Offset': '3',
'Amp': '2', 'Freq': '1', 'Offs': '3'}.get(show_funct, '2')
self.write('KEYS %s' % dps_str)
| mit | 5,892,871,699,193,613,000 | 29.681481 | 77 | 0.549734 | false |
PALab/pyjamaseis | pyjamaseis/pyjamaseisv2.0.py | 1 | 89157 | #=================================================================================================================
# Structure of PyjAmaseis
#
# - IMPORTS
# - STATION INFORMATION USER INTERFACE CODE (class myFrame4)
# - SECONDARY OPTIONS UI WINDOW CODE (class selectionWindow)
# - CODE FOR FRAME WHICH APPEARS AT BOTTOM OF PLOTTING WINDOW (class lowerFrame)
# - DATA SELECTION AND EXTRACTION CODE (class dataHandler)
# - INITIALIZATION OF PLOTTING CODE (class Plotting)
# - CLASS FOR HANDLING TK FRAMES WHICH MUST BE IN MAIN THREAD (class mFrame)
# - alignToBottomRight Function - aligns secondary window to bottom right hand corner of screen
# - secondaryWindow Function - creates the Options window
# - Collecting Function - collects and process data read from the TC1
# - plotPrevious Function - loads and plots pre-recorded data
# - saveHourData Function - saves data recorded by TC1
# - getSerialPort Function - finds the serial port the TC1 is connected to
# - serial_ports Functions - returns all active serial ports
# - initializeHeader Function used to create a header object for headers in a SAC object
# - plotData - called by the Plotting function to plot data
# - calculateYAxisLabels - creates 24 hour UTC labels for the y axis, these are saved in an array
# - calculateYAxisLabelsOneHour - creates y axis labels for the current hour in UTC divided into 5 minute sections
# - xAxisLabels Function - Creates the labels which appear on the x axis of the plotting window
# - window_close Function - causes the collecting and plotting processes to stop before closing windows
# - directory_handler Function - checks for a directory or creates a new one
# - getHourData Function - looks for an loads previously recorded data
# - if __name__ == '__main__': - the is where the code starts
#
#=================================================================================================================
### Importing all required libraries for running PyjAmaseis
### v1.0 change: The cross-platform screenshot module pyscreenshot is imported instead of the PIL module ImageGrab
### which is Windows-only. Tkinter messagebox is also imported.
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
import sys
import platform
import Tkinter as tk
import tkMessageBox as msgbx
import time as Time
import serial
from obspy import read, Trace, UTCDateTime
from obspy.core.stream import Stream
from obspy.core import AttribDict
from obspy.core.trace import Stats
import datetime as dt
from datetime import datetime
from decimal import *
from multiprocessing import Queue
import pyscreenshot
from threading import Thread
import wx
from pygeocoder import Geocoder
import os
import errno
import glob
import fileinput
import pycurl
import base64
#### Initial window presented to user when launching PyjAmaseis for the first time
#### This window will require the user to enter the station information which will be later used when saving SAC files
#### Class was auto-generated using wxGlade
class MyFrame4(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame4.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.iconFile = "icons/icon.ico"
self.icon = wx.Icon(self.iconFile, wx.BITMAP_TYPE_ICO)
self.SetIcon(self.icon)
self.bitmap_1 = wx.StaticBitmap(self, wx.ID_ANY, wx.Bitmap("logo.gif", wx.BITMAP_TYPE_ANY))
self.label_4 = wx.StaticText(self, wx.ID_ANY, ("Station Information\n"))
self.label_6 = wx.StaticText(self, wx.ID_ANY, ("Station ID:"))
self.text_ctrl_2 = wx.TextCtrl(self, wx.ID_ANY, "")
self.label_7 = wx.StaticText(self, wx.ID_ANY, ("Station Name:"))
self.text_ctrl_3 = wx.TextCtrl(self, wx.ID_ANY, "")
self.label_8 = wx.StaticText(self, wx.ID_ANY, ("Street Address:"))
self.text_ctrl_4 = wx.TextCtrl(self, wx.ID_ANY, "")
self.label_5 = wx.StaticText(self, wx.ID_ANY, ("Geographic Coordinates\n"))
self.label_9 = wx.StaticText(self, wx.ID_ANY, ("Longitude:"))
self.text_ctrl_6 = wx.TextCtrl(self, wx.ID_ANY, "")
self.label_10 = wx.StaticText(self, wx.ID_ANY, ("Latitude:"))
self.text_ctrl_7 = wx.TextCtrl(self, wx.ID_ANY, "")
self.label_11 = wx.StaticText(self, wx.ID_ANY, ("Elevation:"))
self.text_ctrl_8 = wx.TextCtrl(self, wx.ID_ANY, "")
self.panel_1 = wx.Panel(self, wx.ID_ANY)
self.button_2 = wx.Button(self, wx.ID_ANY, ("Begin"))
self.__set_properties()
self.__do_layout()
# end wxGlade
self.Bind(wx.EVT_BUTTON, self.begin, id = self.button_2.Id)
self.Bind(wx.EVT_TEXT, self.checkAddress, id = self.text_ctrl_4.Id)
def checkAddress(self, e):
        ## This method looks up the geographic coordinates of the address entered in the street address field
try:
results = Geocoder.geocode(self.text_ctrl_4.GetValue())
longitude, latitude = results[0].coordinates
self.text_ctrl_6.SetValue(str(longitude))
self.text_ctrl_7.SetValue(str(latitude))
self.text_ctrl_8.SetValue(str(0.0))
except:
pass
def begin(self, e):
        #### Station Information entered is saved into a text file; every time the application is loaded,
        #### the information stored in this file will be read and saved in memory for use when saving SAC files -
        #### this information goes into the SAC file headers
#writing user entered information to text file
file = open("Station Information.txt", "w")
file.write("Station ID:"+self.text_ctrl_2.GetValue()+"\n")
file.write("Station Name:"+self.text_ctrl_3.GetValue()+"\n")
file.write("Station Address:"+self.text_ctrl_4.GetValue()+"\n")
file.write("Latitude:"+self.text_ctrl_6.GetValue()+"\n")
file.write("Longitude:"+self.text_ctrl_7.GetValue()+"\n")
file.write("Elevation:"+self.text_ctrl_8.GetValue()+"\n")
file.write("DCShift:0"+"\n")
file.close()
self.Close()
#close and exit mainloop
def __set_properties(self):
# begin wxGlade: MyFrame4.__set_properties
self.SetTitle("PyjAmaseis v1.0")
self.SetSize((804, 456))
self.SetBackgroundColour(wx.Colour(255, 255, 255))
self.SetForegroundColour(wx.Colour(0, 0, 0))
self.label_4.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD, 1, ""))
self.label_5.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD, 1, ""))
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrame4.__do_layout
        #--- Initial GUI setup. Creates the grid and button layout ---
sizer_10 = wx.BoxSizer(wx.HORIZONTAL)
sizer_11 = wx.BoxSizer(wx.VERTICAL)
sizer_4 = wx.BoxSizer(wx.VERTICAL)
sizer_7 = wx.BoxSizer(wx.HORIZONTAL)
sizer_6 = wx.BoxSizer(wx.HORIZONTAL)
sizer_5 = wx.BoxSizer(wx.HORIZONTAL)
sizer_12 = wx.BoxSizer(wx.VERTICAL)
sizer_15 = wx.BoxSizer(wx.HORIZONTAL)
sizer_14 = wx.BoxSizer(wx.HORIZONTAL)
sizer_13 = wx.BoxSizer(wx.HORIZONTAL)
sizer_10.Add(self.bitmap_1, 0, 0, 0)
sizer_11.Add(self.label_4, 0, wx.LEFT | wx.TOP | wx.EXPAND, 5)
sizer_13.Add(self.label_6, 1, wx.LEFT | wx.EXPAND, 5)
sizer_13.Add(self.text_ctrl_2, 2, wx.RIGHT, 5)
sizer_12.Add(sizer_13, 1, wx.EXPAND, 0)
sizer_14.Add(self.label_7, 1, wx.LEFT | wx.EXPAND, 5)
sizer_14.Add(self.text_ctrl_3, 2, wx.RIGHT | wx.ALIGN_CENTER_HORIZONTAL, 5)
sizer_12.Add(sizer_14, 1, wx.EXPAND, 0)
sizer_15.Add(self.label_8, 1, wx.LEFT | wx.EXPAND, 5)
sizer_15.Add(self.text_ctrl_4, 2, wx.RIGHT, 5)
sizer_12.Add(sizer_15, 1, wx.EXPAND, 0)
sizer_11.Add(sizer_12, 1, wx.EXPAND, 0)
sizer_11.Add(self.label_5, 0, wx.LEFT | wx.TOP | wx.EXPAND, 5)
sizer_5.Add(self.label_9, 1, wx.LEFT, 5)
sizer_5.Add(self.text_ctrl_6, 2, wx.RIGHT | wx.EXPAND, 5)
sizer_4.Add(sizer_5, 1, wx.EXPAND, 0)
sizer_6.Add(self.label_10, 1, wx.LEFT, 5)
sizer_6.Add(self.text_ctrl_7, 2, wx.RIGHT | wx.EXPAND, 5)
sizer_4.Add(sizer_6, 1, wx.EXPAND, 0)
sizer_7.Add(self.label_11, 1, wx.LEFT, 5)
sizer_7.Add(self.text_ctrl_8, 2, wx.RIGHT | wx.EXPAND, 5)
sizer_4.Add(sizer_7, 1, wx.EXPAND, 0)
sizer_11.Add(sizer_4, 1, wx.EXPAND, 0)
sizer_11.Add(self.panel_1, 1, wx.EXPAND, 0)
sizer_11.Add(self.button_2, 1, wx.RIGHT | wx.TOP | wx.BOTTOM | wx.EXPAND | wx.ALIGN_RIGHT, 5)
sizer_10.Add(sizer_11, 1, wx.EXPAND, 0)
self.SetSizer(sizer_10)
self.Layout()
self.Centre()
# end wxGlade
# end of class MyFrame4
#### This class represents the secondary options window that is launched when the real-time plotting of data begins
#### Signals are sent over a secondary queue that listens for when the user wants to change between a 24-hour plot and a 1-hour plot
#### A Y shift is also signaled to shift the graph up or down on the y axis
#### Class was auto-generated using wxGlade
class selectionWindow(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: SecondaryWindow.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, None, wx.ID_ANY, "", style= wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX)
self.iconFile = "icons/icon.ico"
self.icon = wx.Icon(self.iconFile, wx.BITMAP_TYPE_ICO)
self.SetIcon(self.icon)
#For Plotting Options
self.label_2 = wx.StaticText(self, wx.ID_ANY, ("Plotting Options: "), style=wx.ALIGN_LEFT)
self.button_3 = wx.Button(self, wx.ID_ANY, ("24 Hour Plotting"))
self.panel_3 = wx.Panel(self, wx.ID_ANY)
self.button_4 = wx.Button(self, wx.ID_ANY, ("1 Hour Plotting"))
self.panel_4 = wx.Panel(self, wx.ID_ANY)
self.spin_button_1 = wx.SpinButton(self, wx.ID_ANY , style=wx.SP_VERTICAL)
self.label_1 = wx.StaticText(self, wx.ID_ANY, ("Graph Shift"), style=wx.ALIGN_CENTRE)
self.panel_5 = wx.Panel(self, wx.ID_ANY)
#For dividing lines
self.div_lin1 = wx.StaticLine(self, -1, size=(3,210),style=wx.LI_VERTICAL)
#For Data Options
self.dat_label = wx.StaticText(self, wx.ID_ANY, ("Data Options: "), style=wx.ALIGN_LEFT)
self.extract_button = wx.Button(self, wx.ID_ANY, ("Extract Data"))
self.extract_button.Disable()
self.dataAccess = None
self.hourData = None
self.extrSaveOnly = wx.RadioButton(self, wx.ID_ANY, label='Save Selection',style=wx.RB_GROUP)
self.extrDisplayOnly = wx.RadioButton(self, wx.ID_ANY, label='Display Selection')
self.extrBoth = wx.RadioButton(self, wx.ID_ANY, label='Save and Display\nSelection')
self.display_button = wx.Button(self, wx.ID_ANY, ("Display Data\n from File"))
#Bindings of buttons and boxes
self.Bind(wx.EVT_BUTTON, self.twentyFourHourPlot, id = self.button_3.Id)
self.Bind(wx.EVT_BUTTON, self.oneHourPlot, id = self.button_4.Id)
self.Bind(wx.EVT_SPIN_UP, self.graphMoveUp, id = self.spin_button_1.Id)
self.Bind(wx.EVT_SPIN_DOWN, self.graphMoveDown, id = self.spin_button_1.Id)
self.Bind(wx.EVT_BUTTON, self.extractData, id = self.extract_button.Id)
self.Bind(wx.EVT_BUTTON, self.displayData, id = self.display_button.Id)
self.Bind(wx.EVT_CLOSE, self.doNothingIfExitButtonPressed)
self.Bind(wx.EVT_MAXIMIZE, self.doNothingIfExitButtonPressed)
self.__set_properties()
self.__do_layout()
# end wxGlade
def doNothingIfExitButtonPressed(self,e):
        pass
def close(self):
self.Destroy()
def twentyFourHourPlot(self, e):
        #Send signal via queue 2 to the collecting process to inform the plotting process to readjust the axis to show 24 hours
queue2.put("24-Hour-Plot")
def oneHourPlot(self, e):
        #Send signal via queue 2 to the collecting process to inform the plotting process to readjust the axis to show 1 hour
queue2.put("1-Hour-Plot")
def graphMoveUp(self, e):
#Send signal via queue 2 to the collecting process to change the dcshift value
queue2.put("UP")
def graphMoveDown(self, e):
#Send signal via queue 2 to the collecting process to change the dcshift value
queue2.put("DOWN")
    #This method is the main method which handles collecting and saving a region of data selected by dataHandler. It is invoked
    #when the Extract Data button is pressed. It gets the start and end times of the selection, finds how many hours are included, builds a list of directories
    #where this data exists (or gets it from hourSeismicData via the 'now' marker), then puts all this data in an array which is saved in a .sac file.
    #Note that this method only supports selection intervals which include up to a maximum of 24 hours.
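    # Shape of the directoryList built below (values illustrative only):
    #   [[(2016, 1, 13, 22), 'ABCD/2016/01/13/16011322ABCD.sac'],
    #    [(2016, 1, 13, 23), 'now']]
    # where 'now' marks the current hour, still held in memory (hourSeismicData).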
def extractData(self, e):
global stationId, mainWin
        if self.dataAccess is not None:
start = self.dataAccess.initialTime
end = self.dataAccess.endTime
interval = end[1]-start[1]
if interval < 0:
interval = interval+24
interval += 1 #Total number of hours selected (where an hour is counted even if only part of it is selected)
directoryList = []
for hour in range(int(start[1]), int(start[1]+interval)):
if hour < 24:
year, month, day = start[0].year, start[0].month, start[0].day
else:
year, month, day, hour = end[0].year, end[0].month, end[0].day, hour-24
timeTuple = (int(year), int(month), int(day), int(hour))
if len(str(hour)) < 2:
hour = '0'+str(hour)
if len(str(day)) < 2:
day = '0'+str(day)
if len(str(month)) < 2:
month = '0'+str(month)
directory = [timeTuple, stationId+'/'+str(year)+'/'+str(month)+'/'+str(day)+'/'+str(year)[-2:]+str(month)+str(day)+str(hour)+stationId+'.sac']
directoryList.append(directory)
now = datetime.utcnow()
for i in range(len(directoryList)):
if not os.path.exists(directoryList[i][1]):
if (end[0].year, end[0].month, end[0].day, end[1]) == (now.year, now.month, now.day, now.hour):
directoryList[i][1] = 'now'
else:
msgbx.showerror("Error", "Some or all of the selected time\ndoes not have recorded data. Please\nselect a region of time which has\ncontinuous data.")
return
elif directoryList[i][0] == (int(now.year), int(now.month), int(now.day), int(now.hour)):
directoryList[i][1] = directoryList[i][1] + 'now'
hourSeisDat, hourTime = self.hourData[0], self.hourData[1]
extrxtData, tot_time = np.array([], dtype=np.float64), 0
for i in range(len(directoryList)):
if i == 0:
if directoryList[i][1][-3:] != 'now':
trace = read(pathname_or_url = directoryList[0][1], format = 'SAC')
trace = trace.pop(0)
trace_dat = trace.data
extrxtData = np.concatenate((extrxtData, trace_dat[int(start[2]*len(trace_dat)):]))
tot_time += 3600-start[2]*3600
else:
total_time = hourTime.minute*60+hourTime.second+hourTime.microsecond/1000000.0
start_index = int(start[2]*3600/total_time*len(hourSeisDat))
end_index = int(end[2]*3600/total_time*len(hourSeisDat))
print 'Ind', start_index, end_index
if len(directoryList[i][1]) > 3:
trace = read(pathname_or_url = directoryList[0][1][:-3], format = 'SAC')
trace = trace.pop(0)
hourSeisDat = np.concatenate((trace.data, hourSeisDat))
extrxtData = np.concatenate((extrxtData, hourSeisDat[start_index:end_index]))
tot_time += (end[2]-start[2])*3600
elif i != len(directoryList)-1:
trace = read(pathname_or_url = directoryList[i][1], format = 'SAC')
trace = trace.pop(0)
trace_dat = trace.data
extrxtData = np.concatenate((extrxtData, trace_dat[:]))
tot_time += 3600
elif i == len(directoryList)-1:
if directoryList[i][1][-3:] != 'now':
trace = read(pathname_or_url = directoryList[i][1], format = 'SAC')
trace = trace.pop(0)
trace_dat = trace.data
extrxtData = np.concatenate((extrxtData, trace_dat[:int(end[2]*len(trace_dat))]))
else:
total_time = hourTime.minute*60+hourTime.second+hourTime.microsecond/1000000.0
end_index = int(end[2]*3600/total_time*len(hourSeisDat))
if len(directoryList[i][1]) > 3:
                            trace = read(pathname_or_url = directoryList[i][1][:-3], format = 'SAC') #Use the current entry, not the first one
trace = trace.pop(0)
hourSeisDat = np.concatenate((trace.data, hourSeisDat))
extrxtData = np.concatenate((extrxtData, hourSeisDat[:end_index]))
tot_time += end[2]*3600
latitude, longitude, elevation = self.hourData[2][0], self.hourData[2][1], self.hourData[2][2]
sampling_rate = len(extrxtData)/tot_time
stats = initializeHeader(longitude, latitude , elevation, start[0])
stats.npts = len(extrxtData)
stats.sampling_rate = sampling_rate
stats.delta = 1/sampling_rate
st = Stream(Trace(data=extrxtData, header=stats))
self.dataAccess.dataDeselector('resize')
if self.extrSaveOnly.GetValue() or self.extrBoth.GetValue():
filename = self.file_dialog('save', start[0], end[0])
st.write(filename, format='SAC')
if self.extrDisplayOnly.GetValue() or self.extrBoth.GetValue():
queue3.put(st)
tkframes.data_ready()
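    #A hedged sketch (illustration only, not executed) of the hour-interval arithmetic above,
    #assuming start[1] and end[1] hold hour-of-day integers:
    #    start_hour, end_hour = 22, 3
    #    interval = end_hour - start_hour      # -19 when the selection wraps past midnight
    #    if interval < 0:
    #        interval = interval + 24          # 5
    #    interval += 1                         # 6 hours touched: 22, 23, 0, 1, 2, 3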
#Method for handling the file saving dialog box when data is extracted (13/01/16)
def file_dialog(self, mode, start=None, end=None):
if mode == 'save':
start = str(start.year)+'-'+str(start.month)+'-'+str(start.day)+'-'+str(start.hour)+'.'+str(start.minute)+'.'+str(round(start.second,2))
end = str(end.year)+'-'+str(end.month)+'-'+str(end.day)+'-'+str(end.hour)+'.'+str(end.minute)+'.'+str(round(end.second,2))
fileBrowser = wx.FileDialog(self, 'Select Location to Save Data', os.path.expanduser('~'), start+'_to_'+end+'.sac', 'SAC files (*.sac)|*.sac', wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
elif mode == 'open':
fileBrowser = wx.FileDialog(self, 'Select Data File to Display', os.path.expanduser('~'), '', 'SAC files (*.sac)|*.sac', wx.FD_OPEN)
fileBrowser.ShowModal()
path = fileBrowser.GetPath()
if mode == 'save' and path[-4:] != '.sac':
path += '.sac'
return path
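    #Illustration only (assumed dates): a selection from 2016-01-13 04:05:06.78 to 2016-01-13 04:15:06.78
    #produces the default save name '2016-1-13-4.5.6.78_to_2016-1-13-4.15.6.78.sac'.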
def displayData(self, e=None):
pathName = self.file_dialog('open')
stream = read(pathname_or_url = pathName, format = 'SAC')
queue3.put(stream)
tkframes.data_ready()
def __set_properties(self):
# begin wxGlade: MyFrame.__set_properties
self.SetTitle(("Options"))
self.SetSize((325, 240))
self.SetBackgroundColour(wx.Colour(240, 240, 240))
self.panel_3.SetBackgroundColour(wx.Colour(240, 240, 240))
self.panel_4.SetBackgroundColour(wx.Colour(240, 240, 240))
self.panel_5.SetBackgroundColour(wx.Colour(240, 240, 240))
self.label_2.SetFont(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.dat_label.SetFont(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.label_1.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
self.extrSaveOnly.SetFont(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.extrDisplayOnly.SetFont(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.extrBoth.SetFont(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrame.__do_layout
#Main Sizer
sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
#Plotting Options
sizer_2 = wx.BoxSizer(wx.VERTICAL)
sizer_2a = wx.BoxSizer(wx.HORIZONTAL)
sizer_2b = wx.BoxSizer(wx.HORIZONTAL)
sizer_2.Add((4,6), 0, wx.EXPAND, 0)
sizer_2a.Add(self.label_2, 1, wx.ALIGN_CENTER_VERTICAL, 8)
sizer_2.Add(sizer_2a, 0, wx.LEFT, 9)
sizer_2.Add((4,10), 0, wx.EXPAND, 0)
sizer_2.Add(self.button_3, 1, wx.LEFT | wx.RIGHT | wx.EXPAND, 8)
sizer_2.Add(self.panel_3, 1, wx.EXPAND, 0)
sizer_2.Add(self.button_4, 1, wx.LEFT | wx.RIGHT | wx.EXPAND, 8)
sizer_2.Add(self.panel_4, 1, wx.EXPAND, 0)
sizer_2b.Add(self.spin_button_1, 2, wx.LEFT | wx.EXPAND, 20)
sizer_2b.Add(self.label_1, 4, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_2.Add(sizer_2b, 1, wx.EXPAND, 0)
sizer_2.Add(self.panel_5, 1, wx.EXPAND, 0)
#First dividing line
sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
sizer_3.Add(self.div_lin1, 1, wx.ALIGN_CENTER_VERTICAL, 0)
#Data Options
sizer_4 = wx.BoxSizer(wx.VERTICAL)
sizer_4a = wx.BoxSizer(wx.HORIZONTAL)
sizer_4.Add((4,6), 0, wx.EXPAND, 0)
sizer_4a.Add(self.dat_label, 1, wx.ALIGN_CENTER_VERTICAL, 8)
sizer_4.Add(sizer_4a, 0, wx.LEFT, 3)
sizer_4.Add((4,6), 0, wx.EXPAND, 0)
sizer_4.Add(self.extrSaveOnly, 0, wx.LEFT | wx.RIGHT, 0)
sizer_4.Add(self.extrDisplayOnly, 0, wx.LEFT | wx.RIGHT, 0)
sizer_4.Add(self.extrBoth, 0, wx.LEFT | wx.RIGHT, 0)
sizer_4.Add((4,5), 0, wx.EXPAND, 0)
sizer_4.Add(self.extract_button, 0, wx.LEFT | wx.RIGHT | wx.EXPAND, 5)
sizer_4.Add((4,20), 0, wx.EXPAND, 0)
sizer_4.Add(self.display_button, 1, wx.LEFT | wx.RIGHT | wx.EXPAND, 10)
#Putting everything in main sizer
sizer_1.Add((4,1), 0, wx.EXPAND, 0)
sizer_1.Add(sizer_2, 5, wx.RIGHT | wx.EXPAND, 0)
sizer_1.Add(sizer_3, 1, wx.RIGHT | wx.EXPAND, 0)
sizer_1.Add(sizer_4, 5, wx.RIGHT, 2)
sizer_1.Add((4,1), 0, wx.EXPAND, 0)
self.SetSizer(sizer_1)
self.Layout()
# end wxGlade
### Class to handle the frame which appears at the bottom of the main plotting window. New v2.0 (18/12/15).
class lowerFrame():
def __init__(self, master):
bckgrnd = '#E6E6E6'
self.frame = tk.Frame(master, bd=1, relief='sunken', bg=bckgrnd)
time_label = tk.Label(self.frame, text='Current Time: ', bg=bckgrnd)
time_label.pack(side='left', pady=1, padx=4)
self.currentLabel = tk.Label(self.frame, text="", bg=bckgrnd)
self.currentLabel.pack(side='left', pady=1)
self.mouselocLabel = tk.Label(self.frame, text=" "*20, bg=bckgrnd)
self.mouselocLabel.pack(side='right', pady=1, padx=4)
loc_time_label = tk.Label(self.frame, text='Time at mouse location: ', bg=bckgrnd)
loc_time_label.pack(side='right', pady=1)
self.mode = "24-Hour-Plot" #Changed in plotData when mode is changed. Makes it easy to tell mode when mouse_move is called.
self.firstHour = datetime.utcnow()
    ##Function to display the time at the mouse location on the plot. This is called when the mouse is moved over the plot (see mpl_connect binding of fig in Plotting). (18/12/15)
def mouse_move(self, event, graph_constant):
x_pos, y_pos, time = event.xdata, event.ydata, False
if x_pos and y_pos and self.mode == "24-Hour-Plot":
hour = 23-int(((y_pos-32750)+graph_constant/2)//graph_constant)
hour = hour+self.firstHour.hour
if hour > 23:
hour = hour-24
if (y_pos+graph_constant-32750) < (graph_constant/2):
hour = hour-1
elif (y_pos+graph_constant-32750) > graph_constant*24+(graph_constant/2):
hour = hour+1
minute = int(x_pos)
second = round((x_pos%1)*60, 4)
time = True
elif x_pos and y_pos and self.mode == "1-Hour-Plot":
hour = self.firstHour.hour
minute = int(x_pos)+(11-int(((y_pos-32750)+graph_constant/2)//graph_constant))*5
if (y_pos+graph_constant-32750) < (graph_constant/2):
minute = minute-5
elif (y_pos+graph_constant-32750) > graph_constant*12+(graph_constant/2):
minute = minute+5
second = round((x_pos%1)*60, 4)
time = True
if time:
hour, minute, second = str(hour), str(minute), str(second)
if int(hour) < 10:
hour = '0'+hour
if int(minute) < 10:
minute = '0'+minute
if float(second) < 10:
second = '0'+second
if len(str(second)) < 7:
second = second + '0'*(7-len(second))
time = hour+':'+minute+':'+second
self.mouselocLabel.config(text=time)
if not x_pos and not y_pos:
self.mouselocLabel.config(text='Not Available')
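    #A hedged example of the row arithmetic above (values assumed, not from a real event):
    #    graph_constant = 2500
    #    y_pos = 32750 + 5*graph_constant + 100.0                # a point just above the 6th trace line
    #    int(((y_pos-32750)+graph_constant/2)//graph_constant)   # -> 5
    #    hour = 23 - 5                                           # -> 18, before the firstHour offset is added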
### Class responsible for data selection and extraction and associated bindings (05/01/16)
class dataHandler():
def __init__(self, fig, ax, graphHeightConst, mode_getter_class):
self.fig = fig
self.canvas = fig.canvas
self.ax = ax
self.pressId = self.canvas.mpl_connect('button_press_event', self.dataSelector)
self.graphHeightConst = graphHeightConst
self.mode_getter = mode_getter_class
self.activeSelection=False
self.hourAccess = None
self.hourData = None
self.displayItems = None
def dataSelector(self, event):
global options_window
if event.button == 1:
x_dat_pos, y_dat_pos = event.xdata, event.ydata
x_pixel, y_pixel = event.x, event.y #Measured from bottom left hand corner of TkAgg Canvas.
if x_dat_pos and y_dat_pos:
self.mode = mode = self.mode_getter.mode
data_buffer = self.data_buffer(y_dat_pos)
self.initialTime = self.findTime(x_dat_pos, y_dat_pos, data_buffer)
now, then = datetime.utcnow(), self.initialTime[0]
if then < now:
self.activeSelection=True
options_window.extract_button.Enable()
bbox = self.ax.get_window_extent().transformed(self.fig.dpi_scale_trans.inverted())
width, height = bbox.width*self.fig.dpi, bbox.height*self.fig.dpi
if mode == "24-Hour-Plot":
self.frame_height = height/25
self.plot_width = width/60
self.mode_no = 60
elif mode == "1-Hour-Plot":
self.frame_height = height/13
self.plot_width = width/5
self.mode_no = 5
self.window_height = self.canvas._tkcanvas.winfo_height()
self.originalx = x_pixel
self.original_xdat = x_dat_pos
self.originaly = self.window_height-y_pixel-(self.frame_height/self.graphHeightConst*data_buffer) #self.frame_height/self.graphHeightConst is pixels/data
self.initialTime = self.findTime(x_dat_pos, y_dat_pos, data_buffer)
self.drawFrame()
self.moveId = self.canvas.mpl_connect('motion_notify_event', self.resizeFrame)
self.releaseId = self.canvas.mpl_connect('button_release_event', self.dataExtractor)
#Function to find the times which correspond to the ends of the selection area
def findTime(self, x_dat_pos, y_dat_pos, data_buffer):
time = []
if self.mode == "24-Hour-Plot":
hour = 23-((y_dat_pos+data_buffer-self.graphHeightConst/4-32750)//self.graphHeightConst)+self.hourAccess.firstHour.hour
date = self.hourAccess.firstHour
if hour>23:
hour = hour-24
date = self.hourAccess.firstHour + dt.timedelta(days=1)
minute = (int(x_dat_pos)*60+(x_dat_pos%1*60))/3600 #Decimal fraction of hour where initial selection is
time.append(datetime(date.year, date.month, date.day, int(hour), int(minute*60), int(minute*3600)-int(minute*60)*60, int(minute*3600000000)-int(minute*3600)*1000000))
time.append(hour)
time.append(minute)
elif self.mode == "1-Hour-Plot":
minute = ((11-((y_dat_pos+data_buffer-self.graphHeightConst/4-32750)//self.graphHeightConst))*5*60+(x_dat_pos//1*60+(x_dat_pos%1*60)))/3600 #Decimal fraction of hour where initial selection is
date = self.hourAccess.firstHour
time.append(datetime(date.year, date.month, date.day, date.hour, int(minute*60), int(minute*3600)-int(minute*60)*60, int(minute*3600000000)-int(minute*3600)*1000000))
time.append(self.hourAccess.firstHour.hour)
time.append(minute)
return time
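    #Sketch of the fraction-of-hour decoding used above, with an assumed value:
    #    minute = 0.5051                                     # decimal fraction of the hour
    #    int(minute*60)                                      # -> 30 (minutes)
    #    int(minute*3600)-int(minute*60)*60                  # -> 18 (seconds)
    #    int(minute*3600000000)-int(minute*3600)*1000000     # -> 360000 (remaining microseconds)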
    #This is the primary function for matching the selection area to where the mouse is moved. The selection area is drawn from 1-pixel-thick
    #frames. There are either four frames (for single-line selections) or eight (for multi-line selections -- the extras are initiated and kept in self.extra_frames).
    #Several reference points are established in the make_extra_frames function (the x-pixel for the left of the plot, and the left and right distances to the
    #edge of the plot from the original position where the selection was first started). As the mouse is moved, the frames are resized and repositioned.
def resizeFrame(self, event):
x_pixel, y_pixel = event.x, event.y
x_dat_pos, y_dat_pos, newy = event.xdata, event.ydata, None
x_win, y_win = mainWin.winfo_rootx(), mainWin.winfo_rooty()
if y_dat_pos:
newy = self.window_height-y_pixel-(self.frame_height/self.graphHeightConst*self.data_buffer(y_dat_pos))
if self.findTime(x_dat_pos, y_dat_pos, self.data_buffer(y_dat_pos))[0] < datetime.utcnow():
if x_dat_pos and self.originaly < self.window_height-y_pixel < self.originaly+self.frame_height: #For selection of one line of the trace only
self.currentEndy = y_dat_pos
self.currentEndx = x_dat_pos
self.clear_extras()
self.leftVert.config(height = self.frame_height+1)
self.rightVert.config(height = self.frame_height+1)
if x_pixel > self.originalx:
self.rightVert.place_configure(x = x_pixel, y = self.originaly)
self.topHoriz.config(width = x_pixel-self.originalx)
self.botHoriz.config(width = x_pixel-self.originalx)
self.botHoriz.place_configure(anchor = 'nw', y=self.originaly+self.frame_height,x=self.originalx)
self.topHoriz.place_configure(anchor = 'nw', x=self.originalx, y=self.originaly)
elif x_pixel < self.originalx:
self.rightVert.place_configure(x = x_pixel, y = self.originaly)
self.topHoriz.config(width = self.originalx-x_pixel)
self.botHoriz.config(width = self.originalx-x_pixel)
self.botHoriz.place_configure(anchor = 'ne', y=self.originaly+self.frame_height, x=self.originalx)
self.topHoriz.place_configure(anchor = 'ne', y=self.originaly, x=self.originalx)
                elif x_dat_pos and ((self.mode=='24-Hour-Plot' and 32750-self.graphHeightConst/2<y_dat_pos<32750+self.graphHeightConst*24-self.graphHeightConst/2)\
                or (self.mode=='1-Hour-Plot' and 32750-self.graphHeightConst/2<y_dat_pos<32750+self.graphHeightConst*12-self.graphHeightConst/2)): #For selection of multiple lines of the trace. Outer parentheses ensure x_dat_pos is required in both modes.
                    if not hasattr(self, 'extra_frames'): #Create the extra frames only once per multi-line selection
                        self.extra_frames = self.make_extra_frames()
self.currentEndy = y_dat_pos
self.currentEndx = x_dat_pos
side_height = abs(self.originaly-newy)
frames = self.extra_frames
self.leftVert.config(height = self.frame_height) #Height of verticals has to be reduced by one for an unknown reason
self.rightVert.config(height = self.frame_height)
if newy > self.originaly:
self.rightVert.place_configure(x = x_pixel, y = newy)
self.topHoriz.config(width = self.to_right_width)
self.botHoriz.config(width = self.to_left_width+(x_pixel-self.originalx))
self.botHoriz.place_configure(anchor = 'nw', y = newy+self.frame_height-1, x = self.left_of_plot)
self.topHoriz.place_configure(anchor = 'nw', x=self.originalx, y=self.originaly)
frames[2].config(width = self.to_left_width)
frames[3].config(width = self.to_right_width-(x_pixel-self.originalx), bg = 'red')
frames[0].config(height=side_height), frames[1].config(height=side_height)
frames[0].place_configure(anchor = 'nw', x = self.left_of_plot, y = self.originaly+self.frame_height)
frames[1].place_configure(anchor = 'ne', x = self.left_of_plot+self.mode_no*self.plot_width, y = self.originaly)
frames[2].place_configure(anchor = 'nw', x = self.left_of_plot, y = self.originaly+self.frame_height-1)
frames[3].place_configure(anchor = 'nw', x = x_pixel, y = newy)
elif newy < self.originaly:
self.rightVert.place_configure(x = x_pixel, y = newy)
self.topHoriz.config(width = self.to_right_width-(x_pixel-self.originalx))
self.botHoriz.config(width = self.to_left_width)
self.botHoriz.place_configure(anchor = 'ne', y=self.originaly+self.frame_height-1, x=self.originalx)
self.topHoriz.place_configure(anchor = 'ne', y = newy, x = self.left_of_plot+self.mode_no*self.plot_width)
frames[2].config(width = self.to_left_width+(x_pixel-self.originalx), bg = 'red')
frames[3].config(width = self.to_right_width, bg = 'red')
frames[0].config(height=side_height), frames[1].config(height=side_height)
frames[0].place_configure(anchor = 'nw', x = self.left_of_plot, y = newy+self.frame_height)
frames[1].place_configure(anchor = 'ne', x = self.left_of_plot+self.mode_no*self.plot_width, y = newy)
frames[2].place_configure(anchor = 'nw', x = self.left_of_plot, y = newy+self.frame_height-1)
frames[3].place_configure(anchor = 'nw', x = self.originalx, y = self.originaly)
def clear_extras(self):
try:
for widget in self.extra_frames:
tkframes.destroy(widget)
del self.extra_frames
except:
pass
def make_extra_frames(self):
self.to_left_width = int(self.original_xdat*self.plot_width)
self.to_right_width = int((self.mode_no-self.original_xdat)*self.plot_width)
self.left_of_plot = np.ceil(self.originalx-self.original_xdat*self.plot_width)
left_vert = tkframes.Frame(height = self.frame_height, width = 1, bg = 'red')
right_vert = tkframes.Frame(height = self.frame_height, width = 1, bg = 'red')
top_to_left = tkframes.Frame(height = 1, width = 1, bg = 'red')
bottom_to_right = tkframes.Frame(bg = 'red', height = 1, width = 1)
bottom_to_right.place(), top_to_left.place(), right_vert.place(), left_vert.place()
return (left_vert, right_vert, top_to_left, bottom_to_right)
#Function which handles the mouse release event after the data area has been selected. From here, extraction and saving is handled in selectionWindow.
def dataExtractor(self, event):
global options_window
if event.button == 1:
self.canvas.mpl_disconnect(self.pressId)
self.canvas.mpl_disconnect(self.moveId)
self.deselectId = self.canvas.mpl_connect('button_press_event', self.dataDeselector)
self.canvas.mpl_disconnect(self.releaseId)
self.endTime = self.findTime(self.currentEndx, self.currentEndy, self.data_buffer(self.currentEndy))
#Now that start and end times are established (the aim of this class), the data can be extracted and saved or displayed by the extracting and
#saving functions in the selectionWindow class, which contains the button to initiate this task.
    #To clear the selection on click (or another call such as resize or changing mode)
def dataDeselector(self, event):
global options_window, tkframes
if self.activeSelection and (event == 'resize' or event.button == 1):
self.clear_extras()
try:
for widget in self.selection_frame:
tkframes.destroy(widget)
self.canvas.mpl_disconnect(self.deselectId)
self.pressId = self.canvas.mpl_connect('button_press_event', self.dataSelector)
except AttributeError:
print 'Attribute Error Occurred'
self.activeSelection = False
options_window.extract_button.Disable()
#Function to initiate the four 1-pixel width frames which make up the selection area. (Note extra frames initiated in make_extra_frames when required)
def drawFrame(self):
global tkframes
self.topHoriz = tkframes.Frame(height = 1, width = 3, bg = 'red')
self.botHoriz = tkframes.Frame(height = 1, width = 3, bg = 'red')
self.leftVert = tkframes.Frame(height = self.frame_height+1, width = 1, bg = 'red')
self.rightVert = tkframes.Frame(height = self.frame_height+1, width = 1, bg = 'red')
self.topHoriz.place(x = self.originalx, y = self.originaly)
self.leftVert.place(x = self.originalx, y = self.originaly)
self.botHoriz.place(x = self.originalx, y = self.originaly+self.frame_height-1)
self.rightVert.place(x = self.originalx+3, y = self.originaly)
self.selection_frame = (self.topHoriz, self.botHoriz, self.leftVert, self.rightVert)
#For finding the difference (in terms of yaxis height, not pixels) between the clicked location and the next highest mid-point between traces.
def data_buffer(self, y_dat_pos):
if y_dat_pos:
data_buffer = (np.ceil((y_dat_pos-32750)*2/self.graphHeightConst)*(self.graphHeightConst/2))
mode = self.mode
if data_buffer < 0:
data_buffer = self.graphHeightConst/2
elif mode == "24-Hour-Plot" and y_dat_pos > 23*self.graphHeightConst+32750:
data_buffer = 23*self.graphHeightConst+self.graphHeightConst/2
elif mode == "1-Hour-Plot" and y_dat_pos > 11*self.graphHeightConst+32750:
data_buffer = 11*self.graphHeightConst+self.graphHeightConst/2
elif data_buffer/self.graphHeightConst%1==0:
data_buffer = data_buffer+self.graphHeightConst/2
data_buffer = data_buffer+32750-y_dat_pos
return data_buffer
return None
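    #Hedged example of the buffer calculation (inputs assumed): with graphHeightConst = 2500 and a click
    #at y_dat_pos = 36000.0, np.ceil((36000.0-32750)*2/2500)*1250 = 3750, and adding 32750-36000 gives a
    #data_buffer of 500, the vertical distance from the click up to the next mid-point between traces.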
###Initialization of the main plotting window. This is a class which is called from the __main__ thread so that the mainloop() is in the main thread.
###Previously, this was the target for the plottingProcess thread, but the current architecture achieves the same plotting functionality (via root.after)
###while allowing for the mainloop of the plotting window to be in the main thread.
class Plotting():
def __init__(self, queue, queue2):
global mainWin, plotting_loop, options_window
looping = True
while looping:
if not queue.empty():
value = queue.get()
if value == "Start Plotting Process":
looping = False
elif not plotting_loop:
looping = False
if plotting_loop: #In case program has been closed before now (i.e. if no TC1 connected and user has selected to exit).
timeNow = datetime.time(datetime.now())
time = timeNow.minute + (timeNow.second + timeNow.microsecond/1000000.0)/60.0
lastX = time
lastY = 90250
connect = True
step = 0
x=[]
y=[]
mode = "24-Hour-Plot"
self.root = tk.Tk()
mainWin = self.root
mainWin.protocol("WM_DELETE_WINDOW", window_close) #Closes options window and ends processes. New in v2.0.
mainWin.wm_title("PyAmaseis v1.0")
### v1.0 change: Conditional added. .ico not supported on Linux. zoomed not
### supported on linux.
if platform.system() == 'Linux':
mainWin.iconbitmap(r'@icons/icon1.xbm')
else:
mainWin.iconbitmap(r'icons/icon.ico')
mainWin.wm_state('zoomed')
graphHeightConst = 2500
fig = plt.figure(figsize=(13,9)) #15,10
# v1.0 change: AttributeError: 'Figure' object has no attribute 'set_tight_layout' on Linux
if platform.system() != 'Linux':
fig.set_tight_layout(0.4)
ax = fig.add_subplot(1,1,1)
ax.set_xlim(0,60)
ax.set_ylim(30250,92750)
ax.set_xlabel('Minute')
ax.set_ylabel('Hour (UTC)')
yAxis = [30250,92750]
y1 = (np.arange(min(yAxis), max(yAxis)+1,graphHeightConst))
y2 = calculateYAxisLabels()
ax = xAxisLabels(ax, 24)
plt.yticks(y1, y2)
ax.yaxis.grid(color = '#0000FF', linestyle = '-')
ax.set_axisbelow(True)
line, = ax.plot(x, y, color='k')
canvas = FigureCanvasTkAgg(fig, master=mainWin)
canvas._tkcanvas.config(highlightthickness=0)
bottomFrame = lowerFrame(mainWin)
bottomFrame.frame.pack(side='bottom', expand=1, fill = tk.BOTH)
canvas._tkcanvas.pack(side=tk.TOP, expand=1, fill = tk.BOTH)
canvas.draw()
dataInteractive = dataHandler(fig, ax, graphHeightConst, bottomFrame)
options_window.dataAccess = dataInteractive
dataInteractive.hourAccess = bottomFrame
self.displayItems = None
dataInteractive.displayItems = self.displayItems
fig.canvas.mpl_connect('motion_notify_event', lambda event: bottomFrame.mouse_move(event, graphHeightConst))
mainWin.update_idletasks()
geometry = mainWin.geometry()
geometry = geometry[:geometry.find('+')]
mainWin.after(0, plotData,queue, queue2, fig, ax, canvas, bottomFrame, mainWin, lastY, lastX, connect, line, mode, geometry, dataInteractive)
###Any tk Frames used in this program must originate from the __main__ thread. Hence, this class, which is only called from the __main__ thread, initiates a
###list of tk frames that can be used from other threads but still have their mainloops in the __main__ thread. The frames are mostly used in dataHandler.
class mFrame(tk.Frame):
def __init__(self, queue3, root):
tk.Frame.__init__(self)
self.max_no = 20
self.frames = []
self.root = root
for i in range(self.max_no):
self.frames.append(tk.Frame(mainWin))
self.frame_index = 0
self.queue3 = queue3
self.figureCount = 0
self.windows = []
def Frame(self, **kwargs):
frame = self.frames[self.frame_index]
self.frame_index+=1
frame.config(**kwargs)
return frame
def destroy(self, widget):
widget.destroy()
index = self.frames.index(widget)
del self.frames[index]
self.frames.append(tk.Frame(mainWin))
self.frame_index = self.frame_index-1
def data_ready(self):
self.current_data = queue3.get()
self.plot()
def plot(self):
if self.figureCount < 3:
self.figureCount += 1
window = tk.Toplevel(master=self.root)
window.lower()
self.windows.append(window)
window.protocol("WM_DELETE_WINDOW", lambda: self.toplevel_close(window))
if platform.system() == 'Linux':
mainWin.iconbitmap(r'@icons/icon1.xbm')
else:
mainWin.iconbitmap(r'icons/icon.ico')
window.title('Data Display')
fig = matplotlib.figure.Figure()
start = str(self.current_data[0].stats['starttime'])
end = str(self.current_data[0].stats['endtime'])
fig.suptitle("Data Extraction: "+start[:start.find('T')]+', '+start[start.find('T')+1:-1]+'\nto '+end[:end.find('T')]+', '+end[end.find('T')+1:-1])
ax = fig.add_subplot(1,1,1)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
canvas = FigureCanvasTkAgg(fig, master=window)
toolbarFrame = tk.Frame(window)
toolbar = NavigationToolbar2TkAgg(canvas, toolbarFrame)
toolbarFrame.pack(side=tk.BOTTOM, expand=1, fill = tk.BOTH)
canvas._tkcanvas.pack(side=tk.TOP, expand=1, fill = tk.BOTH)
self.current_data.plot(fig=fig)
window.lift()
else:
msgbx.showinfo("Maximum Reached", "The maximum number of data displays has been reached. Close an open data display before proceeding.")
def toplevel_close(self, window):
deleted = False
for i in range(len(self.windows)):
if not deleted and self.windows[i] == window:
self.windows[i].destroy()
del self.windows[i]
deleted = True
self.figureCount = self.figureCount-1
#### This method aligns the Options window to the bottom right-hand corner of the screen so it doesn't get in the way of the plotting window
def alignToBottomRight(win):
dw, dh = wx.DisplaySize()
w, h = win.GetSize()
x = dw - w
y = dh - h
win.SetPosition((x-20, y-65))
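#Illustration only: on an assumed 1920x1080 display, a 325x240 Options window is placed at
#    x = 1920-325-20 = 1575, y = 1080-240-65 = 775 (the 20 and 65 keep it clear of the screen edge and taskbar).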
#### This method creates the Options window
def secondaryWindow(queue2, queue3):
global options_window #New in v2.0.
app = wx.App(False)
options_window = selectionWindow()
app.SetTopWindow(options_window)
alignToBottomRight(options_window)
options_window.Show()
options_window.Raise()
app.MainLoop()
#### This is the Collecting method (thread) responsible for reading data from the TC1, sending this data via a queue to the plotting thread/method, saving data into SAC files, listening for commands from the Options window, and uploading SAC files to the NZSeis server after saving them
def Collecting(queue, queue2, queue3):
global collecting_loop, stationId, options_window
#Stats header information initialization
    stationId = '01' #Kept as a string: it is concatenated into file paths and SAC headers below
stationName = 'Unknown'
stationAddress = 'Unknown'
longitude = 0.0
latitude = 0.0
elevation = 0.0
dcShift = 0
oldDCShift = 0
#Check if user has already entered Station information, if yes, then go straight into 24 hour live plotting, if no create the initial station information input window
if(os.path.exists('Station Information.txt') == False):
app = wx.App(False)
frame_5 = MyFrame4(None, wx.ID_ANY, "")
app.SetTopWindow(frame_5)
frame_5.Center()
frame_5.Show()
app.MainLoop()
else:
pass
    #Once the user has entered the station information and it has been saved to a txt file, the file is read line by line below and parsed to extract the required header information
file = open("Station Information.txt", "r")
informationArray = file.readlines()
for line in informationArray:
if "Station ID" in line:
stationId = line[line.find(":")+1:line.find("\n")]
if "Station Name" in line:
stationName = line[line.find(":")+1:line.find("\n")]
if "Station Address" in line:
stationAddress = line[line.find(":")+1:line.find("\n")]
if "Longitude" in line:
longitude = line[line.find(":")+1:line.find("\n")]
if "Latitude" in line:
latitude = line[line.find(":")+1:line.find("\n")]
if "Elevation" in line:
elevation = line[line.find(":")+1:line.find("\n")]
if "DCShift" in line:
dcShift = int(line[line.find(":")+1::])
oldDCShift = int(line[line.find(":")+1::])
file.close()
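    #Sketch of the parsing above, with an assumed line from Station Information.txt:
    #    line = 'Longitude:174.768\n'
    #    line[line.find(":")+1:line.find("\n")]   # -> '174.768' (kept as a string)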
#initializing further required variables
mode = "None"
currentMode = "24Hour"
graphHeightConst = 2500 #distance between each 1 hour plot on the 24 hour plot
totalHoursConst = 23 #used to decrement the hour so that once the plot reaches the end of 24 hours the plot is cleared and plotting starts from the top
skipConst = 1 #currently not used, but in place to skip reading values coming in from the TC1 - eg. if it is 2, then it will read every second value
count = 0
lastHour = datetime.time(datetime.now()).hour
hasHourChanged = False
plotLimit = graphHeightConst*7
goldenNumber = 32750 #the center line of each plot, where it oscillates - used to fix y axis according to this (32750 - graphHeightConstant which gives lower limit + graphHeightConstant * 25 (or how many ever hours gives upper limit))
upperLim = 36000 #the top limit of each plot
lowerLim = 28000 #bottom limit of each plot
plotClear = False
#hourMillisecondData = np.array([], dtype = np.float64)
tempMillisecond = np.array([], dtype = np.float64)
serialNumber = None
serialPort = None
#Returns the serialPort that the TC1 is connected to
serialPort = getSerialPort()
#This while loop ensures user has connected the TC1 before continuing
    while serialPort is None:
redundantRoot = tk.Tk() #Parent for error dialog to display on top of. This is done so it can then be hidden and destroyed.
redundantRoot.withdraw()
yes_or_no = msgbx.askokcancel(message="Please Connect TC-1 Seismometer", title="Error", parent=redundantRoot)
redundantRoot.destroy()
if yes_or_no:
serialPort = getSerialPort()
else:
window_close(True)
return
serialPort = serial.Serial(serialPort)
serialPort.flushInput()
serialPort.flushOutput()
#The following two lines create the secondary options window
secondaryWindowProcess = Thread(target= secondaryWindow, args=(queue2,queue3,))
secondaryWindowProcess.start()
queue.put("Start Plotting Process")
#create a stats object that holds all the station information retrieved from the txt file
stats = initializeHeader(longitude, latitude , elevation)
hourSeismicData, stats = getHourData(stats) #stores the data from the hour, populated with data from previous recordings in the hour or zeroes
hourTimeData = np.array([], dtype = np.float64)
tempSeismicData = np.array([]) #used to store 18 value read from the tc1 and sent is sent to the plotting array, then cleared for next 18 values
    queue.put(['prev', hourSeismicData, currentMode, 'None', graphHeightConst, dcShift, skipConst, stats]) #Bad idea - change this because it will take too long, and the length of the data array will be too short by the time the collecting process is started.
while collecting_loop:
try:
#Checks whether the user has changed the view selection in the options window from 24 hour to 1 hour or has increased or decreased the graphShift
if(queue2.empty() == False):
readingQueue2 = queue2.get()
if readingQueue2 == "24-Hour-Plot":
currentMode = "24Hour"
now = Time.time()
queue.put(['prev', hourSeismicData, currentMode, '24-Hour-Plot', graphHeightConst, dcShift, skipConst, stats])
totalHoursConst = 23
tempSeismicData = np.array([])
tempMillisecond = np.array([])
if readingQueue2 == "1-Hour-Plot":
currentMode = "1Hour"
now = Time.time()
queue.put(['prev', hourSeismicData, currentMode, '1-Hour-Plot', graphHeightConst, dcShift, skipConst, stats])
tempSeismicData = np.array([])
tempMillisecond = np.array([])
if readingQueue2 == "UP":
tempSeismicData = np.array([])
tempMillisecond = np.array([])
dcShift += 100
for line in fileinput.input('Station Information.txt', inplace=True):
print line.replace('DCShift:'+str(oldDCShift), 'DCShift:'+str(dcShift)),
oldDCShift = dcShift
if readingQueue2 == "DOWN":
tempSeismicData = np.array([])
tempMillisecond = np.array([])
dcShift -= 100
#Every time the user changes the graphshift - the value in against the graphShift header in the StationInformation.txt file is updated
for line in fileinput.input('Station Information.txt', inplace=True):
print line.replace('DCShift:'+str(oldDCShift), 'DCShift:'+str(dcShift)),
oldDCShift = dcShift
#Read from the TC1 seismometer.
#Causes problems if seismometer not connected properly or if python is run multiple times?? (09/12/15). See exception handler below.
reading = int(serialPort.readline())
timeNow = datetime.time(datetime.now())
time = timeNow.minute + (timeNow.second + timeNow.microsecond/1000000.0)/60.0
hourTime = timeNow.minute*60+timeNow.second + timeNow.microsecond/1000000.0
hour = timeNow.hour
plotClear = False
if currentMode == "24Hour":
#Depending on the hour and viewMode which is 24 or 1 hour plotting, the data value that is read is translated to the appropriate height
data = [int(reading+(graphHeightConst*totalHoursConst))+dcShift]
if currentMode == "1Hour":
minute = (datetime.time(datetime.now())).minute
if minute < 5:
data = [int(reading+(graphHeightConst*11))+dcShift]
if minute < 10 and minute >= 5:
data = [int(reading+(graphHeightConst*10))+dcShift]
if minute < 15 and minute >= 10:
data = [int(reading+(graphHeightConst*9))+dcShift]
if minute < 20 and minute >= 15:
data = [int(reading+(graphHeightConst*8))+dcShift]
if minute < 25 and minute >= 20:
data = [int(reading+(graphHeightConst*7))+dcShift]
if minute < 30 and minute >= 25:
data = [int(reading+(graphHeightConst*6))+dcShift]
if minute < 35 and minute >= 30:
data = [int(reading+(graphHeightConst*5))+dcShift]
if minute < 40 and minute >= 35:
data = [int(reading+(graphHeightConst*4))+dcShift]
if minute < 45 and minute >= 40:
data = [int(reading+(graphHeightConst*3))+dcShift]
if minute < 50 and minute >= 45:
data = [int(reading+(graphHeightConst*2))+dcShift]
if minute < 55 and minute >= 50:
data = [int(reading+(graphHeightConst*1))+dcShift]
if minute < 60 and minute >= 55:
data = [int(reading+(graphHeightConst*0))+dcShift]
if (hour != lastHour):
## Everytime the hour changes the following code saves hour long SAC Files
lastHour = hour
fileName, stats, directory = saveHourData(stats, hourSeismicData, stationId,longitude, latitude , elevation)
hourSeismicData = np.array([])
hourTimeData = np.array([], dtype = np.float64)
##Uploads SAC file right after creating it
contentType = "application/octet-stream" #image/png
c = pycurl.Curl()
c.setopt(c.URL, 'https://nzseis.phy.auckland.ac.nz/pyjamaseis/upload/')
c.setopt(c.HTTPHEADER, ['Authorization:'+'Basic %s' % base64.b64encode("kofi:pyjamaseis")])
c.setopt(c.HTTPPOST, [("payload",(c.FORM_FILE, directory+fileName, c.FORM_CONTENTTYPE, contentType)), ("mode","sac")])
try:
c.perform()
c.close()
except pycurl.error, error:
errno, errstr = error
print 'An error occurred: ', errstr
totalHoursConst = totalHoursConst-1
if(totalHoursConst == -1):
plotClear = True
totalHoursConst = 23
hasHourChanged = True
if ((count % skipConst == 0) or hasHourChanged):
if ((tempSeismicData.size >= 18) or hasHourChanged):
                    ##After every 18 values are read from the TC1 seismometer, the array containing these values, along with the tempMillisecond array holding the exact time each value was read, is put on the queue for the plotting process to read
queue.put([tempSeismicData, tempMillisecond, hasHourChanged, plotClear, mode])
mode = "None"
#the arrays are cleared
tempSeismicData = np.array([])
tempMillisecond = np.array([])
hasHourChanged = False
options_window.hourData = (hourSeismicData, datetime.utcnow(), (latitude,longitude,elevation))
else:
if currentMode == "1Hour":
tempSeismicData = np.append(tempSeismicData,data)
                        #Each trace row spans 5 minutes, so the x-coordinate is the offset within the current 5-minute segment.
                        tempMillisecond = np.append(tempMillisecond,time % 5)
hourSeismicData = np.append(hourSeismicData,reading)
hourTimeData = np.append(hourTimeData, hourTime)
else:
tempSeismicData = np.append(tempSeismicData,data)
tempMillisecond = np.append(tempMillisecond,time)
hourSeismicData = np.append(hourSeismicData,reading)
hourTimeData = np.append(hourTimeData, hourTime)
count += 1
except:
#Exception handler for seismometer connection error mentioned above. (09/12/15)
exc_type = sys.exc_info()[0]
if str(exc_type).find('SerialException') != -1:
msgbx.showerror("Error", "PyjAmaSeis has detected a seismometer connection error.\nPlease exit PyjAmaSeis and reconnect seismometer.")
window_close()
else:
print exc_type
queue.put((stats, hourSeismicData, stationId, longitude, latitude , elevation, hourTimeData)) #saves data when program closes.
return
##This function is responsible for plotting data which is pre-loaded and has not been read from the seismometer in real-time. (11/12/15)
def plotPrevious(hour_data=None, currentMode=None, mode=None, graphHeightConst=None, dcShift=None, skipConst=None, stats=None):
data_array = hour_data
delta = stats['delta']
if currentMode == "24Hour":
data_array = data_array+(graphHeightConst*23+dcShift)
time_array = np.arange(0,len(data_array))*delta/60
queue.put([data_array, time_array, False, False, mode])
if currentMode == "1Hour":
tot_length = 0
for i in range(12):
i = i+1
if ((i)*300/delta) <= len(data_array):
data = np.array(data_array[tot_length:int(((i)*300/delta))])+(graphHeightConst*(12-i))+dcShift
time_array = np.arange(0,5,delta/60) #Want one less than 5
if len(time_array) == len(data)+1:
time_array = time_array[:len(data)]
if tot_length == 0:
queue.put([data, time_array, False, False, "1st-1-Hour-Plot"])
else:
queue.put([data, time_array, False, False, mode])
tot_length += len(data)
elif ((i-1)*300/delta) <= len(data_array):
data = np.array(data_array[int(((i-1)*300/delta)):])+(graphHeightConst*(12-i))+dcShift
if i != 1:
time_array = np.arange(0,(len(data_array)-tot_length))*delta/60
else:
time_array = np.arange(0,len(data_array))*delta/60
mode = "1st-1-Hour-Plot"
if len(time_array) == len(data)+1:
print len(time_array), len(data)
time_array = time_array[:len(data)]
queue.put([data, time_array, False, False, mode])
##This function (newv2.0) saves the seismic data from the hour. (11/12/15)
def saveHourData(stats, hourSeismicData, stationId, longitude, latitude , elevation):
now = UTCDateTime()
diff = now-stats['starttime']
sampling_rate = len(hourSeismicData)/diff
delta = 1/sampling_rate
    stats['npts'] = len(hourSeismicData)
stats['sampling_rate'] = sampling_rate
stats['delta'] = delta
st = Stream([Trace(data=hourSeismicData, header=stats)])
print 'Start:', stats['starttime'], 'End:', now, 'Length:', len(hourSeismicData)
sacdateAndTime = str(stats['starttime']).split('T')
sacdate = sacdateAndTime[0].split('-')
sactime = sacdateAndTime[1].split(':')
sacyear = sacdate[0][2:]
sacmonth = sacdate[1]
sacday = sacdate[2]
sachour = sactime[0]
fileName = str(sacyear+sacmonth+sacday+sachour+stats['station']+".sac") #v1.0 change. Removed minute from filename.
directory = stationId+'/'+str(sacdate[0])+'/'+sacmonth+'/'+sacday+'/'
directory_handler(directory)
st.write(directory+fileName, format='SAC')
stats = initializeHeader(longitude, latitude , elevation)
return fileName, stats, directory
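#Hedged example of the header arithmetic above (numbers assumed): an hour of data with
#len(hourSeismicData) = 67553 samples and diff = 3600.0 s gives sampling_rate ~ 18.765 Hz
#and delta = 1/18.765 ~ 0.0533 s per sample.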
#### This method gets all the active usb ports and selects the port that the TC1 is connected to by doing property comparisons that are unique to the TC1 connected port
def getSerialPort():
try:
activePorts = serial_ports()
for port in activePorts:
serialPort = serial.Serial(port)
if (serialPort.baudrate == 9600):
if (serialPort.parity == 'N'):
if (serialPort.timeout == None):
if (serialPort.xonxoff == False):
if platform.system() == 'Linux': #new v2.0. TC1 will be a /dev/ttyACM* port on linux.
if serialPort.port.find('/dev/ttyACM') != -1:
serialPort.close()
return port
else:
serialPort.close()
return port
#if(serialPort.inWaiting() != 0):
# return port
except:
print("Device not found")
#### Method Returns all active usb ports
def serial_ports():
"""Lists serial ports
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of available serial ports
"""
if sys.platform.startswith('win'):
ports = ['COM' + str(i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this is to exclude your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
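#A minimal usage sketch (assumes pyserial is installed and at least one port can be opened):
#    for port in serial_ports():
#        print port    # e.g. 'COM3' on Windows or '/dev/ttyACM0' on Linux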
#### Initializes the Header information for the SAC File
def initializeHeader(longitude, latitude , elevation, start=None):
global stationId
stats = Stats()
stats.network = 'RU'
stats.station = stationId
stats.location = latitude+'N.'+longitude+'E'
stats.channel = ' '
stats._format = 'SAC'
stats.calib = 1.0
stats.sampling_rate = 18.7647228241 #This is just a preliminary value (for get_hour_data). This is changed before saving with stats as header.
if start:
stats.starttime = UTCDateTime(start)
else:
#starttime in stats is no longer the current time this function is called, but the start of the current hour (11/12/15)
time = str(datetime.utcnow())
year, month, day = time.split('-')[0], time.split('-')[1], time.split('-')[2].split()[0] #utcnow() in form of 2015-12-10 03:21:24.769079
hour = time.split()[1].split(':')[0]
start = UTCDateTime(int(year),int(month),int(day),int(hour),0,0)
stats.starttime = UTCDateTime(start)
return stats
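#Illustration only (coordinates assumed): initializeHeader('174.768', '-36.852', '50.0') called at
#03:21 UTC on 2015-12-10 returns a Stats object whose starttime is UTCDateTime(2015, 12, 10, 3, 0, 0),
#i.e. the top of the current hour, with location '-36.852N.174.768E'.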
###Plotting process responsible for plotting data sent from the Collecting process, managing the plotting window, and changing and refreshing the axes between 24-hour and 1-hour plots. It saves a screenshot of the plot after every hour and uploads it to the NZSeis server.
### This method receives several input parameters such as queue figure axis... the queue is read for arrays of values sent by the collecting process
### This data is then plotted according to the plot selection (24 or 1 hour) on the ax object
### This method is also responsible for managing the connectivity between the lines drawn
def plotData(queue, queue2, fig, ax, canvas, bottomFrame, root, lastY, lastX, connect, line, mode, geometry, dataInteractive):
global plotting_loop, options_window, mainWin
#Embedded callback function (also see code below) to make sure previously recorded data is plotted after the window has been resized. (17/12/15)
def resize(root, geometry, mode):
root.update_idletasks()
new_geometry = root.geometry()
new_geometry = new_geometry[:new_geometry.find('+')] #Only concerned about when window is resized, not moved. (18/12/15)
if new_geometry != geometry:
dataInteractive.dataDeselector('resize') #Must be in this if statement
queue2.put(mode)
return new_geometry
if(queue.empty() == False):
#read the arrays and values sent by the collecting process. If _continue is changed to False if this gets a call to plot previous data.
values, _continue = queue.get(), True
geometry = resize(root, geometry, mode)
##
if values[4] == "24-Hour-Plot": #Only when data is put in queue by plotPrevious (15/12/15)
dataInteractive.dataDeselector('resize')
connect = True
lastX = 0
lastY = 0
mode = "24-Hour-Plot" #This variable is local to plotData and is not the same as mode in Collecting (that's values[4])
bottomFrame.mode = "24-Hour-Plot"
bottomFrame.firstHour = datetime.utcnow()
graphHeightConst = 2500
ax.cla()
ax.set_xlim(0,60)
ax.set_ylim(30250,92750)
ax.set_xlabel('Minute')
ax.set_ylabel('Hour (UTC)')
yAxis = [30250,92750]
y1 = (np.arange(min(yAxis), max(yAxis)+1,graphHeightConst))
y2 = calculateYAxisLabels()
ax = xAxisLabels(ax, 24)
plt.yticks(y1, y2)
ax.yaxis.grid(color = '#0000FF', linestyle = '-')
ax.set_axisbelow(True)
canvas.draw()
if values[4] == "1-Hour-Plot" or values[4] == "1st-1-Hour-Plot": #Only when data is put in queue by plotPrevious (15/12/15)
dataInteractive.dataDeselector('resize')
connect = True
lastX = 0
lastY = 0
mode = "1-Hour-Plot" #This variable is local to plotData and is not the same as mode in Collecting (that's values[4])
bottomFrame.mode = "1-Hour-Plot"
bottomFrame.firstHour = datetime.utcnow()
if values[4] == "1st-1-Hour-Plot":
values[4] = "1-Hour-Plot"
graphHeightConst = 2500
ax.cla()
ax.set_xlim(0,5)
ax.set_ylim(30250,62750)
ax.set_xlabel('Minute')
ax.set_ylabel('Hour (UTC)')
yAxis = [30250,62750]
y1 = (np.arange(min(yAxis), max(yAxis)+1,graphHeightConst))
y2 = calculateYAxisLabelsOneHour()
ax = xAxisLabels(ax, 1)
plt.yticks(y1, y2)
ax.yaxis.grid(color = '#0000FF', linestyle = '-')
ax.set_axisbelow(True)
canvas.draw()
if values[0] == 'prev':
plotPrevious(*values[1:])
_continue = False #Don't continue executing function
##
if _continue:
y = values[0]
x = values[1]
            #The following if statement and its contents are in charge of inserting the last value of the previous array at the front of the new array so the line starts from the last point, giving connectivity between the lines drawn
if(values[0].size != 0 and mode == "1-Hour-Plot" and values[4] != "1-Hour-Plot"):
if(lastX != 0 and lastY != 0):
y = np.insert(y, 0, lastY)
x = np.insert(x, 0, lastX)
lastY = values[0][-1]
lastX = values[1][-1]
for value in x:
                    if value > 4.998 or ((value > 4.9) and (str(datetime.utcnow()).split(':')[1] == '00')): #Addition to the conditional to prevent problems if the plotting of the last set actually happens slightly after the hour has changed. (10/12/15)
lastX = 0
lastY = 0
x = np.array([])
y = np.array([])
            #The following if statement and its contents are in charge of inserting the last value of the previous array at the front of the new array so the line starts from the last point, giving connectivity between the lines drawn
if (connect == True and mode == "24-Hour-Plot"):
if(lastX != 0 and lastY != 0):
y = np.insert(y, 0, lastY)
x = np.insert(x, 0, lastX)
if (values[0].size != 0 and mode == "24-Hour-Plot"):
lastY = values[0][-1]
lastX = values[1][-1]
#print 'Last:', lastY, lastX
if (values[2] == True and mode == "24-Hour-Plot"):
timestamp = open('timestamp.txt', 'a')
connect = False
# calculating time for the screenshot name when saving it
# v1.0 change: pyscreenshot.grab_to_file used instead of ImageGrab.grab().save()
now = str(datetime.utcnow())
now2 = now.split(' ',1 )
now3 = now2[1].split(':',1)
now3 = int(now3[0])-1
if (now3 == -1):
now3 = 23
name = str(now2[0]+'-'+str(now3)+".png")
timestamp.write(str(now2[0]+'-'+str(now3)))
timestamp.close()
yr_mnth_day = now2[0].split('-')
directory = stationId+'/'+yr_mnth_day[0]+'/'+yr_mnth_day[1]+'/'+yr_mnth_day[2]+'/'
directory_handler(directory)
#New Conditional v2.0. Screenshots causing problems with X server on ubuntu.
if platform.system() != 'Linux':
pyscreenshot.grab_to_file(directory+now2[0]+'-'+str(now3)+".png")
#upload image to NZSeis server - using the password and user name - kofi:pyjamaseis
contentType = 'image/png'
c = pycurl.Curl()
c.setopt(c.URL, 'https://nzseis.phy.auckland.ac.nz/pyjamaseis/upload/')
c.setopt(c.HTTPHEADER, ['Authorization:'+'Basic %s' % base64.b64encode("kofi:pyjamaseis")])
c.setopt(c.HTTPPOST, [("payload",(c.FORM_FILE, name, c.FORM_CONTENTTYPE, contentType)), ("mode","image")])
try:
c.perform()
c.close()
except pycurl.error, error:
errno, errstr = error
print 'An error occurred: ', errstr
else:
connect = True
if (values[2] == True and mode == "1-Hour-Plot"):
timestamp = open('timestamp.txt', 'a')
# calculating time for the screenshot name when saving it
# v1.0 change: pyscreenshot.grab_to_file used instead of ImageGrab.grab().save()
now = str(datetime.utcnow())
now2 = now.split(' ',1 )
now3 = now2[1].split(':',1)
now3 = int(now3[0])-1
if (now3 == -1):
now3 = 23
name = str(now2[0]+'-'+str(now3)+".png")
timestamp.write(str(now2[0]+'-'+str(now3)))
timestamp.close()
yr_mnth_day = now2[0].split('-')
directory = stationId+'/'+yr_mnth_day[0]+'/'+yr_mnth_day[1]+'/'+yr_mnth_day[2]+'/'
directory_handler(directory)
#New Conditional v2.0. Screenshots causing problems with X server on ubuntu.
if platform.system() != 'Linux':
pyscreenshot.grab_to_file(directory+now2[0]+'-'+str(now3)+".png")
#upload image to NZSeis server - using the password and user name - kofi:pyjamaseis
contentType = 'image/png'
c = pycurl.Curl()
c.setopt(c.URL, 'https://nzseis.phy.auckland.ac.nz/pyjamaseis/upload/')
c.setopt(c.HTTPHEADER, ['Authorization:'+'Basic %s' % base64.b64encode("kofi:pyjamaseis")])
c.setopt(c.HTTPPOST, [("payload",(c.FORM_FILE, name, c.FORM_CONTENTTYPE, contentType)), ("mode","image")])
try:
c.perform()
c.close()
except pycurl.error, error:
errno, errstr = error
print 'An error occurred: ', errstr
graphHeightConst = 2500
dataInteractive.dataDeselector('resize')
bottomFrame.firstHour = datetime.utcnow()
ax.cla()
ax.set_xlim(0,5)
ax.set_ylim(30250,62750)
ax.set_xlabel('Minute')
ax.set_ylabel('Hour (UTC)')
yAxis = [30250,62750]
y1 = (np.arange(min(yAxis), max(yAxis)+1,graphHeightConst))
y2 = calculateYAxisLabelsOneHour()
ax = xAxisLabels(ax, 1)
plt.yticks(y1, y2)
ax.yaxis.grid(color = '#0000FF', linestyle = '-')
ax.set_axisbelow(True)
canvas.draw()
fig.canvas.mpl_connect('motion_notify_event', lambda event: bottomFrame.mouse_move(event, graphHeightConst))
x = np.array([])
y = np.array([])
##
#Get the current time to display on the main plotting window
now = str(datetime.utcnow())
now1 = now.split('.',1)
timeNow = now1[0]+' - UTC'
bottomFrame.currentLabel.configure(text=timeNow) #sets the time as a label on the plot
if(values[3] == True and mode == "24-Hour-Plot"):
graphHeightConst = 2500
dataInteractive.dataDeselector('resize')
ax.cla()
ax.set_xlim(0,60) #05/01/16
ax.set_ylim(30250,92750)
ax.set_xlabel('Minute')
ax.set_ylabel('Hour (UTC)')
yAxis = [30250,92750]
y1 = (np.arange(min(yAxis), max(yAxis)+1,graphHeightConst))
y2 = calculateYAxisLabels()
ax = xAxisLabels(ax, 24)
plt.yticks(y1, y2)
ax.yaxis.grid(color = '#0000FF', linestyle = '-')
ax.set_axisbelow(True)
line, = ax.plot(x, y, color='k')
canvas.draw()
fig.canvas.mpl_connect('motion_notify_event', lambda event: bottomFrame.mouse_move(event, graphHeightConst))
x = np.array([])
y = np.array([])
line.set_data(x,y)
ax.draw_artist(line)
canvas.blit(ax.bbox) #Makes motion_notify events much faster. If this is tabbed in 2, then motion_notify events only update every second. Hopefully no adverse memory effects. (09/01/16)
if plotting_loop:
root.after(0, plotData,queue, queue2, fig, ax, canvas, bottomFrame, root, lastY, lastX, connect, line, mode, geometry, dataInteractive)
### Calculates labels required to represent the y axis for a 24 hour plot
def calculateYAxisLabels():
#24 hour labels
yaxislabels = []
#Gets current hour and generates an array containing values of the following 24 hours
now = str(datetime.utcnow())
now = now.split(' ',1)
now = now[1].split(':',1)
d = datetime.strptime(now[0], "%H")
d = str(d.strftime("%I %p")).split(' ',1)
currentHour = int(d[0])
ampm = str(" "+d[1])
hourAfter = currentHour + 1
hourAfterAmPm = ampm
if hourAfter == 12:
if(hourAfterAmPm == ' AM'):
hourAfterAmPm = ' PM'
else:
hourAfterAmPm = ' AM'
if hourAfter == 13:
hourAfter = 1
yaxislabels.append(str(currentHour)+ampm)
while currentHour != hourAfter or ampm != hourAfterAmPm:
yaxislabels.append(str(hourAfter)+ hourAfterAmPm)
hourAfter += 1
if hourAfter == 12:
if(hourAfterAmPm == ' AM'):
hourAfterAmPm = ' PM'
else:
hourAfterAmPm = ' AM'
if hourAfter == 13:
hourAfter = 1
yaxislabels.append('')
return yaxislabels[::-1]
### Calculates labels required to represent the y axis for a 1 hour plot
def calculateYAxisLabelsOneHour():
    #1 hour labels
yaxislabels = []
#Gets current hour and generates an array containing values of that hour divided into 5 minute sections
now = str(datetime.utcnow())
now = now.split(' ',1)
now = now[1].split(':',1)
d = datetime.strptime(now[0], "%H")
d = str(d.strftime("%I %p")).split(' ',1)
    start = 0
currentHour = int(d[0])
for i in range(0, 12):
if(start<10):
yaxislabels.append(str(currentHour)+':0'+str(start))
else:
yaxislabels.append(str(currentHour)+':'+str(start))
start = start+5
yaxislabels.append('')
return yaxislabels[::-1]
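#Hedged example: called during the 02:00 UTC hour, the list comes back (after the final reversal) as
#['', '2:55', '2:50', ..., '2:05', '2:00'] - twelve 5-minute row labels plus one blank for the top tick.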
## Function to find the labels for the x axis and draw grid.
def xAxisLabels(ax, mode):
if mode == 24:
x_list = []
for i in range(61): #(17/12/15)
if i%5 == 0:
x_list.append('+'+str(i))
else:
x_list.append('')
ax.set_xticks(np.arange(0,61,5))
ax.set_xticks(np.arange(0,61,1), minor=True)
ax.set_xticklabels([':00',':05',':10',':15',':20',':25',':30',':35',':40',':45',':50',':55',''])
ax.set_xticklabels(['']*61, minor=True)
ax.xaxis.grid(which = 'minor', color = '#7DCEA0', linestyle = ':')
ax.xaxis.grid(which = 'major', color = '#51bd80', linestyle = ':')
ax.xaxis.set_tick_params(labeltop='on')
return ax
elif mode == 1:
x_list = []
for i in range(31): #(17/12/15)
if i%6 == 0:
x_list.append('+'+str(i/6))
else:
x_list.append('')
ax.set_xticks(np.arange(0,6,1))
ax.set_xticks(np.arange(0,5.1,0.1666666666666666666666666666666), minor=True)
ax.set_xticklabels(['+0','+1','+2','+3','+4','+5'])
ax.set_xticklabels(['']*31, minor=True)
ax.xaxis.grid(which = 'minor', color = '#7DCEA0', linestyle = ':')
ax.xaxis.grid(which = 'major', color = '#51bd80', linestyle = ':')
ax.xaxis.set_tick_params(labeltop='on')
return ax
###Function to define what occurs when the main plotting window is closed. This is taken as exiting PyjAmaseis, so all windows and processes are ended. (07/12/15)
def window_close(condition=False):
global plotting_loop, collecting_loop, mainWin, options_window
plotting_loop, collecting_loop = False, False
if not condition: #Condition is True if program has not yet fully started (TC1 not connected error dialog exit press)
options_window.close()
mainWin.quit()
##Function (new v2.0) to support the new file saving system. Tries to make directory, and if directory already exists, ignores the exception raised. All other exceptions are reported. (09/12/15)
def directory_handler(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
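#Usage sketch (path is an assumed example):
#    directory_handler('01/2015/12/10/')   # creates the whole tree, silently ignoring EEXIST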
##Function to populate hourSeismicData array with any previous readings in that hour before readings start.
def getHourData(stats):
sampling_rate = stats['sampling_rate']
time = str(datetime.utcnow())
year, month, day = time.split('-')[0], time.split('-')[1], time.split('-')[2].split()[0] #utcnow() in form of 2015-12-10 03:21:24.769079
hour = time.split()[1].split(':')[0]
filename = year[2:]+month+day+hour+stationId+'.sac'
directory = stationId+'/'+year+'/'+month+'/'+day+'/'+filename
if not os.path.exists(directory): #returns an array with appropriate number of zeroes since beginning of hour
hour_seconds = (datetime(int(year),int(month),int(day),int(hour),0,0) - datetime(1970,1,1)).total_seconds()
number_of_zeroes = int((Time.time()-hour_seconds)*sampling_rate)
return np.array([32750]*number_of_zeroes), stats
else: #returns array with data previously recorded in that hour
trace = read(pathname_or_url = directory, format = 'SAC')
trace = trace.pop(0)
data = trace.data
hour_seconds = (datetime(int(year),int(month),int(day),int(hour),0,0) - datetime(1970,1,1)).total_seconds()
number_of_zeroes = int((Time.time()-hour_seconds)*sampling_rate)-len(data)
return np.append(data, [32750]*number_of_zeroes), stats
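#Hedged example of the padding above (numbers assumed): restarting 90 s into the hour with no .sac file
#present and sampling_rate = 18.7647228241 gives int(90*18.7647228241) = 1688 placeholder samples,
#all set to 32750 (the quiet-trace centre line), before live readings resume.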
### Main method: this is where the application starts. Three queues are created for passing data between the threads; a collecting thread is started, and plotting runs in the main thread.
if __name__ == '__main__':
global collecting_loop, plotting_loop, options_window, tkframes #(09/12/15)
    #Create three queues: one for communication between the collecting and plotting threads, one between the collecting thread and the options window for the user's selections, and one for passing extracted data to be displayed
queue = Queue()
queue2 = Queue()
queue3 = Queue()
    #Create the collecting thread
collecting_loop, plotting_loop = True, True
collectionProcess = Thread(target= Collecting, args=(queue,queue2,queue3,))
#Making threads daemons so that the program closes when processes in them stop (09/12/15).
collectionProcess.daemon = True
##Starting everything
collectionProcess.start()
#Previously, Plotting was the target for the plotting thread (plottingProcess, v1.0). This has been changed (14/01/16), as TkInter does not behave well when
#the mainloop is not in the Main Thread. Once the main window has been closed (see window_close, initiated by protocol of main window), the code after the
#mainloop can be executed to save the data before the entire program is closed. The while loops wait for the tuple of data from collecting to be placed in the
#queue.
window = Plotting(queue,queue2)
if plotting_loop: #This conditional is only False if the TC-1 is not connected on startup. No windows will have been created if this is the case, and the user has chosen to exit (see while loop near beginning of Collecting).
tkframes = mFrame(queue3, window.root)
window.root.mainloop()
#Wait until data is put into queue by Collecting, then save data and close.
while queue.empty():
waiting = 'Waiting for final data from Collecting'
trying = True
while trying:
if not queue.empty():
data = queue.get()
if type(data) == type((1,)):
trying = False
print 'Saving:'
print ''
saveHourData(data[0], data[1], data[2], data[3], data[4] , data[5])
print ''
print 'Done'
| gpl-2.0 | 3,916,063,271,660,287,500 | 49.946857 | 281 | 0.582119 | false |
luhn/pubsubclub | test2.py | 1 | 1562 | from twisted.internet import reactor
from autobahn.wamp1 import protocol as wamp
from autobahn.twisted.websocket import listenWS
from pubsubclub import (
ConsumerMixin, ProducerMixin, ConsumerServer, ProducerClient, consul,
)
class WampServerProtocol(wamp.WampServerProtocol):
def onSessionOpen(self):
print("Whoa")
self.registerForRpc(self, "http://example.com/pubsub#")
self.registerForPubSub('http://example.com/mytopic')
@wamp.exportRpc('publish')
def publish(self, data):
try:
self.dispatch(
data['channel'],
data['content'],
exclude=[self],
)
except:
import traceback
traceback.print_exc()
return {}
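# Sketch (assuming a WAMP v1 client session): the RPC exported above would be
# invoked as
#   session.call('http://example.com/pubsub#publish',
#                {'channel': 'http://example.com/mytopic', 'content': 'hello'})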
class WampServerFactory(ConsumerMixin, ProducerMixin, wamp.WampServerFactory):
protocol = WampServerProtocol
if __name__ == '__main__':
# import logging
# logging.basicConfig(level=logging.INFO)
from twisted.python import log
import sys
log.startLogging(sys.stderr)
consumer = ConsumerServer('0.0.0.0', 19001)
WampServerFactory.consumer = consumer
producer = ProducerClient([
('127.0.0.1', 19000),
])
WampServerFactory.producer = producer
server = WampServerFactory('ws://localhost:9901')
listenWS(server)
consumer.processor = server
"""
discovery = consul.ConsulDiscovery(
'http://localhost:8500/', 'pubsubclub', producer,
)
discovery.start()
"""
print('Starting...')
reactor.run()
| mit | -5,059,213,242,148,466,000 | 24.606557 | 78 | 0.638284 | false |
Johnzero/OE7 | OE-debug文件/PyWapFetion-master/PyWapFetion/Fetion.py | 1 | 5245 | #coding=utf-8
from cookielib import MozillaCookieJar
from urllib2 import Request, build_opener, HTTPHandler, HTTPCookieProcessor
from urllib import urlencode
import os
from Errors import *
from re import compile
from Cache import Cache
from gzip import GzipFile
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
idfinder = compile('touserid=(\d*)')
idfinder2 = compile('name="internalid" value="(\d+)"')
csrf_token = compile('<postfield name="csrfToken" value="(\w+)"/>')
codekey = compile('<img src="/im5/systemimage/verifycode(.*?).jpeg"')
__all__ = ['Fetion']
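# Note: the Chinese string literals below are matched against responses from
# f.10086.cn and must stay as-is; roughly, '成功' means "success",
# '图形验证码错误' means "captcha code wrong", and '对方不是您的好友'
# means "the recipient is not your friend".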
class Fetion(object):
def __init__(self, mobile, password=None, status='0',
cachefile='Fetion.cache', cookiesfile=''):
        '''Login status codes:
        online: 400   invisible: 0   busy: 600   away: 100
        '''
if cachefile:
self.cache = Cache(cachefile)
if not cookiesfile:
cookiesfile = '%s.cookies' % mobile
cookiejar = MozillaCookieJar(filename=cookiesfile)
if not os.path.isfile(cookiesfile):
open(cookiesfile, 'w').write(MozillaCookieJar.header)
cookiejar.load(filename=cookiesfile)
cookie_processor = HTTPCookieProcessor(cookiejar)
self.opener = build_opener(cookie_processor,
HTTPHandler)
self.mobile, self.password = mobile, password
if not self.alive():
self._login()
cookiejar.save()
self.changestatus(status)
def send2self(self, message, time=None):
if time:
htm = self.open('im/user/sendTimingMsgToMyselfs.action',
{'msg': message, 'timing': time})
else:
htm = self.open('im/user/sendMsgToMyselfs.action',
{'msg': message})
return '成功' in htm
def send(self, mobile, message, sm=False):
if mobile == self.mobile:
return self.send2self(message)
return self.sendBYid(self.findid(mobile), message, sm)
def addfriend(self, mobile, name='xx'):
htm = self.open('im/user/insertfriendsubmit.action',
                        {'nickname': name, 'number': mobile, 'type': '0'})
return '成功' in htm
def alive(self):
htm = self.open('im/index/indexcenter.action')
return '心情' in htm or '正在登陆' in htm
def deletefriend(self, id):
htm = self.open('im/user/deletefriendsubmit.action?touserid=%s' % id)
return '删除好友成功!' in htm
def changestatus(self, status='0'):
url = 'im5/index/setLoginStatus.action?loginstatus=' + status
for x in range(2):
htm = self.open(url)
return 'success' in htm
def logout(self, *args):
self.opener.open('http://f.10086.cn/im/index/logoutsubmit.action')
__enter__ = lambda self: self
__exit__ = __del__ = logout
def _login(self):
htm = ''
data = {
'm': self.mobile,
'pass': self.password,
}
while '图形验证码错误' in htm or not htm:
page = self.open('/im5/login/loginHtml5.action')
matches = codekey.findall(page)
if matches:
captcha = matches[0]
img = self.open('/im5/systemimage/verifycode%s.jpeg' % captcha)
open('verifycode.jpeg', 'wb').write(img)
captchacode = raw_input('captchaCode:')
data['captchaCode'] = captchacode
htm = self.open('/im5/login/loginHtml5.action', data)
self.alive()
return '登录' in htm
def sendBYid(self, id, message, sm=False):
url = 'im/chat/sendShortMsg.action?touserid=%s' % id
if sm:
url = 'im/chat/sendMsg.action?touserid=%s' % id
htm = self.open(url,
{'msg': message, 'csrfToken': self._getcsrf(id)})
if '对方不是您的好友' in htm:
raise FetionNotYourFriend
return '成功' in htm
def _getid(self, mobile):
htm = self.open('im/index/searchOtherInfoList.action',
{'searchText': mobile})
try:
return idfinder.findall(htm)[0]
except IndexError:
try:
return idfinder2.findall(htm)[0]
except:
return None
except:
return None
def findid(self, mobile):
if hasattr(self, 'cache'):
id = self.cache[mobile]
if not id:
self.cache[mobile] = id = self._getid(mobile)
return id
return self._getid(mobile)
def open(self, url, data=''):
request = Request('http://f.10086.cn/%s' % url, data=urlencode(data))
htm = self.opener.open(request).read()
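        # Responses may be gzip-compressed. If decompression fails, the bare
        # try/finally below swallows the exception (by returning from finally)
        # and the raw body is returned unchanged.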
try:
htm = GzipFile(fileobj=StringIO(htm)).read()
finally:
return htm
def _getcsrf(self, id=''):
if hasattr(self, 'csrf'):
return self.csrf
url = ('im/chat/toinputMsg.action?touserid=%s&type=all' % id)
htm = self.open(url)
try:
self.csrf = csrf_token.findall(htm)[0]
return self.csrf
except IndexError:
print htm
raise FetionCsrfTokenFail
| agpl-3.0 | -1,393,321,632,573,504,300 | 31.13125 | 79 | 0.571873 | false |
umangv/LitHub | LitHub/fbconnect/utils.py | 1 | 5141 | # Copyright 2011 Kalamazoo College Computer Science Club
# <[email protected]>
# This file is part of LitHub.
#
# LitHub is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LitHub is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LitHub. If not, see <http://www.gnu.org/licenses/>.
from django.utils.http import urlencode
from django.conf import settings
from django.core.urlresolvers import reverse
import urllib2
from urlparse import parse_qs
import json
def lazy_prop(func):
"""Wrapper for properties that should be evaluated lazily
This calls the actual method only once per instance. On the first time
the property is read, it's value is stored in self.__dict__. The next
time onwards, the stored value is returned.
Note that this wrapper also wraps the property wrapper on the method, so
only the @lazy_prop wrapper needs to be used.
"""
def wrap(self, *args, **kwargs):
if not func.__name__ in self.__dict__:
self.__dict__[func.__name__] = func(self, *args, **kwargs)
return self.__dict__[func.__name__]
return property(wrap)
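# Hypothetical usage sketch (not part of the original module):
#   class Report(object):
#       @lazy_prop
#       def summary(self):
#           return expensive_build()  # runs once; later reads hit __dict__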
class FBConnect(object):
"""Access and run queries using the Facebook Connect API"""
def __init__(self, code=None, view=None, access_token=None):
if code != None:
self.access_token = ""
self._get_access_token(code, view)
elif access_token != None:
self.access_token = access_token
elif access_token==None and code==None:
raise ValueError('code and access_token cannot both be None.')
def _get_access_token(self, code, view=None):
LOOKUP_URL = "https://graph.facebook.com/oauth/access_token?"
opts = {'client_id':settings.FB_APP_ID,
'redirect_uri':_url_receiving_code(view),
'client_secret':settings.FB_APP_SECRET,
'code':code}
try:
fb_resp = urllib2.urlopen(LOOKUP_URL + urlencode(opts))
result = fb_resp.read()
fb_resp.close()
except urllib2.HTTPError:
raise ValueError("The code was invalid or there was a problem" \
+ " connecting to Facebook")
resp = parse_qs(result)
if not resp.has_key('access_token'):
raise ValueError("No access token returned")
self.access_token = resp['access_token'][0]
@lazy_prop
def basic_info(self):
LOOKUP_URL = "https://graph.facebook.com/me?"
opts = {'access_token':self.access_token,}
try:
fb_resp = urllib2.urlopen(LOOKUP_URL + urlencode(opts))
results = fb_resp.read()
fb_resp.close()
except urllib2.HTTPError:
raise ValueError("The token was invalid or there was a " +\
"problem connecting to facebook")
return json.loads(results)
@lazy_prop
def networks(self):
LOOKUP_URL = "https://api.facebook.com/method/fql.query?"
opts = {'query':"SELECT affiliations FROM user WHERE uid=%s"%\
self.userid, 'access_token':self.access_token,
'format':'json'}
try:
fb_resp = urllib2.urlopen(LOOKUP_URL + urlencode(opts))
results = fb_resp.read()
fb_resp.close()
except urllib2.HTTPError:
            raise ValueError("The token was invalid or there was a " +
                    "problem connecting to facebook")
return json.loads(results)[0]['affiliations']
@lazy_prop
def userid(self):
return self.basic_info['id']
def publish_og(self, action, obj_type, obj, params=None):
opts = {'access_token':self.access_token,
obj_type:obj}
if params:
opts.update(params)
# Allows overriding any of the options in opts
try:
fb_resp = urllib2.urlopen(\
'https://graph.facebook.com/me/%s:%s'%(\
settings.FB_APP_NAMESPACE, action),
urlencode(opts))
id = fb_resp.read()
fb_resp.close()
except urllib2.HTTPError as e:
raise ValueError("There was a problem connecting to facebook.")
return id
def _url_receiving_code(view=None):
view = view or 'fbconnect.views.receive_code'
extra = reverse(view)
return settings.FB_REDIRECT_URL + extra
def redirect_to_fb_url(view=None):
base_url = "https://www.facebook.com/dialog/oauth?"
opts = {'client_id':settings.FB_APP_ID,
'redirect_uri':_url_receiving_code(view),
'scope':'email,publish_actions',}
return base_url + urlencode(opts)
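# Typical flow (sketch): send the user to redirect_to_fb_url(); Facebook then
# redirects back to the receiving view with ?code=..., which is exchanged for
# an access token:
#   fb = FBConnect(code=request.GET['code'])
#   user_id = fb.userid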
| gpl-3.0 | -8,131,026,453,522,601,000 | 38.244275 | 76 | 0.613888 | false |
georgejhunt/HaitiDictionary.activity | palettes.py | 1 | 10881 | # Copyright (C) 2008, One Laptop Per Child
# Copyright (C) 2009, Tomeu Vizoso, Simon Schampijer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import tempfile
import urlparse
from gettext import gettext as _
from gi.repository import Gtk
from gi.repository import GObject
from sugar3.graphics.palette import Palette, Invoker
from sugar3.graphics.menuitem import MenuItem
from sugar3.graphics.icon import Icon
from sugar3 import profile
from sugar3.activity import activity
import downloadmanager
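# NOTE: the _com_interfaces_ attributes and the xpcom/components calls below
# are leftovers from the old Mozilla embedding; 'interfaces', 'xpcom' and
# 'components' are no longer imported here, so those references raise
# NameError (the class-level ones at import time).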
class MouseOutListener(GObject.GObject):
_com_interfaces_ = interfaces.nsIDOMEventListener
__gsignals__ = {
'mouse-out': (GObject.SignalFlags.RUN_FIRST,
None,
([])),
}
def __init__(self, target):
GObject.GObject.__init__(self)
self.target = target
def handleEvent(self, event):
self.emit('mouse-out')
class ContentInvoker(Invoker):
_com_interfaces_ = interfaces.nsIDOMEventListener
def __init__(self, browser):
Invoker.__init__(self)
self._position_hint = self.AT_CURSOR
self._browser = browser
self._mouseout_listener = None
self._popdown_handler_id = None
def get_default_position(self):
return self.AT_CURSOR
def get_rect(self):
return ()
def get_toplevel(self):
return None
def handleEvent(self, event):
if event.button != 2:
return
target = event.target
if target.tagName.lower() == 'a':
if target.firstChild:
title = target.firstChild.nodeValue
else:
title = None
self.palette = LinkPalette(self._browser, title, target.href,
target.ownerDocument)
self.notify_right_click()
elif target.tagName.lower() == 'img':
if target.title:
title = target.title
            elif target.alt:
title = target.alt
elif target.name:
title = target.name
else:
title = os.path.basename(urlparse.urlparse(target.src).path)
self.palette = ImagePalette(title, target.src,
target.ownerDocument)
self.notify_right_click()
else:
return
        # connect to the freshly created palette so the mouseout listener is
        # cleaned up when the palette pops down
        self._popdown_handler_id = self.palette.connect(
            'popdown', self.__palette_popdown_cb)
self._mouseout_listener = MouseOutListener(target)
wrapper = xpcom.server.WrapObject(self._mouseout_listener,
interfaces.nsIDOMEventListener)
target.addEventListener('mouseout', wrapper, False)
self._mouseout_listener.connect('mouse-out', self.__moved_out_cb)
def __moved_out_cb(self, listener):
self.palette.popdown()
def __palette_popdown_cb(self, palette):
if self._mouseout_listener is not None:
wrapper = xpcom.server.WrapObject(self._mouseout_listener,
interfaces.nsIDOMEventListener)
self._mouseout_listener.target.removeEventListener('mouseout',
wrapper, False)
del self._mouseout_listener
class LinkPalette(Palette):
def __init__(self, browser, title, url, owner_document):
Palette.__init__(self)
self._browser = browser
self._title = title
self._url = url
self._owner_document = owner_document
if title is not None:
self.props.primary_text = title
self.props.secondary_text = url
else:
self.props.primary_text = url
menu_item = MenuItem(_('Follow link'), 'browse-follow-link')
menu_item.connect('activate', self.__follow_activate_cb)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(_('Follow link in new tab'),
'browse-follow-link-new-tab')
menu_item.connect('activate', self.__follow_activate_cb, True)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(_('Keep link'))
icon = Icon(icon_name='document-save', xo_color=profile.get_color(),
icon_size=Gtk.IconSize.MENU)
menu_item.set_image(icon)
menu_item.connect('activate', self.__download_activate_cb)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(_('Copy link'))
icon = Icon(icon_name='edit-copy', xo_color=profile.get_color(),
icon_size=Gtk.IconSize.MENU)
menu_item.set_image(icon)
menu_item.connect('activate', self.__copy_activate_cb)
self.menu.append(menu_item)
menu_item.show()
def __follow_activate_cb(self, menu_item, new_tab=False):
if new_tab:
new_browser = self._browser.open_new_tab(self._url)
else:
self._browser.load_uri(self._url)
self._browser.grab_focus()
def __copy_activate_cb(self, menu_item):
clipboard = Gtk.Clipboard()
targets = Gtk.target_list_add_uri_targets()
targets = Gtk.target_list_add_text_targets(targets)
targets.append(('text/x-moz-url', 0, 0))
clipboard.set_with_data(targets,
self.__clipboard_get_func_cb,
self.__clipboard_clear_func_cb)
def __clipboard_get_func_cb(self, clipboard, selection_data, info, data):
uri_targets = \
[target[0] for target in Gtk.target_list_add_uri_targets()]
text_targets = \
[target[0] for target in Gtk.target_list_add_text_targets()]
if selection_data.target in uri_targets:
selection_data.set_uris([self._url])
elif selection_data.target in text_targets:
selection_data.set_text(self._url)
elif selection_data.target == 'text/x-moz-url':
selection_data.set('text/x-moz-url', 8, self._url)
def __clipboard_clear_func_cb(self, clipboard, data):
pass
def __download_activate_cb(self, menu_item):
downloadmanager.save_link(self._url, self._title, self._owner_document)
class ImagePalette(Palette):
def __init__(self, title, url, owner_document):
Palette.__init__(self)
self._title = title
self._url = url
self._owner_document = owner_document
self.props.primary_text = title
self.props.secondary_text = url
menu_item = MenuItem(_('Keep image'))
icon = Icon(icon_name='document-save', xo_color=profile.get_color(),
icon_size=Gtk.IconSize.MENU)
menu_item.set_image(icon)
menu_item.connect('activate', self.__download_activate_cb)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(_('Copy image'))
icon = Icon(icon_name='edit-copy', xo_color=profile.get_color(),
icon_size=Gtk.IconSize.MENU)
menu_item.set_image(icon)
menu_item.connect('activate', self.__copy_activate_cb)
self.menu.append(menu_item)
menu_item.show()
def __copy_activate_cb(self, menu_item):
file_name = os.path.basename(urlparse.urlparse(self._url).path)
if '.' in file_name:
base_name, extension = file_name.split('.')
extension = '.' + extension
else:
base_name = file_name
extension = ''
temp_path = os.path.join(activity.get_activity_root(), 'instance')
fd, temp_file = tempfile.mkstemp(dir=temp_path, prefix=base_name,
suffix=extension)
os.close(fd)
os.chmod(temp_file, 0664)
cls = components.classes['@mozilla.org/network/io-service;1']
io_service = cls.getService(interfaces.nsIIOService)
uri = io_service.newURI(self._url, None, None)
cls = components.classes['@mozilla.org/file/local;1']
target_file = cls.createInstance(interfaces.nsILocalFile)
target_file.initWithPath(temp_file)
cls = components.classes[ \
'@mozilla.org/embedding/browser/nsWebBrowserPersist;1']
persist = cls.createInstance(interfaces.nsIWebBrowserPersist)
persist.persistFlags = 1 # PERSIST_FLAGS_FROM_CACHE
listener = xpcom.server.WrapObject(_ImageProgressListener(temp_file),
interfaces.nsIWebProgressListener)
persist.progressListener = listener
persist.saveURI(uri, None, None, None, None, target_file)
def __download_activate_cb(self, menu_item):
downloadmanager.save_link(self._url, self._title, self._owner_document)
class _ImageProgressListener(object):
_com_interfaces_ = interfaces.nsIWebProgressListener
def __init__(self, temp_file):
self._temp_file = temp_file
def onLocationChange(self, webProgress, request, location):
pass
def onProgressChange(self, webProgress, request, curSelfProgress,
maxSelfProgress, curTotalProgress, maxTotalProgress):
pass
def onSecurityChange(self, webProgress, request, state):
pass
def onStatusChange(self, webProgress, request, status, message):
pass
def onStateChange(self, webProgress, request, stateFlags, status):
if (stateFlags & interfaces.nsIWebProgressListener.STATE_IS_REQUEST and
stateFlags & interfaces.nsIWebProgressListener.STATE_STOP):
clipboard = Gtk.Clipboard()
clipboard.set_with_data([('text/uri-list', 0, 0)],
_clipboard_get_func_cb,
_clipboard_clear_func_cb,
self._temp_file)
def _clipboard_get_func_cb(clipboard, selection_data, info, temp_file):
selection_data.set_uris(['file://' + temp_file])
def _clipboard_clear_func_cb(clipboard, temp_file):
if os.path.exists(temp_file):
os.remove(temp_file)
| gpl-2.0 | 158,557,187,725,631,200 | 35.029801 | 79 | 0.602151 | false |
mbohlool/client-python | kubernetes/client/models/v1beta2_stateful_set_list.py | 1 | 6301 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta2StatefulSetList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'items': 'list[V1beta2StatefulSet]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1beta2StatefulSetList - a model defined in Swagger
"""
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1beta2StatefulSetList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta2StatefulSetList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta2StatefulSetList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta2StatefulSetList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1beta2StatefulSetList.
:return: The items of this V1beta2StatefulSetList.
:rtype: list[V1beta2StatefulSet]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1beta2StatefulSetList.
:param items: The items of this V1beta2StatefulSetList.
:type: list[V1beta2StatefulSet]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1beta2StatefulSetList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta2StatefulSetList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta2StatefulSetList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta2StatefulSetList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta2StatefulSetList.
:return: The metadata of this V1beta2StatefulSetList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta2StatefulSetList.
:param metadata: The metadata of this V1beta2StatefulSetList.
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
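    # Sketch: to_dict() recursively serializes the model; e.g. (hypothetical)
    #   V1beta2StatefulSetList(items=[]).to_dict()
    #   -> {'api_version': None, 'items': [], 'kind': None, 'metadata': None}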
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta2StatefulSetList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| apache-2.0 | -815,164,855,839,261,300 | 29.439614 | 281 | 0.594509 | false |
eminence/Minecraft-Overviewer | overviewer_core/assetmanager.py | 1 | 7994 | # This file is part of the Minecraft Overviewer.
#
# Minecraft Overviewer is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# Minecraft Overviewer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the Overviewer. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import codecs
import locale
import time
import logging
import traceback
from PIL import Image
import world
import util
from files import FileReplacer, mirror_dir
class AssetManager(object):
"""\
These objects provide an interface to metadata and persistent data, and at the
same time, controls the generated javascript files in the output directory.
There should only be one instances of these per execution.
"""
def __init__(self, outputdir, custom_assets_dir=None):
"""\
Initializes the AssetManager with the top-level output directory.
It can read/parse and write/dump the overviewerConfig.js file into this top-level
directory.
"""
self.outputdir = outputdir
self.custom_assets_dir = custom_assets_dir
self.renders = dict()
# look for overviewerConfig in self.outputdir
try:
with open(os.path.join(self.outputdir, "overviewerConfig.js")) as c:
overviewerConfig_str = "{" + "\n".join(c.readlines()[1:-1]) + "}"
self.overviewerConfig = json.loads(overviewerConfig_str)
except Exception, e:
if os.path.exists(os.path.join(self.outputdir, "overviewerConfig.js")):
logging.warning("A previous overviewerConfig.js was found, but I couldn't read it for some reason. Continuing with a blank config")
logging.debug(traceback.format_exc())
self.overviewerConfig = dict(tilesets=dict())
def get_tileset_config(self, name):
"Return the correct dictionary from the parsed overviewerConfig.js"
for conf in self.overviewerConfig['tilesets']:
if conf['path'] == name:
return conf
return dict()
def initialize(self, tilesets):
"""Similar to finalize() but calls the tilesets' get_initial_data()
instead of get_persistent_data() to compile the generated javascript
config.
"""
self._output_assets(tilesets, True)
def finalize(self, tilesets):
"""Called to output the generated javascript and all static files to
the output directory
"""
self._output_assets(tilesets, False)
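    # Sketch of the intended call order (inferred from the docstrings above):
    #   assets = AssetManager(outputdir)
    #   assets.initialize(tilesets)   # write initial config before rendering
    #   ... tiles are rendered ...
    #   assets.finalize(tilesets)     # write final config and static files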
def _output_assets(self, tilesets, initial):
if not initial:
get_data = lambda tileset: tileset.get_persistent_data()
else:
get_data = lambda tileset: tileset.get_initial_data()
# dictionary to hold the overviewerConfig.js settings that we will dumps
dump = dict()
dump['CONST'] = dict(tileSize=384)
dump['CONST']['image'] = {
'defaultMarker': 'signpost.png',
'signMarker': 'signpost_icon.png',
'bedMarker': 'bed.png',
'spawnMarker': 'http://google-maps-icons.googlecode.com/files/home.png',
'queryMarker': 'http://google-maps-icons.googlecode.com/files/regroup.png'
}
dump['CONST']['mapDivId'] = 'mcmap'
dump['CONST']['regionStrokeWeight'] = 2
        dump['CONST']['UPPERLEFT'] = world.UPPER_LEFT
        dump['CONST']['UPPERRIGHT'] = world.UPPER_RIGHT
        dump['CONST']['LOWERLEFT'] = world.LOWER_LEFT
        dump['CONST']['LOWERRIGHT'] = world.LOWER_RIGHT
# based on the tilesets we have, group them by worlds
worlds = []
for tileset in tilesets:
full_name = get_data(tileset)['world']
if full_name not in worlds:
worlds.append(full_name)
dump['worlds'] = worlds
dump['map'] = dict()
dump['map']['debug'] = True
dump['map']['cacheTag'] = str(int(time.time()))
dump['map']['north_direction'] = 'lower-left' # only temporary
dump['map']['center'] = [-314, 67, 94]
dump['map']['controls'] = {
'pan': True,
'zoom': True,
'spawn': True,
'compass': True,
'mapType': True,
'overlays': True,
'coordsBox': True,
'searchBox': True
}
dump['tilesets'] = []
for tileset in tilesets:
dump['tilesets'].append(get_data(tileset))
# write a blank image
blank = Image.new("RGBA", (1,1), tileset.options.get('bgcolor'))
blank.save(os.path.join(self.outputdir, tileset.options.get('name'), "blank." + tileset.options.get('imgformat')))
# write out config
jsondump = json.dumps(dump, indent=4)
with FileReplacer(os.path.join(self.outputdir, "overviewerConfig.js")) as tmpfile:
with codecs.open(tmpfile, 'w', encoding='UTF-8') as f:
f.write("var overviewerConfig = " + jsondump + ";\n")
#Copy assets, modify index.html
self.output_noconfig()
def output_noconfig(self):
# copy web assets into destdir:
global_assets = os.path.join(util.get_program_path(), "overviewer_core", "data", "web_assets")
if not os.path.isdir(global_assets):
global_assets = os.path.join(util.get_program_path(), "web_assets")
mirror_dir(global_assets, self.outputdir)
if self.custom_assets_dir:
# Could have done something fancy here rather than just overwriting
# the global files, but apparently this what we used to do pre-rewrite.
mirror_dir(self.custom_assets_dir, self.outputdir)
# write a dummy baseMarkers.js if none exists
if not os.path.exists(os.path.join(self.outputdir, "baseMarkers.js")):
with open(os.path.join(self.outputdir, "baseMarkers.js"), "w") as f:
                f.write("// if you want signs, please see genPOI.py\n")
# create overviewer.js from the source js files
js_src = os.path.join(util.get_program_path(), "overviewer_core", "data", "js_src")
if not os.path.isdir(js_src):
js_src = os.path.join(util.get_program_path(), "js_src")
with FileReplacer(os.path.join(self.outputdir, "overviewer.js")) as tmpfile:
with open(tmpfile, "w") as fout:
# first copy in js_src/overviewer.js
with open(os.path.join(js_src, "overviewer.js"), 'r') as f:
fout.write(f.read())
# now copy in the rest
for js in os.listdir(js_src):
if not js.endswith("overviewer.js") and js.endswith(".js"):
with open(os.path.join(js_src,js)) as f:
fout.write(f.read())
# Add time and version in index.html
indexpath = os.path.join(self.outputdir, "index.html")
index = codecs.open(indexpath, 'r', encoding='UTF-8').read()
index = index.replace("{title}", "Minecraft Overviewer")
index = index.replace("{time}", time.strftime("%a, %d %b %Y %H:%M:%S %Z", time.localtime()).decode(locale.getpreferredencoding()))
versionstr = "%s (%s)" % (util.findGitVersion(), util.findGitHash()[:7])
index = index.replace("{version}", versionstr)
with FileReplacer(indexpath) as indexpath:
with codecs.open(indexpath, 'w', encoding='UTF-8') as output:
output.write(index)
| gpl-3.0 | 5,831,587,068,284,765,000 | 40.206186 | 147 | 0.603327 | false |
tkw1536/PythonCaseClass | case_class/case_class.py | 1 | 8346 | """
CaseClass implementation for the case_class module
Copyright (c) 2016 Tom Wiesing -- licensed under MIT, see LICENSE
"""
from . import exceptions, clsutils, signature
#
# Meta-classes for the case class
#
class CaseClassMeta(type):
""" Meta-Class for case classes. """
instance_keys = {}
instance_values = {}
instance_list = []
def __new__(mcs, name, bases, attrs):
""" Creates a new class with MetaClass CaseClassMeta.
:param name: Name of the class to create.
:type name: str
:param bases: Base classes for the class.
:type bases: list
:param attrs: Attributes of this class.
:type attrs: dict
:rtype: CaseClassMeta
"""
# no case-to-case inheritance outside of the base classes
if _CaseClass not in bases and \
CaseClassMeta.inherits_from_case_class(bases):
raise exceptions.NoCaseToCaseInheritanceException(name)
# now we can just create it normally.
return super(CaseClassMeta, mcs).__new__(mcs, name, bases, attrs)
def __call__(cls, *args, **kwargs):
""" Creates a new CaseClass() instance.
:param args: Arguments to this CaseClass instance.
:type args: list
:param kwargs: Keyword arguments to this CaseClass instance.
:type kwargs: dict
:rtype: CaseClass
"""
# Can not instantiate Abstract Case Class
if AbstractCaseClass in cls.__bases__:
raise exceptions.NotInstantiableAbstractCaseClassException(cls)
# may not instantiate sub classes of _CaseClass
if _CaseClass in cls.__bases__:
raise exceptions.NotInstantiableClassException(
"Cannot instantiate %s: " % (cls.__name__,) +
"Classes inheriting directly from _CaseClass may not be " +
"instantiated. ", cls)
# make sure we have the dictionary
if cls not in CaseClassMeta.instance_keys:
CaseClassMeta.instance_keys[cls] = []
CaseClassMeta.instance_values[cls] = {}
# Extract the instances for this class
ckey = CaseClassMeta.instance_keys[cls]
cval = CaseClassMeta.instance_values[cls]
# key we will use for this instance.
key = clsutils.get_class_parameters(cls, *args, **kwargs)
# try and return an existing instance.
try:
return cval[ckey.index(key)]
except ValueError:
pass
# create a new instance
instance = super(CaseClassMeta, cls).__call__(*args, **kwargs)
# store the instance
idx = len(ckey)
ckey.append(key)
cval[idx] = instance
# and return it
return instance
def __getitem__(cls, item):
""" Syntactic sugar to create new CaseClass instances.
:param item: Tuple representing parameters or slice instance.
:type item: Any
:rtype: CaseClass
"""
# allow CaseClass[:] to create a new CaseClass()
if isinstance(item, slice):
if item.start is None and item.stop is None and item.step is None:
return CaseClassMeta.__call__(cls)
# if we get a single item, it needs to be turned into a tuple.
elif not isinstance(item, tuple):
item = (item,)
# finally just do the same as in call.
return CaseClassMeta.__call__(cls, *item)
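    # Sketch of the indexing sugar (assuming a concrete case class C):
    #   C[1, 2]  is equivalent to  C(1, 2)
    #   C[:]     is equivalent to  C()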
@staticmethod
def get_hash(cc):
""" Gets a hash for a CaseClass or None.
:param cc: CaseClass instance to get hash for
:type cc: CaseClass
:rtype: int
"""
if not isinstance(cc, CaseClass):
raise ValueError("Argument is not a CaseClass, can not get hash. ")
# get a key for the instance
cls = cc.__class__
key = (cc.case_args, cc.case_kwargs)
# extract the key
ckey = CaseClassMeta.instance_keys[cls]
idx = ckey.index(key)
# and return a hash of it
return hash((CaseClassMeta, ckey, idx))
@staticmethod
def is_concrete_caseclass(cls):
""" Checks if a class is a concrete case class via inheritance.
:param cls: Class to check.
:type cls: type
:rtype: bool
"""
return cls != AbstractCaseClass and CaseClass in cls.__bases__
@staticmethod
def inherits_from_case_class(bases):
""" Checks if this class inherits from a non-inheritable case class.
:param bases: List of bases of the class to check
:type bases: list
:rtype: bool
"""
# if we can inherit from it, we are already done.
if InheritableCaseClass in bases:
return False
for b in bases:
if CaseClassMeta.is_concrete_caseclass(b):
return True
return False
class _CaseClass(object):
""" A class used as base for all CaseClasses"""
pass
@clsutils.add_metaclass(CaseClassMeta)
class CaseClass(_CaseClass):
""" Represents a normal CaseClass. """
def __new__(cls, *args, **kwargs):
""" Creates a new CaseClass instance.
:param args: Parameters for this CaseClass instance.
:type args: list
:param kwargs: Keyword Arguments for this CaseClass instance.
:type kwargs: dict
:rtype: CaseClass
"""
# create a new instance
inst = super(CaseClass, cls).__new__(cls)
# set the class name
inst.__name = inst.__class__.__name__
# get the init signature
inst.__sig = clsutils.get_init_signature(inst.__class__)
# and the arguments
inst.__applied = inst.__sig(*args, **kwargs)
# and return the instance
return inst
def __hash__(self):
""" Returns a hash representing this case class.
:rtype: int
"""
return CaseClassMeta.get_hash(self)
def copy(self, *args, **kwargs):
""" Makes a copy of this CaseClass instance and exchanges the given
values.
:rtype: CaseClass
"""
updated = self.case_params.signature(*args, **kwargs)
return updated.call(self.__class__)
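    # Sketch (hypothetical Point case class): copy() re-applies the stored
    # constructor arguments with the given overrides, e.g.
    #   Point(1, 2).copy(y=5)  ->  Point(1, 5)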
@property
def case_params(self):
""" Returns the parameters originally given to this CaseClass.
:rtype: CaseParameters
"""
return CaseParameters(self.__applied)
def __repr__(self):
""" Implements a representation for CaseClass instances. This is given
by the class name and the representation of all the parameters.
:rtype: str
"""
# name of the class and parameters
return "%s(%s)" % (self.__name, self.case_params)
class AbstractCaseClass(CaseClass, _CaseClass):
""" Represents a CaseClass that may not be instantiated but only inherited
from. """
pass
class InheritableCaseClass(CaseClass, _CaseClass):
""" Represent a CaseClass that may be inherited from. """
pass
class CaseParameters(CaseClass, dict):
""" Represents arguments given to a CaseClass. """
def __init__(self, sig):
""" Creates a new CaseArguments() instance.
:param sig: Applied Signature of the original init function.
:type sig: signature.AppliedSignature
"""
self.__sig = sig
# super(CaseParameters, self).__init__(self.__params)
def __getitem__(self, n):
""" Returns a positional CaseClass parameter.
:param n: Number of item to get.
:type n: int
:rtype: object
"""
# TODO: Check into the numerical things
return self.__sig[n]
def __getattr__(self, name):
""" Gets a parameter given to this CaseParameters instance by name.
:param name: Name of parameter to get
:type name: str
"""
return self.__sig[name]
@property
def signature(self):
""" Returns the applied Signature belonging to this CaseClasss.
:rtype: signature.AppliedSignature
"""
return self.__sig
def __str__(self):
""" Turns this CaseParameters instance into a string.
:rtype: str
"""
return str(self.__sig)
__all__ = ["AbstractCaseClass", "CaseClass", "InheritableCaseClass"]
| mit | -7,552,289,889,035,847,000 | 25.495238 | 79 | 0.593937 | false |
yakky/django-form-designer | form_designer/migrations/0011_auto__add_field_formdefinitionfield_choice_model_queryset.py | 1 | 10609 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'FormDefinitionField.choice_model_queryset'
db.add_column(u'form_designer_formdefinitionfield', 'choice_model_queryset',
self.gf('django.db.models.fields.CharField')(default='objects', max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'FormDefinitionField.choice_model_queryset'
db.delete_column(u'form_designer_formdefinitionfield', 'choice_model_queryset')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'form_designer.formdefinition': {
'Meta': {'object_name': 'FormDefinition'},
'action': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'allow_get_initial': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_logged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'error_message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'form_template_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log_data': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mail_from': ('form_designer.fields.TemplateCharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'mail_subject': ('form_designer.fields.TemplateCharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'mail_to': ('form_designer.fields.TemplateCharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'mail_uploaded_files': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'message_template': ('form_designer.fields.TemplateTextField', [], {'null': 'True', 'blank': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'default': "'POST'", 'max_length': '10'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'private_hash': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}),
'public_hash': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}),
'redirect_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'require_hash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'save_uploaded_files': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'submit_label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'success_clear': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'success_message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'success_redirect': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'form_designer.formdefinitionfield': {
'Meta': {'ordering': "['position']", 'object_name': 'FormDefinitionField'},
'choice_labels': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'choice_model': ('form_designer.fields.ModelNameField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'choice_model_empty_label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'choice_model_queryset': ('django.db.models.fields.CharField', [], {'default': "'objects'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'choice_values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'decimal_places': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'field_class': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'form_definition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['form_designer.FormDefinition']"}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include_result': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'initial': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'max_digits': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'min_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'min_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'position': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'regex': ('form_designer.fields.RegexpExpressionField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'widget': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'form_designer.formlog': {
'Meta': {'object_name': 'FormLog'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'form_definition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['form_designer.FormDefinition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'form_designer.formvalue': {
'Meta': {'object_name': 'FormValue'},
'field_name': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'form_log': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'values'", 'to': u"orm['form_designer.FormLog']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('picklefield.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'})
}
}
    complete_apps = ['form_designer']
| bsd-3-clause | -4,493,967,210,291,561,500 | 81.248062 | 187 | 0.557923 | false |
swharden/SWHLab | doc/uses/EPSCs-and-IPSCs/variance method/2016-12-16 tryout.py | 1 | 3941 | """
This script investigates how calculating phasic currents from voltage clamp
recordings may benefit from subtracting-out the "noise" determined from a
subset of the quietest pieces of the recording, rather than using smoothing
or curve fitting to guess a guassian-like RMS noise function.
"""
import os
import swhlab
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
POINTS_PER_SEC=20000
POINTS_PER_MS=int(POINTS_PER_SEC/1000)
CHUNK_POINTS=POINTS_PER_MS*10 # size of Y pieces to calculate variance from
PERCENT_STEP=10 # percentile steps to display
HIST_RESOLUTION=.1 # pA per bin
COLORMAP=plt.get_cmap('jet') # which color scheme do we want to use?
#COLORMAP=plt.get_cmap('winter') # which color scheme do we want to use?
def quietParts(data,percentile=10):
"""
    Given some data, break it into chunks and return just the quiet ones.
Returns data where the variance for its chunk size is below the given percentile.
CHUNK_POINTS should be adjusted so it's about 10ms of data.
"""
    nChunks=int(len(data)/CHUNK_POINTS)
    chunks=np.reshape(data[:nChunks*CHUNK_POINTS],(nChunks,CHUNK_POINTS))
variances=np.var(chunks,axis=1)
percentiles=np.empty(len(variances))
for i,variance in enumerate(variances):
percentiles[i]=sorted(variances).index(variance)/len(variances)*100
selected=chunks[np.where(percentiles<=percentile)[0]].flatten()
return selected
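# Example (sketch): with POINTS_PER_SEC=20000, CHUNK_POINTS is 200 samples
# (10 ms per chunk); a 1 s sweep yields 100 chunks, and percentile=10 keeps
# the samples of roughly the 10 quietest chunks.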
def ndist(data,Xs):
"""
given some data and a list of X posistions, return the normal
distribution curve as a Y point at each of those Xs.
"""
sigma=np.sqrt(np.var(data))
center=np.average(data)
curve=mlab.normpdf(Xs,center,sigma)
curve*=len(data)*HIST_RESOLUTION
return curve
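# Note: mlab.normpdf integrates to 1, so scaling by len(data)*HIST_RESOLUTION
# converts the density into expected counts per histogram bin.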
if __name__=="__main__":
Y=np.load("sweepdata.npy")
# predict what our histogram will look like
padding=50
histCenter=int(np.average(Y))
histRange=(histCenter-padding,histCenter+padding)
histBins=int(abs(histRange[0]-histRange[1])/HIST_RESOLUTION)
# FIRST CALCULATE THE 10-PERCENTILE CURVE
data=quietParts(Y,10) # assume 10% is a good percentile to use
hist,bins=np.histogram(data,bins=histBins,range=histRange,density=False)
hist=hist.astype(np.float) # histogram of data values
curve=ndist(data,bins[:-1]) # normal distribution curve
hist[hist == 0] = np.nan
histValidIs=np.where(~np.isnan(hist))
histX,histY=bins[:-1][histValidIs],hist[histValidIs] # remove nans
baselineCurve=curve/np.max(curve) # max is good for smooth curve
# THEN CALCULATE THE WHOLE-SWEEP HISTOGRAM
hist,bins=np.histogram(Y,bins=histBins,range=histRange,density=False)
hist=hist.astype(np.float) # histogram of data values
hist[hist == 0] = np.nan
histValidIs=np.where(~np.isnan(hist))
histX,histY=bins[:-1][histValidIs],hist[histValidIs] # remove nans
histY/=np.percentile(histY,98) # percentile is needed for noisy data
# DETERMINE THE DIFFERENCE
diffX=bins[:-1][histValidIs]
diffY=histY-baselineCurve[histValidIs]
diffY[diffY<0]=np.nan
# NOW PLOT THE DIFFERENCE
plt.figure(figsize=(10,10))
plt.subplot(211)
plt.grid()
plt.plot(histX,histY,'b.',ms=10,alpha=.5,label="data points")
plt.plot(bins[:-1],baselineCurve,'r-',lw=3,alpha=.5,label="10% distribution")
plt.legend(loc='upper left',shadow=True)
plt.ylabel("normalized distribution")
plt.axis([histCenter-20,histCenter+20,0,1.5])
plt.subplot(212)
plt.grid()
plt.plot(diffX,diffY,'.',ms=10,alpha=.5,color='b')
plt.axvline(histCenter,color='r',lw=3,alpha=.5,ls='--')
plt.legend(loc='upper left',shadow=True)
plt.ylabel("difference")
plt.xlabel("histogram data points (pA)")
plt.margins(0,.1)
plt.axis([histCenter-20,histCenter+20,0,None])
plt.tight_layout()
plt.savefig("2016-12-16-tryout.png")
plt.show()
print("DONE") | mit | 2,619,447,392,169,634,000 | 36.903846 | 85 | 0.698046 | false |
gaasedelen/lighthouse | plugins/lighthouse/ui/coverage_settings.py | 1 | 4506 | import logging
from lighthouse.util.qt import *
from lighthouse.util.disassembler import disassembler
logger = logging.getLogger("Lighthouse.UI.Settings")
class TableSettingsMenu(QtWidgets.QMenu):
"""
A quick-access settings menu for Lighthouse.
"""
def __init__(self, parent=None):
super(TableSettingsMenu, self).__init__(parent)
self._visible_action = None
self._ui_init_actions()
self.setToolTipsVisible(True)
#--------------------------------------------------------------------------
# QMenu Overloads
#--------------------------------------------------------------------------
def event(self, event):
"""
Hook the QMenu event stream.
"""
action = self.activeAction()
# swallow clicks to checkbox/radiobutton actions to keep qmenu open
if event.type() == QtCore.QEvent.MouseButtonRelease:
if action and action.isEnabled() and action.isCheckable():
action.trigger()
event.accept()
return True
        # handle any other events as we normally should
return super(TableSettingsMenu, self).event(event)
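    # Note: accepting the event and returning True above marks it handled,
    # which is what keeps the menu open after toggling a checkable action.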
#--------------------------------------------------------------------------
# Initialization - UI
#--------------------------------------------------------------------------
def _ui_init_actions(self):
"""
Initialize the menu actions.
"""
# lighthouse colors
self._action_change_theme = QtWidgets.QAction("Change theme", None)
self._action_change_theme.setToolTip("Lighthouse color & theme customization")
self.addAction(self._action_change_theme)
self.addSeparator()
# painting
self._action_force_clear = QtWidgets.QAction("Force clear paint (slow!)", None)
self._action_force_clear.setToolTip("Attempt to forcefully clear stuck paint from the database")
self.addAction(self._action_force_clear)
self._action_disable_paint = QtWidgets.QAction("Disable painting", None)
self._action_disable_paint.setCheckable(True)
self._action_disable_paint.setToolTip("Disable the coverage painting subsystem")
self.addAction(self._action_disable_paint)
self.addSeparator()
# table actions
self._action_refresh_metadata = QtWidgets.QAction("Rebuild coverage mappings", None)
self._action_refresh_metadata.setToolTip("Refresh the database metadata and coverage mapping")
self.addAction(self._action_refresh_metadata)
self._action_dump_unmapped = QtWidgets.QAction("Dump unmapped coverage", None)
self._action_dump_unmapped.setToolTip("Print all coverage data not mapped to a function")
self.addAction(self._action_dump_unmapped)
self._action_export_html = QtWidgets.QAction("Generate HTML report", None)
self._action_export_html.setToolTip("Export the coverage table to HTML")
self.addAction(self._action_export_html)
self._action_hide_zero = QtWidgets.QAction("Hide 0% coverage", None)
self._action_hide_zero.setToolTip("Hide table entries with no coverage data")
self._action_hide_zero.setCheckable(True)
self.addAction(self._action_hide_zero)
def connect_signals(self, controller, lctx):
"""
Connect UI signals.
"""
self._action_change_theme.triggered.connect(lctx.core.palette.interactive_change_theme)
self._action_refresh_metadata.triggered.connect(lctx.director.refresh)
self._action_hide_zero.triggered[bool].connect(controller._model.filter_zero_coverage)
self._action_disable_paint.triggered[bool].connect(lambda x: lctx.painter.set_enabled(not x))
self._action_force_clear.triggered.connect(lctx.painter.force_clear)
self._action_export_html.triggered.connect(controller.export_to_html)
self._action_dump_unmapped.triggered.connect(lctx.director.dump_unmapped)
lctx.painter.status_changed(self._ui_painter_changed_status)
#--------------------------------------------------------------------------
# Signal Handlers
#--------------------------------------------------------------------------
@disassembler.execute_ui
def _ui_painter_changed_status(self, painter_enabled):
"""
Handle an event from the painter being enabled/disabled.
"""
self._action_disable_paint.setChecked(not painter_enabled)
| mit | -7,577,214,251,421,822,000 | 41.509434 | 104 | 0.60253 | false |