repo_name stringlengths 6 to 61 | path stringlengths 4 to 230 | copies stringlengths 1 to 3 | size stringlengths 4 to 6 | text stringlengths 1.01k to 850k | license stringclasses 15 values | hash int64 -9,220,477,234,079,998,000 to 9,219,060,020B | line_mean float64 11.6 to 96.6 | line_max int64 32 to 939 | alpha_frac float64 0.26 to 0.9 | autogenerated bool 1 class | ratio float64 1.62 to 6.1 | config_test bool 2 classes | has_no_keywords bool 2 classes | few_assignments bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
yotam/pictureframe | examples/quickstart.py | 1 | 1883 | import numpy as np
from numpy.random import random_sample
from skimage import img_as_float
from skimage.io import imread
from pictureframe import PictureFrame
image = img_as_float(imread('sample_data/image.png'))
depth = img_as_float(imread('sample_data/depth.png'))
# CONSTRUCTORS
# dict of array name to array
pf = PictureFrame({"image": image, "depth": depth})
# quick look at the data
print(pf)
# INDEXING
# we pass the indexing arguments down to numpy, so everything
# works as you would expect (apart from maybe broadcasting)
# slicing returns a new PictureFrame with views of original data
print(pf[30:, 80:220])
# cartesian indexing returns a new PictureFrame with copies of the data
# note the new shape
print(pf[[1, 3, 4], [40, 10, 11]])
# boolean or mask indexing works fine too
mask = np.random.random_sample(depth.shape) > 0.5
print(pf[mask])
# CONVENIENCE FUNCTIONS
# zoom function returns a new PictureFrame with resized data arrays
# order of interpolation is by default 2 for float arrays and 0 for
# integer arrays, but this can be overridden
print(pf.zoom(0.5))
# pandas/SQL-like groupby function iterates over sub-PictureFrames
# corresponding to each label value
# here we use an "external" array...
labels = imread('sample_data/labels.png')
for label, pf_group in pf.groupby(labels):
print(label, pf_group)
# however we can add it to the PictureFrame
pf['labels'] = labels
# and group by the name of the array
for k, pf_group in pf.groupby('labels'):
print(k, pf_group)
# browse function lets you see all array data with matplotlib
pf.browse()
# ASSIGNMENT
indices = np.array([10, 7, 3, 0, 12])
# copy some data to a new PictureFrame and alter the values
other_pf = pf[indices]
other_pf['image'] = random_sample(other_pf.image.shape)
# assignment of values between corresponding arrays handled internally
pf[indices] = other_pf
| bsd-3-clause | 5,895,956,978,169,367,000 | 26.289855 | 71 | 0.744025 | false | 3.368515 | false | false | false |
UMONS-GFA/pidas | pidas/test/log_temps_to_csv.py | 1 | 2935 | import sys
import subprocess
import time
import glob
import csv
import logging
import pandas as pd
from os import path, makedirs
from time import gmtime
from pidas.settings import PIDAS_DIR, SENSOR_LIST_FILE, DATA_FILE, CSV_HEADER
def get_sensor_list(sensor_list):
"""create a dataframe from sensor list name and position"""
df = pd.read_csv(sensor_list)
print(df)
return df
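# Assumed CSV layout for the sensor list: at least the columns 'sensor_id'
# and 'sensor_name', which are the ones looked up in get_temp_measures().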
def read_temp_raw(device_file):
catdata = subprocess.Popen(['cat', device_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = catdata.communicate()
out_decode = out.decode('utf-8')
lines = out_decode.split('\n')
return lines
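# read_temp() below parses the usual DS18B20 w1_slave output, e.g.:
#   72 01 4b 46 7f ff 0e 10 57 : crc=57 YES
#   72 01 4b 46 7f ff 0e 10 57 t=23125
# It waits for the CRC 'YES' flag on the first line, then converts the t=
# value (thousandths of a degree Celsius) on the second line to a string.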
def read_temp(device_file=''):
lines = read_temp_raw(device_file)
while lines[0].strip()[-3:] != 'YES':
#time.sleep(0.2)
lines = read_temp_raw(device_file)
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos + 2:]
temp_c = float(temp_string) / 1000.0
return str(temp_c)
else:
return None
def get_temp_measures():
device_list = glob.glob('/sys/bus/w1/devices/28-*')
try:
sensors_df = get_sensor_list(path.join(PIDAS_DIR,SENSOR_LIST_FILE))
except OSError as e:
print(e)
exit(1)
temp_measures = []
for device in device_list:
head, sensor_id = path.split(device)
device_file = str(device) + '/w1_slave'
sensor_name = sensors_df['sensor_name'][sensors_df['sensor_id'] == sensor_id].values[0]
val = read_temp(device_file)
timestamp = str(int(time.time()))
measure = (sensor_id, sensor_name, val, timestamp)
temp_measures.append(measure)
return temp_measures
if __name__ == "__main__":
print("Begin")
log_path = path.join(PIDAS_DIR, 'logs')
file_path = path.join(PIDAS_DIR, DATA_FILE)
if not path.exists(log_path):
makedirs(log_path)
logging_level = logging.DEBUG
logging.Formatter.converter = gmtime
log_format = '%(asctime)-15s %(levelname)s:%(message)s'
logging.basicConfig(format=log_format, datefmt='%Y/%m/%d %H:%M:%S UTC', level=logging_level,
handlers=[logging.FileHandler(path.join(log_path,'log_temps_to_csv.log')),
logging.StreamHandler()])
logging.info('_____ Started _____')
    logging.info('saving in ' + file_path)
if not path.exists(file_path):
with open(file_path, "w") as output_file:
writer = csv.writer(output_file)
writer.writerow(CSV_HEADER)
while 1:
try:
temp_measures = get_temp_measures()
for measure in temp_measures:
with open(file_path, "a") as output_file:
writer = csv.writer(output_file)
writer.writerow(measure)
except KeyboardInterrupt:
print(' Exiting measures')
sys.exit()
| gpl-3.0 | 409,664,085,070,116,200 | 30.55914 | 100 | 0.604429 | false | 3.506571 | false | false | false |
mephistopheies/nn | examples/pretraining2.py | 1 | 1610 | import sys
sys.path.append('./../nn/')
import numpy as np
from pandas import read_csv
from nn.nn.MLP import MLP, sigmoid, d_sigmoid, d_identity, identity, tanh, d_tanh, mcrmse, xeuclidian, d_xeuclidian, hamming, euclidian
from nn.nn.RBM import RBM
from nn.nn.Norms import l2, d_l2, l1, d_l1
if __name__ == '__main__':
df = read_csv('./../data/africa-soil/training.csv')
x = df.as_matrix(columns=df.columns[1:3595])
x[:, -1] = (x[:, -1] == 'Topsoil') * 1.0
x = x.astype(float)
y = df.as_matrix(columns=df.columns[3595:])
y = y.astype(float)
    # standardize each column: zero mean, unit variance
x = (x - np.repeat(x.mean(axis=0), x.shape[0]).reshape((x.shape[0], x.mean(axis=0).shape[0]), order='F')) / \
np.sqrt(np.repeat(x.var(axis=0), x.shape[0]).reshape((x.shape[0], x.mean(axis=0).shape[0]), order='F'))
idx_train = list(np.random.choice(range(x.shape[0]), size=int(round(0.8 * x.shape[0]))))
idx_cv = list(set(range(x.shape[0])) - set(idx_train))
rbm = RBM(x.shape[1], 100,
rng=(lambda n: np.random.normal(0, 0.001, n)),
mode='gaus-bin')
print(rbm)
rbm.train(x[idx_train, :],
cd_k=1,
learning_rate=0.001,
momentum_rate=0.9,
max_iter=1000,
batch_size=20,
n_iter_stop_skip=10,
goal=euclidian,
#cv_input_data=cv_input,
stop_threshold=0.15,
#neural_local_gain=(0.05, 0.95, 0.01, 100),
regularization_rate=0.1,
#regularization_norm=l1,
d_regularization_norm=d_l1,
)
| gpl-2.0 | 8,418,792,797,985,184,000 | 34 | 135 | 0.550311 | false | 2.756849 | false | true | false |
pythondigest/pythondigest | digest/tests/test_import_python_weekly.py | 1 | 1376 | # -*- encoding: utf-8 -*-
from django.test import TestCase
from mock import patch
from digest.management.commands.import_python_weekly import _get_content, \
_get_blocks
from digest.utils import MockResponse
from digest.utils import read_fixture
class ImportPythonWeeklyBadTest(TestCase):
def test_get_content_bad_link(self):
content = _get_content('htt://googl.fa')
self.assertEqual(content, '')
class ImportPythonWeeklyTest(TestCase):
def setUp(self):
self.url = 'http://us2.campaign-archive1.com/?u=e2e180baf855ac797ef407fc7&id=31658452eb&utm_content=buffera9dc3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer'
test_name = 'fixture_test_import_python_weekly_test_get_blocks.txt'
self.patcher = patch(
'digest.management.commands.import_python_weekly.urlopen')
self.urlopen_mock = self.patcher.start()
self.urlopen_mock.return_value = MockResponse(read_fixture(test_name))
# list(map(save_item, map(_apply_rules, map(_get_block_item, _get_blocks(url)))))
def tearDown(self):
self.patcher.stop()
def test_get_content(self):
content = _get_content(self.url)
self.assertEqual(len(content), 48233)
def test_get_blocks(self):
blocks = _get_blocks(self.url)
self.assertEqual(len(blocks), 28)
return blocks
| mit | 3,518,114,190,919,860,000 | 32.560976 | 181 | 0.6875 | false | 3.380835 | true | false | false |
sio2project/oioioi | oioioi/questions/processors.py | 1 | 2124 | import six
from django.urls import reverse
from django.utils.functional import lazy
from django.utils.translation import ungettext
from oioioi.base.utils import make_navbar_badge
from oioioi.contests.utils import can_enter_contest, is_contest_basicadmin
from oioioi.questions.utils import unanswered_questions
from oioioi.questions.views import new_messages, visible_messages
from oioioi.status.registry import status_registry
def navbar_tip_processor(request):
if not getattr(request, 'contest', None):
return {}
if not request.user.is_authenticated:
return {}
if not can_enter_contest(request):
return {}
def generator():
return make_navbar_badge(**navbar_messages_generator(request))
return {'extra_navbar_right_messages': lazy(generator, six.text_type)()}
@status_registry.register
def get_messages(request, response):
response['messages'] = navbar_messages_generator(request)
return response
def navbar_messages_generator(request):
if request.contest is None:
return {}
is_admin = is_contest_basicadmin(request)
vis_messages = visible_messages(request)
if is_admin:
messages = unanswered_questions(vis_messages)
else:
messages = new_messages(request, vis_messages)
count = messages.count()
if count:
text = ungettext('%(count)d NEW MESSAGE', '%(count)d NEW MESSAGES', count) % {
'count': count
}
if count == 1:
m = messages.get()
link = reverse(
'message',
kwargs={
'contest_id': request.contest.id,
'message_id': m.top_reference_id
if vis_messages.filter(id=m.top_reference_id).exists()
else m.id,
},
)
else:
link = reverse(
'contest_messages', kwargs={'contest_id': request.contest.id}
)
return {'link': link, 'text': text, 'id': 'contest_new_messages'}
else:
return {'link': None, 'text': None, 'id': 'contest_new_messages'}
| gpl-3.0 | 5,808,894,500,609,906,000 | 31.181818 | 86 | 0.620527 | false | 3.868852 | true | false | false |
cobrateam/splinter | tests/fake_django/urls.py | 1 | 2998 | from django.conf.urls import include, url
from django.http import HttpResponse
from django.shortcuts import redirect
from django.contrib import admin
from django.contrib.auth.decorators import login_required
import six
if six.PY2:
from django.core.urlresolvers import reverse
else:
from django.urls import reverse
from tests.fake_webapp import (
EXAMPLE_HTML,
EXAMPLE_IFRAME_HTML,
EXAMPLE_ALERT_HTML,
EXAMPLE_TYPE_HTML,
EXAMPLE_NO_BODY_HTML,
EXAMPLE_POPUP_HTML,
EXAMPLE_REDIRECT_LOCATION_HTML,
)
admin.autodiscover()
def index(request):
return HttpResponse(EXAMPLE_HTML)
def iframed(request):
return HttpResponse(EXAMPLE_IFRAME_HTML)
def alertd(request):
return HttpResponse(EXAMPLE_ALERT_HTML)
def type(request):
return HttpResponse(EXAMPLE_TYPE_HTML)
def no_body(request):
return HttpResponse(EXAMPLE_NO_BODY_HTML)
def get_name(request):
return HttpResponse("My name is: Master Splinter")
def get_user_agent(request):
return HttpResponse(request.META["User-Agent"])
def post_form(request):
items = "\n".join("{}: {}".format(*item) for item in request.POST.items())
body = "<html><body>{}</body></html>".format(items)
return HttpResponse(body)
def request_headers(request):
body = "\n".join(
"%s: %s" % (key, value) for key, value in six.iteritems(request.META)
)
return HttpResponse(body)
def upload_file(request):
if request.method == "POST":
f = request.FILES["file"]
buffer = []
buffer.append("Content-type: %s" % f.content_type)
buffer.append("File content: %s" % f.read())
return HttpResponse("|".join(buffer))
def foo(request):
return HttpResponse("BAR!")
def query_string(request):
    # Django's HttpRequest has no ``query_string`` attribute; the raw query
    # string is exposed via request.META.
    if request.META.get("QUERY_STRING") == "model":
        return HttpResponse("query string is valid")
    else:
        raise Exception("500")
def popup(request):
return HttpResponse(EXAMPLE_POPUP_HTML)
@login_required
def auth_required(request):
return HttpResponse("Success!")
def redirected(request):
location = "{}?{}".format(reverse("redirect_location"), "come=get&some=true")
return redirect(location)
def redirect_location(request):
return HttpResponse(EXAMPLE_REDIRECT_LOCATION_HTML)
urlpatterns = [
url(r"^$", index),
url(r"^iframe$", iframed),
url(r"^alert$", alertd),
url(r"^type$", type),
url(r"^no_body$", no_body),
url(r"^name$", get_name),
url(r"^useragent$", get_user_agent),
url(r"^headers$", request_headers),
url(r"^upload$", upload_file),
url(r"^foo$", foo),
url(r"^query$", query_string),
url(r"^popup$", popup),
url(r"^authenticate$", auth_required),
url(r"^redirected", redirected),
url(r"^post", post_form),
url(r"^redirect-location", redirect_location, name="redirect_location"),
]
if six.PY2:
urlpatterns.append(url(r"^admin/", include(admin.site.urls)))
else:
urlpatterns.append(url(r"^admin/", admin.site.urls))
| bsd-3-clause | 4,334,059,057,444,581,400 | 22.24031 | 81 | 0.666778 | false | 3.506433 | false | false | false |
kailIII/emaresa | trunk.cl/search_by_name_vat/__openerp__.py | 3 | 1671 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 CCI Connect asbl (http://www.cciconnect.be) All Rights Reserved.
# Philmer <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Search By Name Vat',
'version' : '1.0',
'author' : 'OpenERP',
'category' : 'Accounting & Finance',
'website': 'http://www.openerp.com',
'description': """
Modify res_partner search method
==================================
    Functionality to add Search by Name and VAT in res_partner model
Vat + Name parameter added to search res_partner
Vat + Name parameter added in res_partner field selection
""",
'depends' : ['base'],
'data' : [
],
'active': False,
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,303,899,774,746,253,000 | 39.756098 | 88 | 0.600239 | false | 4.156716 | false | false | false |
endlessm/chromium-browser | third_party/protobuf/python/google/protobuf/text_encoding.py | 26 | 4855 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Encoding related utilities."""
import re
import six
_cescape_chr_to_symbol_map = {}
_cescape_chr_to_symbol_map[9] = r'\t' # optional escape
_cescape_chr_to_symbol_map[10] = r'\n' # optional escape
_cescape_chr_to_symbol_map[13] = r'\r' # optional escape
_cescape_chr_to_symbol_map[34] = r'\"' # necessary escape
_cescape_chr_to_symbol_map[39] = r"\'" # optional escape
_cescape_chr_to_symbol_map[92] = r'\\' # necessary escape
# Lookup table for unicode
_cescape_unicode_to_str = [chr(i) for i in range(0, 256)]
for byte, string in _cescape_chr_to_symbol_map.items():
_cescape_unicode_to_str[byte] = string
# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32)
_cescape_byte_to_str = ([r'\%03o' % i for i in range(0, 32)] +
[chr(i) for i in range(32, 127)] +
[r'\%03o' % i for i in range(127, 256)])
for byte, string in _cescape_chr_to_symbol_map.items():
_cescape_byte_to_str[byte] = string
del byte, string
def CEscape(text, as_utf8):
# type: (...) -> str
"""Escape a bytes string for use in an text protocol buffer.
Args:
text: A byte string to be escaped.
as_utf8: Specifies if result may contain non-ASCII characters.
In Python 3 this allows unescaped non-ASCII Unicode characters.
In Python 2 the return value will be valid UTF-8 rather than only ASCII.
Returns:
Escaped string (str).
"""
# Python's text.encode() 'string_escape' or 'unicode_escape' codecs do not
  # satisfy our needs; they encode unprintable characters using two-digit hex
# escapes whereas our C++ unescaping function allows hex escapes to be any
# length. So, "\0011".encode('string_escape') ends up being "\\x011", which
# will be decoded in C++ as a single-character string with char code 0x11.
if six.PY3:
text_is_unicode = isinstance(text, str)
if as_utf8 and text_is_unicode:
# We're already unicode, no processing beyond control char escapes.
return text.translate(_cescape_chr_to_symbol_map)
ord_ = ord if text_is_unicode else lambda x: x # bytes iterate as ints.
else:
ord_ = ord # PY2
if as_utf8:
return ''.join(_cescape_unicode_to_str[ord_(c)] for c in text)
return ''.join(_cescape_byte_to_str[ord_(c)] for c in text)
_CUNESCAPE_HEX = re.compile(r'(\\+)x([0-9a-fA-F])(?![0-9a-fA-F])')
def CUnescape(text):
# type: (str) -> bytes
"""Unescape a text string with C-style escape sequences to UTF-8 bytes.
Args:
text: The data to parse in a str.
Returns:
A byte string.
"""
def ReplaceHex(m):
# Only replace the match if the number of leading back slashes is odd. i.e.
# the slash itself is not escaped.
if len(m.group(1)) & 1:
return m.group(1) + 'x0' + m.group(2)
return m.group(0)
# This is required because the 'string_escape' encoding doesn't
# allow single-digit hex escapes (like '\xf').
result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
if six.PY2:
return result.decode('string_escape')
return (result.encode('utf-8') # PY3: Make it bytes to allow decode.
.decode('unicode_escape')
# Make it bytes again to return the proper type.
.encode('raw_unicode_escape'))
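# Illustrative round trip (an example sketch, not part of the library's own
# documented examples):
#   CEscape(b'\x00\x7f"abc', as_utf8=False)  returns r'\000\177\"abc'
#   CUnescape(r'\000\177\"abc')              returns b'\x00\x7f"abc'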
| bsd-3-clause | -6,027,538,505,363,856,000 | 40.495726 | 80 | 0.696601 | false | 3.559384 | false | false | false |
eukaryote/knowhow | knowhow/util.py | 1 | 3828 | # coding=utf8
"""
Misc utilities.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from datetime import datetime
import sys
import pytz
import pytz.reference
import six
ISO_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%f+00:00"
PYTHON2 = sys.version_info < (3,)
def decode(obj):
"""
Decode obj to unicode if it is a byte string, trying first utf8 and then
iso-8859-1, raising a `UnicodeDecodeError` if unable to decode a byte
string, or returning obj unchanged if it is not a byte string.
"""
if isinstance(obj, six.binary_type):
try:
obj = obj.decode("utf8")
except UnicodeDecodeError:
obj = obj.decode("iso-8859-1")
return obj
def encode(obj, ascii=False):
"""
Encode the object arg as ascii (unicode-escaped) if `ascii` true or utf8.
"""
if isinstance(obj, six.text_type):
obj = obj.encode("unicode-escape" if ascii else "utf8")
return obj
def needs_ascii(fh):
"""
Answer whether to encode as ascii for the given file handle, which is based
on whether the handle has an encoding (None under py2 and UTF-8 under py3)
and whether the handle is associated with a tty.
"""
if fh.encoding and fh.encoding != "UTF-8":
return True
return not fh.isatty()
def json_serializer(val):
"""A JSON `default` helper function for serializing datetimes."""
return val.isoformat() if isinstance(val, datetime) else val
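# Typical use (assumed): json.dumps(payload, default=json_serializer), so that
# datetime values are emitted as ISO 8601 strings instead of raising TypeError.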
def parse_datetime(val):
"""
Parse datetime string in `ISO_DATE_FORMAT` and return a datetime value.
"""
return datetime.strptime(val, ISO_DATE_FORMAT).replace(tzinfo=pytz.utc)
def utc_to_local(dt):
"""
Convert UTC `datetime.datetime` instance to localtime.
Returns a datetime with `tzinfo` set to the current local timezone.
"""
local_timezone = pytz.reference.LocalTimezone()
dt = dt + local_timezone.utcoffset(datetime.now())
return dt.replace(tzinfo=local_timezone)
def strip(val):
"""
Strip val, which may be str or iterable of str.
For str input, returns stripped string, and for iterable input,
returns list of str values without empty str (after strip) values.
"""
if isinstance(val, six.string_types):
return val.strip()
try:
return list(filter(None, map(strip, val)))
except TypeError:
return val
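# Illustrative examples:
#   strip('  a  ') returns 'a'
#   strip(['  a ', '', ' b ']) returns ['a', 'b']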
if PYTHON2:
def pickle_loads(data, *args, **kwargs):
"""
Pickle.loads replacement that handles Python2/3 gotchas.
"""
try:
from cPickle import loads
except ImportError:
from pickle import loads
return loads(data, *args, **kwargs)
else:
def pickle_loads(data, *args, **kwargs):
"""
Pickle.loads replacement that handles Python2/3 gotchas.
"""
from pickle import loads
try:
return loads(data, *args, **kwargs)
except UnicodeDecodeError as e:
print(e.args)
if PYTHON2 or not e.args[0] == "ascii":
raise
result = loads(data, encoding="bytes")
# need to handle a py2-pickled dict having bytes keys, which will
# be skipped in python3, so we convert all keys to str if needed
if isinstance(result, dict):
d = {}
method = result.iteritems if PYTHON2 else result.items
for k, v in method():
if isinstance(k, bytes):
k = k.decode("ascii")
d[k] = v
if d:
result = d
return result
def monkeypatch():
"""
    Monkeypatch whoosh.compat.loads to use the Python 2/3-safe pickle_loads
    defined above.
"""
import whoosh.compat
whoosh.compat.loads = pickle_loads
| mit | -4,824,097,179,376,223,000 | 25.219178 | 79 | 0.618077 | false | 4.025237 | false | false | false |
JustinWingChungHui/okKindred | gallery/models/image.py | 2 | 9448 | from django.conf import settings
from django.db import models
from django.utils import timezone
from common.utils import create_hash
from common.get_lat_lon_exif_pil import get_lat_lon_backup
from common.s3_synch import upload_file_to_s3, remove_file_from_s3, get_file_from_s3
from custom_user.models import User
from gallery.models import Gallery
import PIL
import os
import threading
def upload_to(instance, filename):
'''
Defines a dynamic directory for files to be uploaded to
http://stackoverflow.com/questions/6350153/getting-username-in-imagefield-upload-to-path
'''
directory = ''.join([settings.MEDIA_ROOT, 'galleries/', str(instance.family_id), '/', str(instance.gallery.id)])
if not os.path.exists(directory):
os.makedirs(directory)
return 'galleries/%s/%s/%s' % (instance.family_id, instance.gallery.id, filename)
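# e.g. for an image whose family id is 7 and gallery id is 12, a hypothetical
# upload_to(instance, 'photo.jpg') call returns 'galleries/7/12/photo.jpg',
# creating the corresponding directory under MEDIA_ROOT if it does not exist.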
class Image(models.Model):
'''
Represents an image uploaded to a gallery
'''
class Meta:
#Allows models.py to be split up across multiple files
app_label = 'gallery'
indexes = [
models.Index(fields=['gallery']),
models.Index(fields=['family']),
models.Index(fields=['date_taken'])
]
gallery = models.ForeignKey(Gallery, blank=False, null=False, on_delete=models.CASCADE)
family = models.ForeignKey('family_tree.Family', null=False, on_delete=models.CASCADE) #Use of model string name to prevent circular import
original_image = models.ImageField(upload_to=upload_to, blank=True, null=False, width_field='original_image_width', height_field='original_image_height')
original_image_height = models.IntegerField(null=True)
original_image_width = models.IntegerField(null=True)
thumbnail = models.ImageField(upload_to=upload_to, blank=True, null=False, width_field='thumbnail_width', height_field='thumbnail_height')
thumbnail_height = models.IntegerField(null=True)
thumbnail_width = models.IntegerField(null=True)
large_thumbnail = models.ImageField(upload_to=upload_to, blank=True, null=False, width_field='large_thumbnail_width', height_field='large_thumbnail_height')
large_thumbnail_height = models.IntegerField(null=True)
large_thumbnail_width = models.IntegerField(null=True)
title = models.CharField(max_length=50)
description = models.TextField(blank=True)
#EXIF data
date_taken = models.DateTimeField(null=False)
latitude = models.FloatField(blank=True, null=False, default = 0) #(0,0) is in the middle of the ocean so can set this to 0 to avoid nulls
longitude = models.FloatField(blank=True, null=False, default = 0)
#Tracking
creation_date = models.DateTimeField(auto_now_add=True)
last_updated_date = models.DateTimeField(auto_now=True)
uploaded_by = models.ForeignKey(User, blank=True, null=True, on_delete=models.SET_NULL)
def __str__(self): # __unicode__ on Python 2
return self.title
def save(self, *args, **kwargs):
'''
Overrides the save method
'''
self.family_id = self.gallery.family_id
if self.id is None or self.id <= 0:
new_record = True
else:
new_record = False
if not self.date_taken:
self.date_taken = timezone.now()
#Need to call save first before making thumbnails so image path is set properly
super(Image, self).save(*args, **kwargs) # Call the "real" save() method.
# Don't need to do the rest if editing existing image
if new_record == False:
return
im = PIL.Image.open(self._get_absolute_image_path())
self._populate_exif_data(im)
self.make_thumbnails(im)
#Set last updated data on Gallery
self.gallery.save()
# Ensure that this has not been set to null
if not self.date_taken:
self.date_taken = timezone.now()
super(Image, self).save(*args, **kwargs) # Call the "real" save() method.
def make_thumbnails(self, image=None):
'''
Creates the thumbnails for the images
It also sets a thumbnail for the gallery if none exists
'''
if not self.original_image:
return
if not self.large_thumbnail:
self.large_thumbnail, image = self._create_thumbnail((960,960))
if not self.thumbnail:
self.thumbnail, image = self._create_thumbnail((200,200), image)
#Set the gallery thumbnail
if not self.gallery.thumbnail:
self.gallery.thumbnail = self.thumbnail
self.gallery.thumbnail_height = self.thumbnail_height
self.gallery.thumbnail_width = self.thumbnail_width
def _create_thumbnail(self, size, image = None):
'''
Creates the thumbnails
'''
if not image:
image = PIL.Image.open(self._get_absolute_image_path())
image.thumbnail(size)
filename = create_hash(str(self.original_image)) + '.jpg'
path_and_filename = upload_to(self, str(filename))
image = image.convert('RGB')
image.save(settings.MEDIA_ROOT + str(path_and_filename), "JPEG", quality=90)
return path_and_filename, image
def _get_absolute_image_path(self, path = None):
'''
Gets the absolute image path
'''
if not path:
path = self.original_image
if settings.MEDIA_ROOT in str(path):
image_file = str(path)
else:
image_file = settings.MEDIA_ROOT + str(path)
return image_file
def _populate_exif_data(self, image=None):
'''
Uses the exif data from an image to populate fields on the image model
http://stackoverflow.com/questions/6460381/translate-exif-dms-to-dd-geolocation-with-python
'''
if self.latitude != 0 and self.longitude != 0:
return
# if not image:
# image = PIL.Image.open(self._get_absolute_image_path())
# Issue with PIL GPS tag reading so using another library
lat, lon, date_time = get_lat_lon_backup(self._get_absolute_image_path())
self.latitude = lat
self.longitude = lon
self.date_taken = date_time
def delete_local_image_files(self):
'''
Deletes the original image and thumbnails associated with this
object
'''
try:
os.remove(self._get_absolute_image_path(self.original_image))
except:
pass
try:
os.remove(self._get_absolute_image_path(self.thumbnail))
except:
pass
try:
os.remove(self._get_absolute_image_path(self.large_thumbnail))
except:
pass
def delete_remote_image_files(self):
'''
Deletes the image and thumbnails associated with this
object on s3
'''
t1 = threading.Thread(target=remove_file_from_s3, args=(self.original_image,))
t2 = threading.Thread(target=remove_file_from_s3, args=(self.thumbnail,))
t3 = threading.Thread(target=remove_file_from_s3, args=(self.large_thumbnail,))
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
def upload_files_to_s3(self):
'''
Uploads image and thumbnail files to s3
'''
t1 = threading.Thread(target=upload_file_to_s3, args=(self.original_image,))
t2 = threading.Thread(target=upload_file_to_s3, args=(self.thumbnail,))
t3 = threading.Thread(target=upload_file_to_s3, args=(self.large_thumbnail,))
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
def rotate(self, anticlockwise_angle = 90):
'''
Rotates the image and all thumbnails
'''
thumbnail = self._rotate_image(self.thumbnail, anticlockwise_angle)
thumbnail_path_and_filename = upload_to(self, str(create_hash(str(self.original_image)) + '.jpg'))
thumbnail.save(settings.MEDIA_ROOT + str(thumbnail_path_and_filename), "JPEG", quality=95)
large_thumbnail = self._rotate_image(self.large_thumbnail, anticlockwise_angle)
large_thumbnail_path_and_filename = upload_to(self, str(create_hash(str(self.original_image)) + '.jpg'))
large_thumbnail.save(settings.MEDIA_ROOT + str(large_thumbnail_path_and_filename), "JPEG", quality=95)
original_image = self._rotate_image(self.original_image, anticlockwise_angle)
original_image_path_and_filename = upload_to(self, str(create_hash(str(self.original_image)) + '.jpg'))
original_image.save(settings.MEDIA_ROOT + str(original_image_path_and_filename), "JPEG", quality=95)
self.delete_local_image_files()
self.delete_remote_image_files()
self.thumbnail = thumbnail_path_and_filename
self.large_thumbnail = large_thumbnail_path_and_filename
self.original_image = original_image_path_and_filename
self.save()
self.upload_files_to_s3()
self.delete_local_image_files()
def _rotate_image(self, path, anticlockwise_angle = 90):
'''
Rotates an image
'''
get_file_from_s3(path)
image = PIL.Image.open(settings.MEDIA_ROOT + str(path))
return image.rotate(anticlockwise_angle, resample=PIL.Image.BICUBIC, expand=True)
| gpl-2.0 | 2,709,290,802,625,989,000 | 32.385159 | 160 | 0.634949 | false | 3.792854 | false | false | false |
rajalokan/keystone | keystone/tests/unit/test_cli.py | 1 | 57490 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import uuid
import fixtures
import mock
import oslo_config.fixture
from oslo_db.sqlalchemy import migration
from oslo_log import log
from six.moves import configparser
from six.moves import range
from testtools import matchers
from keystone.auth import controllers
from keystone.cmd import cli
from keystone.cmd.doctor import caching
from keystone.cmd.doctor import credential
from keystone.cmd.doctor import database as doc_database
from keystone.cmd.doctor import debug
from keystone.cmd.doctor import federation
from keystone.cmd.doctor import ldap
from keystone.cmd.doctor import security_compliance
from keystone.cmd.doctor import tokens
from keystone.cmd.doctor import tokens_fernet
from keystone.common import dependency
from keystone.common.sql import upgrades
import keystone.conf
from keystone import exception
from keystone.i18n import _
from keystone.identity.mapping_backends import mapping as identity_mapping
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
from keystone.tests.unit.ksfixtures import database
from keystone.tests.unit.ksfixtures import ldapdb
CONF = keystone.conf.CONF
class CliTestCase(unit.SQLDriverOverrides, unit.TestCase):
def config_files(self):
config_files = super(CliTestCase, self).config_files()
config_files.append(unit.dirs.tests_conf('backend_sql.conf'))
return config_files
def test_token_flush(self):
self.useFixture(database.Database())
self.load_backends()
cli.TokenFlush.main()
class CliNoConfigTestCase(unit.BaseTestCase):
def setUp(self):
self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF))
self.config_fixture.register_cli_opt(cli.command_opt)
self.useFixture(fixtures.MockPatch(
'oslo_config.cfg.find_config_files', return_value=[]))
super(CliNoConfigTestCase, self).setUp()
# NOTE(crinkle): the command call doesn't have to actually work,
# that's what the other unit tests are for. So just mock it out.
class FakeConfCommand(object):
def __init__(self):
self.cmd_class = mock.Mock()
self.useFixture(fixtures.MockPatchObject(
CONF, 'command', FakeConfCommand()))
self.logging = self.useFixture(fixtures.FakeLogger(level=log.WARN))
def test_cli(self):
expected_msg = 'Config file not found, using default configs.'
cli.main(argv=['keystone-manage', 'db_sync'])
self.assertThat(self.logging.output, matchers.Contains(expected_msg))
class CliBootStrapTestCase(unit.SQLDriverOverrides, unit.TestCase):
def setUp(self):
self.useFixture(database.Database())
super(CliBootStrapTestCase, self).setUp()
def config_files(self):
self.config_fixture.register_cli_opt(cli.command_opt)
config_files = super(CliBootStrapTestCase, self).config_files()
config_files.append(unit.dirs.tests_conf('backend_sql.conf'))
return config_files
def config(self, config_files):
CONF(args=['bootstrap', '--bootstrap-password', uuid.uuid4().hex],
project='keystone',
default_config_files=config_files)
def test_bootstrap(self):
bootstrap = cli.BootStrap()
self._do_test_bootstrap(bootstrap)
def _do_test_bootstrap(self, bootstrap):
bootstrap.do_bootstrap()
project = bootstrap.resource_manager.get_project_by_name(
bootstrap.project_name,
'default')
user = bootstrap.identity_manager.get_user_by_name(
bootstrap.username,
'default')
role = bootstrap.role_manager.get_role(bootstrap.role_id)
role_list = (
bootstrap.assignment_manager.get_roles_for_user_and_project(
user['id'],
project['id']))
self.assertIs(1, len(role_list))
self.assertEqual(role_list[0], role['id'])
# NOTE(morganfainberg): Pass an empty context, it isn't used by
# `authenticate` method.
bootstrap.identity_manager.authenticate(
self.make_request(),
user['id'],
bootstrap.password)
if bootstrap.region_id:
region = bootstrap.catalog_manager.get_region(bootstrap.region_id)
self.assertEqual(self.region_id, region['id'])
if bootstrap.service_id:
svc = bootstrap.catalog_manager.get_service(bootstrap.service_id)
self.assertEqual(self.service_name, svc['name'])
self.assertEqual(set(['admin', 'public', 'internal']),
set(bootstrap.endpoints))
urls = {'public': self.public_url,
'internal': self.internal_url,
'admin': self.admin_url}
for interface, url in urls.items():
endpoint_id = bootstrap.endpoints[interface]
endpoint = bootstrap.catalog_manager.get_endpoint(endpoint_id)
self.assertEqual(self.region_id, endpoint['region_id'])
self.assertEqual(url, endpoint['url'])
self.assertEqual(svc['id'], endpoint['service_id'])
self.assertEqual(interface, endpoint['interface'])
def test_bootstrap_is_idempotent_when_password_does_not_change(self):
# NOTE(morganfainberg): Ensure we can run bootstrap with the same
# configuration multiple times without erroring.
bootstrap = cli.BootStrap()
self._do_test_bootstrap(bootstrap)
v3_token_controller = controllers.Auth()
v3_password_data = {
'identity': {
"methods": ["password"],
"password": {
"user": {
"name": bootstrap.username,
"password": bootstrap.password,
"domain": {
"id": CONF.identity.default_domain_id
}
}
}
}
}
auth_response = v3_token_controller.authenticate_for_token(
self.make_request(), v3_password_data)
token = auth_response.headers['X-Subject-Token']
self._do_test_bootstrap(bootstrap)
# build validation request
request = self.make_request(is_admin=True)
request.context_dict['subject_token_id'] = token
# Make sure the token we authenticate for is still valid.
v3_token_controller.validate_token(request)
def test_bootstrap_is_not_idempotent_when_password_does_change(self):
# NOTE(lbragstad): Ensure bootstrap isn't idempotent when run with
# different arguments or configuration values.
bootstrap = cli.BootStrap()
self._do_test_bootstrap(bootstrap)
v3_token_controller = controllers.Auth()
v3_password_data = {
'identity': {
"methods": ["password"],
"password": {
"user": {
"name": bootstrap.username,
"password": bootstrap.password,
"domain": {
"id": CONF.identity.default_domain_id
}
}
}
}
}
auth_response = v3_token_controller.authenticate_for_token(
self.make_request(), v3_password_data)
token = auth_response.headers['X-Subject-Token']
os.environ['OS_BOOTSTRAP_PASSWORD'] = uuid.uuid4().hex
self._do_test_bootstrap(bootstrap)
# build validation request
request = self.make_request(is_admin=True)
request.context_dict['subject_token_id'] = token
# Since the user account was recovered with a different password, we
# shouldn't be able to validate this token. Bootstrap should have
# persisted a revocation event because the user's password was updated.
# Since this token was obtained using the original password, it should
# now be invalid.
self.assertRaises(
exception.TokenNotFound,
v3_token_controller.validate_token,
request
)
def test_bootstrap_recovers_user(self):
bootstrap = cli.BootStrap()
self._do_test_bootstrap(bootstrap)
# Completely lock the user out.
user_id = bootstrap.identity_manager.get_user_by_name(
bootstrap.username,
'default')['id']
bootstrap.identity_manager.update_user(
user_id,
{'enabled': False,
'password': uuid.uuid4().hex})
# The second bootstrap run will recover the account.
self._do_test_bootstrap(bootstrap)
# Sanity check that the original password works again.
bootstrap.identity_manager.authenticate(
self.make_request(),
user_id,
bootstrap.password)
def test_bootstrap_creates_default_role(self):
bootstrap = cli.BootStrap()
try:
role = bootstrap.role_manager.get_role(CONF.member_role_id)
self.fail('Member Role is created and should not be.')
except exception.RoleNotFound:
pass
self._do_test_bootstrap(bootstrap)
role = bootstrap.role_manager.get_role(CONF.member_role_id)
self.assertEqual(role['name'], CONF.member_role_name)
self.assertEqual(role['id'], CONF.member_role_id)
class CliBootStrapTestCaseWithEnvironment(CliBootStrapTestCase):
def config(self, config_files):
CONF(args=['bootstrap'], project='keystone',
default_config_files=config_files)
def setUp(self):
super(CliBootStrapTestCaseWithEnvironment, self).setUp()
self.password = uuid.uuid4().hex
self.username = uuid.uuid4().hex
self.project_name = uuid.uuid4().hex
self.role_name = uuid.uuid4().hex
self.service_name = uuid.uuid4().hex
self.public_url = uuid.uuid4().hex
self.internal_url = uuid.uuid4().hex
self.admin_url = uuid.uuid4().hex
self.region_id = uuid.uuid4().hex
self.default_domain = {
'id': CONF.identity.default_domain_id,
'name': 'Default',
}
self.useFixture(
fixtures.EnvironmentVariable('OS_BOOTSTRAP_PASSWORD',
newvalue=self.password))
self.useFixture(
fixtures.EnvironmentVariable('OS_BOOTSTRAP_USERNAME',
newvalue=self.username))
self.useFixture(
fixtures.EnvironmentVariable('OS_BOOTSTRAP_PROJECT_NAME',
newvalue=self.project_name))
self.useFixture(
fixtures.EnvironmentVariable('OS_BOOTSTRAP_ROLE_NAME',
newvalue=self.role_name))
self.useFixture(
fixtures.EnvironmentVariable('OS_BOOTSTRAP_SERVICE_NAME',
newvalue=self.service_name))
self.useFixture(
fixtures.EnvironmentVariable('OS_BOOTSTRAP_PUBLIC_URL',
newvalue=self.public_url))
self.useFixture(
fixtures.EnvironmentVariable('OS_BOOTSTRAP_INTERNAL_URL',
newvalue=self.internal_url))
self.useFixture(
fixtures.EnvironmentVariable('OS_BOOTSTRAP_ADMIN_URL',
newvalue=self.admin_url))
self.useFixture(
fixtures.EnvironmentVariable('OS_BOOTSTRAP_REGION_ID',
newvalue=self.region_id))
def test_assignment_created_with_user_exists(self):
# test assignment can be created if user already exists.
bootstrap = cli.BootStrap()
bootstrap.resource_manager.create_domain(self.default_domain['id'],
self.default_domain)
user_ref = unit.new_user_ref(self.default_domain['id'],
name=self.username,
password=self.password)
bootstrap.identity_manager.create_user(user_ref)
self._do_test_bootstrap(bootstrap)
def test_assignment_created_with_project_exists(self):
# test assignment can be created if project already exists.
bootstrap = cli.BootStrap()
bootstrap.resource_manager.create_domain(self.default_domain['id'],
self.default_domain)
project_ref = unit.new_project_ref(self.default_domain['id'],
name=self.project_name)
bootstrap.resource_manager.create_project(project_ref['id'],
project_ref)
self._do_test_bootstrap(bootstrap)
def test_assignment_created_with_role_exists(self):
# test assignment can be created if role already exists.
bootstrap = cli.BootStrap()
bootstrap.resource_manager.create_domain(self.default_domain['id'],
self.default_domain)
role = unit.new_role_ref(name=self.role_name)
bootstrap.role_manager.create_role(role['id'], role)
self._do_test_bootstrap(bootstrap)
def test_assignment_created_with_region_exists(self):
# test assignment can be created if region already exists.
bootstrap = cli.BootStrap()
bootstrap.resource_manager.create_domain(self.default_domain['id'],
self.default_domain)
region = unit.new_region_ref(id=self.region_id)
bootstrap.catalog_manager.create_region(region)
self._do_test_bootstrap(bootstrap)
def test_endpoints_created_with_service_exists(self):
# test assignment can be created if service already exists.
bootstrap = cli.BootStrap()
bootstrap.resource_manager.create_domain(self.default_domain['id'],
self.default_domain)
service = unit.new_service_ref(name=self.service_name)
bootstrap.catalog_manager.create_service(service['id'], service)
self._do_test_bootstrap(bootstrap)
def test_endpoints_created_with_endpoint_exists(self):
# test assignment can be created if endpoint already exists.
bootstrap = cli.BootStrap()
bootstrap.resource_manager.create_domain(self.default_domain['id'],
self.default_domain)
service = unit.new_service_ref(name=self.service_name)
bootstrap.catalog_manager.create_service(service['id'], service)
region = unit.new_region_ref(id=self.region_id)
bootstrap.catalog_manager.create_region(region)
endpoint = unit.new_endpoint_ref(interface='public',
service_id=service['id'],
url=self.public_url,
region_id=self.region_id)
bootstrap.catalog_manager.create_endpoint(endpoint['id'], endpoint)
self._do_test_bootstrap(bootstrap)
class CliDomainConfigAllTestCase(unit.SQLDriverOverrides, unit.TestCase):
def setUp(self):
self.useFixture(database.Database())
super(CliDomainConfigAllTestCase, self).setUp()
self.load_backends()
self.config_fixture.config(
group='identity',
domain_config_dir=unit.TESTCONF + '/domain_configs_multi_ldap')
self.domain_count = 3
self.setup_initial_domains()
self.logging = self.useFixture(
fixtures.FakeLogger(level=logging.INFO))
def config_files(self):
self.config_fixture.register_cli_opt(cli.command_opt)
config_files = super(CliDomainConfigAllTestCase, self).config_files()
config_files.append(unit.dirs.tests_conf('backend_sql.conf'))
return config_files
def cleanup_domains(self):
for domain in self.domains:
if domain == 'domain_default':
# Not allowed to delete the default domain, but should at least
# delete any domain-specific config for it.
self.domain_config_api.delete_config(
CONF.identity.default_domain_id)
continue
this_domain = self.domains[domain]
this_domain['enabled'] = False
self.resource_api.update_domain(this_domain['id'], this_domain)
self.resource_api.delete_domain(this_domain['id'])
self.domains = {}
def config(self, config_files):
CONF(args=['domain_config_upload', '--all'], project='keystone',
default_config_files=config_files)
def setup_initial_domains(self):
def create_domain(domain):
return self.resource_api.create_domain(domain['id'], domain)
self.domains = {}
self.addCleanup(self.cleanup_domains)
for x in range(1, self.domain_count):
domain = 'domain%s' % x
self.domains[domain] = create_domain(
{'id': uuid.uuid4().hex, 'name': domain})
self.default_domain = unit.new_domain_ref(
description=u'The default domain',
id=CONF.identity.default_domain_id,
name=u'Default')
self.domains['domain_default'] = create_domain(self.default_domain)
def test_config_upload(self):
# The values below are the same as in the domain_configs_multi_ldap
# directory of test config_files.
default_config = {
'ldap': {'url': 'fake://memory',
'user': 'cn=Admin',
'password': 'password',
'suffix': 'cn=example,cn=com'},
'identity': {'driver': 'ldap'}
}
domain1_config = {
'ldap': {'url': 'fake://memory1',
'user': 'cn=Admin',
'password': 'password',
'suffix': 'cn=example,cn=com'},
'identity': {'driver': 'ldap',
'list_limit': '101'}
}
domain2_config = {
'ldap': {'url': 'fake://memory',
'user': 'cn=Admin',
'password': 'password',
'suffix': 'cn=myroot,cn=com',
'group_tree_dn': 'ou=UserGroups,dc=myroot,dc=org',
'user_tree_dn': 'ou=Users,dc=myroot,dc=org'},
'identity': {'driver': 'ldap'}
}
# Clear backend dependencies, since cli loads these manually
dependency.reset()
cli.DomainConfigUpload.main()
res = self.domain_config_api.get_config_with_sensitive_info(
CONF.identity.default_domain_id)
self.assertEqual(default_config, res)
res = self.domain_config_api.get_config_with_sensitive_info(
self.domains['domain1']['id'])
self.assertEqual(domain1_config, res)
res = self.domain_config_api.get_config_with_sensitive_info(
self.domains['domain2']['id'])
self.assertEqual(domain2_config, res)
class CliDomainConfigSingleDomainTestCase(CliDomainConfigAllTestCase):
def config(self, config_files):
CONF(args=['domain_config_upload', '--domain-name', 'Default'],
project='keystone', default_config_files=config_files)
def test_config_upload(self):
# The values below are the same as in the domain_configs_multi_ldap
# directory of test config_files.
default_config = {
'ldap': {'url': 'fake://memory',
'user': 'cn=Admin',
'password': 'password',
'suffix': 'cn=example,cn=com'},
'identity': {'driver': 'ldap'}
}
# Clear backend dependencies, since cli loads these manually
dependency.reset()
cli.DomainConfigUpload.main()
res = self.domain_config_api.get_config_with_sensitive_info(
CONF.identity.default_domain_id)
self.assertEqual(default_config, res)
res = self.domain_config_api.get_config_with_sensitive_info(
self.domains['domain1']['id'])
self.assertEqual({}, res)
res = self.domain_config_api.get_config_with_sensitive_info(
self.domains['domain2']['id'])
self.assertEqual({}, res)
def test_no_overwrite_config(self):
# Create a config for the default domain
default_config = {
'ldap': {'url': uuid.uuid4().hex},
'identity': {'driver': 'ldap'}
}
self.domain_config_api.create_config(
CONF.identity.default_domain_id, default_config)
# Now try and upload the settings in the configuration file for the
# default domain
dependency.reset()
with mock.patch('six.moves.builtins.print') as mock_print:
self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
file_name = ('keystone.%s.conf' % self.default_domain['name'])
error_msg = _(
'Domain: %(domain)s already has a configuration defined - '
'ignoring file: %(file)s.') % {
'domain': self.default_domain['name'],
'file': os.path.join(CONF.identity.domain_config_dir,
file_name)}
mock_print.assert_has_calls([mock.call(error_msg)])
res = self.domain_config_api.get_config(
CONF.identity.default_domain_id)
# The initial config should not have been overwritten
self.assertEqual(default_config, res)
class CliDomainConfigNoOptionsTestCase(CliDomainConfigAllTestCase):
def config(self, config_files):
CONF(args=['domain_config_upload'],
project='keystone', default_config_files=config_files)
def test_config_upload(self):
dependency.reset()
with mock.patch('six.moves.builtins.print') as mock_print:
self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
mock_print.assert_has_calls(
[mock.call(
_('At least one option must be provided, use either '
'--all or --domain-name'))])
class CliDomainConfigTooManyOptionsTestCase(CliDomainConfigAllTestCase):
def config(self, config_files):
CONF(args=['domain_config_upload', '--all', '--domain-name',
'Default'],
project='keystone', default_config_files=config_files)
def test_config_upload(self):
dependency.reset()
with mock.patch('six.moves.builtins.print') as mock_print:
self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
mock_print.assert_has_calls(
[mock.call(_('The --all option cannot be used with '
'the --domain-name option'))])
class CliDomainConfigInvalidDomainTestCase(CliDomainConfigAllTestCase):
def config(self, config_files):
self.invalid_domain_name = uuid.uuid4().hex
CONF(args=['domain_config_upload', '--domain-name',
self.invalid_domain_name],
project='keystone', default_config_files=config_files)
def test_config_upload(self):
dependency.reset()
with mock.patch('six.moves.builtins.print') as mock_print:
self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
file_name = 'keystone.%s.conf' % self.invalid_domain_name
error_msg = (_(
'Invalid domain name: %(domain)s found in config file name: '
'%(file)s - ignoring this file.') % {
'domain': self.invalid_domain_name,
'file': os.path.join(CONF.identity.domain_config_dir,
file_name)})
mock_print.assert_has_calls([mock.call(error_msg)])
class TestDomainConfigFinder(unit.BaseTestCase):
def setUp(self):
super(TestDomainConfigFinder, self).setUp()
self.logging = self.useFixture(fixtures.LoggerFixture())
@mock.patch('os.walk')
def test_finder_ignores_files(self, mock_walk):
mock_walk.return_value = [
['.', [], ['file.txt', 'keystone.conf', 'keystone.domain0.conf']],
]
domain_configs = list(cli._domain_config_finder('.'))
expected_domain_configs = [('./keystone.domain0.conf', 'domain0')]
self.assertThat(domain_configs,
matchers.Equals(expected_domain_configs))
expected_msg_template = ('Ignoring file (%s) while scanning '
'domain config directory')
self.assertThat(
self.logging.output,
matchers.Contains(expected_msg_template % 'file.txt'))
self.assertThat(
self.logging.output,
matchers.Contains(expected_msg_template % 'keystone.conf'))
class CliDBSyncTestCase(unit.BaseTestCase):
class FakeConfCommand(object):
def __init__(self, parent):
self.extension = False
self.check = parent.command_check
self.expand = parent.command_expand
self.migrate = parent.command_migrate
self.contract = parent.command_contract
self.version = None
def setUp(self):
super(CliDBSyncTestCase, self).setUp()
self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF))
self.config_fixture.register_cli_opt(cli.command_opt)
upgrades.offline_sync_database_to_version = mock.Mock()
upgrades.expand_schema = mock.Mock()
upgrades.migrate_data = mock.Mock()
upgrades.contract_schema = mock.Mock()
self.command_check = False
self.command_expand = False
self.command_migrate = False
self.command_contract = False
def _assert_correct_call(self, mocked_function):
for func in [upgrades.offline_sync_database_to_version,
upgrades.expand_schema,
upgrades.migrate_data,
upgrades.contract_schema]:
if func == mocked_function:
self.assertTrue(func.called)
else:
self.assertFalse(func.called)
def test_db_sync(self):
self.useFixture(fixtures.MockPatchObject(
CONF, 'command', self.FakeConfCommand(self)))
cli.DbSync.main()
self._assert_correct_call(
upgrades.offline_sync_database_to_version)
def test_db_sync_expand(self):
self.command_expand = True
self.useFixture(fixtures.MockPatchObject(
CONF, 'command', self.FakeConfCommand(self)))
cli.DbSync.main()
self._assert_correct_call(upgrades.expand_schema)
def test_db_sync_migrate(self):
self.command_migrate = True
self.useFixture(fixtures.MockPatchObject(
CONF, 'command', self.FakeConfCommand(self)))
cli.DbSync.main()
self._assert_correct_call(upgrades.migrate_data)
def test_db_sync_contract(self):
self.command_contract = True
self.useFixture(fixtures.MockPatchObject(
CONF, 'command', self.FakeConfCommand(self)))
cli.DbSync.main()
self._assert_correct_call(upgrades.contract_schema)
@mock.patch('keystone.cmd.cli.upgrades.get_db_version')
def test_db_sync_check_when_database_is_empty(self, mocked_get_db_version):
e = migration.exception.DbMigrationError("Invalid version")
mocked_get_db_version.side_effect = e
checker = cli.DbSync()
log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO))
status = checker.check_db_sync_status()
self.assertIn("not currently under version control", log_info.output)
self.assertEqual(status, 2)
class TestMappingPopulate(unit.SQLDriverOverrides, unit.TestCase):
def setUp(self):
sqldb = self.useFixture(database.Database())
super(TestMappingPopulate, self).setUp()
self.ldapdb = self.useFixture(ldapdb.LDAPDatabase())
self.ldapdb.clear()
self.load_backends()
sqldb.recreate()
self.load_fixtures(default_fixtures)
def config_files(self):
self.config_fixture.register_cli_opt(cli.command_opt)
config_files = super(TestMappingPopulate, self).config_files()
config_files.append(unit.dirs.tests_conf('backend_ldap_sql.conf'))
return config_files
def config_overrides(self):
super(TestMappingPopulate, self).config_overrides()
self.config_fixture.config(group='identity', driver='ldap')
self.config_fixture.config(group='identity_mapping',
backward_compatible_ids=False)
def config(self, config_files):
CONF(args=['mapping_populate', '--domain-name', 'Default'],
project='keystone',
default_config_files=config_files)
def test_mapping_populate(self):
# mapping_populate should create id mappings. Test plan:
# 0. Purge mappings
# 1. Fetch user list directly via backend. It will not create any
# mappings because it bypasses identity manager
# 2. Verify that users have no public_id yet
# 3. Execute mapping_populate. It should create id mappings
# 4. For the same users verify that they have public_id now
purge_filter = {}
self.id_mapping_api.purge_mappings(purge_filter)
hints = None
users = self.identity_api.driver.list_users(hints)
for user in users:
local_entity = {
'domain_id': CONF.identity.default_domain_id,
'local_id': user['id'],
'entity_type': identity_mapping.EntityType.USER}
self.assertIsNone(self.id_mapping_api.get_public_id(local_entity))
dependency.reset() # backends are loaded again in the command handler
cli.MappingPopulate.main()
for user in users:
local_entity = {
'domain_id': CONF.identity.default_domain_id,
'local_id': user['id'],
'entity_type': identity_mapping.EntityType.USER}
self.assertIsNotNone(
self.id_mapping_api.get_public_id(local_entity))
def test_bad_domain_name(self):
CONF(args=['mapping_populate', '--domain-name', uuid.uuid4().hex],
project='keystone')
dependency.reset() # backends are loaded again in the command handler
# NOTE: assertEqual is used on purpose. assertFalse passes with None.
self.assertEqual(False, cli.MappingPopulate.main())
class CliDomainConfigUploadNothing(unit.BaseTestCase):
def setUp(self):
super(CliDomainConfigUploadNothing, self).setUp()
config_fixture = self.useFixture(oslo_config.fixture.Config(CONF))
config_fixture.register_cli_opt(cli.command_opt)
# NOTE(dstanek): since this is not testing any database
# functionality there is no need to go through the motions and
# setup a test database.
def fake_load_backends(self):
self.resource_manager = mock.Mock()
self.useFixture(fixtures.MockPatchObject(
cli.DomainConfigUploadFiles, 'load_backends', fake_load_backends))
tempdir = self.useFixture(fixtures.TempDir())
config_fixture.config(group='identity', domain_config_dir=tempdir.path)
self.logging = self.useFixture(
fixtures.FakeLogger(level=logging.DEBUG))
def test_uploading_all_from_an_empty_directory(self):
CONF(args=['domain_config_upload', '--all'], project='keystone',
default_config_files=[])
cli.DomainConfigUpload.main()
expected_msg = ('No domain configs uploaded from %r' %
CONF.identity.domain_config_dir)
self.assertThat(self.logging.output,
matchers.Contains(expected_msg))
class CachingDoctorTests(unit.TestCase):
def test_symptom_caching_disabled(self):
# Symptom Detected: Caching disabled
self.config_fixture.config(group='cache', enabled=False)
self.assertTrue(caching.symptom_caching_disabled())
# No Symptom Detected: Caching is enabled
self.config_fixture.config(group='cache', enabled=True)
self.assertFalse(caching.symptom_caching_disabled())
def test_caching_symptom_caching_enabled_without_a_backend(self):
# Success Case: Caching enabled and backend configured
self.config_fixture.config(group='cache', enabled=True)
self.config_fixture.config(group='cache', backend='dogpile.cache.null')
self.assertTrue(caching.symptom_caching_enabled_without_a_backend())
# Failure Case 1: Caching disabled and backend not configured
self.config_fixture.config(group='cache', enabled=False)
self.config_fixture.config(group='cache', backend='dogpile.cache.null')
self.assertFalse(caching.symptom_caching_enabled_without_a_backend())
# Failure Case 2: Caching disabled and backend configured
self.config_fixture.config(group='cache', enabled=False)
self.config_fixture.config(group='cache',
backend='dogpile.cache.memory')
self.assertFalse(caching.symptom_caching_enabled_without_a_backend())
# Failure Case 3: Caching enabled and backend configured
self.config_fixture.config(group='cache', enabled=True)
self.config_fixture.config(group='cache',
backend='dogpile.cache.memory')
self.assertFalse(caching.symptom_caching_enabled_without_a_backend())
class CredentialDoctorTests(unit.TestCase):
def test_credential_and_fernet_key_repositories_match(self):
# Symptom Detected: Key repository paths are not unique
directory = self.useFixture(fixtures.TempDir()).path
self.config_fixture.config(group='credential',
key_repository=directory)
self.config_fixture.config(group='fernet_tokens',
key_repository=directory)
self.assertTrue(credential.symptom_unique_key_repositories())
def test_credential_and_fernet_key_repositories_are_unique(self):
# No Symptom Detected: Key repository paths are unique
self.config_fixture.config(group='credential',
key_repository='/etc/keystone/cred-repo')
self.config_fixture.config(group='fernet_tokens',
key_repository='/etc/keystone/fernet-repo')
self.assertFalse(credential.symptom_unique_key_repositories())
@mock.patch('keystone.cmd.doctor.credential.utils')
def test_usability_of_cred_fernet_key_repo_raised(self, mock_utils):
# Symptom Detected: credential fernet key repository is world readable
self.config_fixture.config(group='credential', provider='fernet')
mock_utils.FernetUtils().validate_key_repository.return_value = False
self.assertTrue(
credential.symptom_usability_of_credential_fernet_key_repository())
@mock.patch('keystone.cmd.doctor.credential.utils')
def test_usability_of_cred_fernet_key_repo_not_raised(self, mock_utils):
# No Symptom Detected: Custom driver is used
self.config_fixture.config(group='credential', provider='my-driver')
mock_utils.FernetUtils().validate_key_repository.return_value = True
self.assertFalse(
credential.symptom_usability_of_credential_fernet_key_repository())
# No Symptom Detected: key repository is not world readable
self.config_fixture.config(group='credential', provider='fernet')
mock_utils.FernetUtils().validate_key_repository.return_value = True
self.assertFalse(
credential.symptom_usability_of_credential_fernet_key_repository())
@mock.patch('keystone.cmd.doctor.credential.utils')
def test_keys_in_credential_fernet_key_repository_raised(self, mock_utils):
# Symptom Detected: Key repo is empty
self.config_fixture.config(group='credential', provider='fernet')
mock_utils.FernetUtils().load_keys.return_value = False
self.assertTrue(
credential.symptom_keys_in_credential_fernet_key_repository())
@mock.patch('keystone.cmd.doctor.credential.utils')
def test_keys_in_credential_fernet_key_repository_not_raised(
self, mock_utils):
# No Symptom Detected: Custom driver is used
self.config_fixture.config(group='credential', provider='my-driver')
mock_utils.FernetUtils().load_keys.return_value = True
self.assertFalse(
credential.symptom_keys_in_credential_fernet_key_repository())
# No Symptom Detected: Key repo is not empty, fernet is current driver
self.config_fixture.config(group='credential', provider='fernet')
mock_utils.FernetUtils().load_keys.return_value = True
self.assertFalse(
credential.symptom_keys_in_credential_fernet_key_repository())
class DatabaseDoctorTests(unit.TestCase):
def test_symptom_is_raised_if_database_connection_is_SQLite(self):
# Symptom Detected: Database connection is sqlite
self.config_fixture.config(
group='database',
connection='sqlite:///mydb')
self.assertTrue(
doc_database.symptom_database_connection_is_not_SQLite())
# No Symptom Detected: Database connection is MySQL
self.config_fixture.config(
group='database',
connection='mysql+mysqlconnector://admin:secret@localhost/mydb')
self.assertFalse(
doc_database.symptom_database_connection_is_not_SQLite())
class DebugDoctorTests(unit.TestCase):
def test_symptom_debug_mode_is_enabled(self):
# Symptom Detected: Debug mode is enabled
self.config_fixture.config(debug=True)
self.assertTrue(debug.symptom_debug_mode_is_enabled())
# No Symptom Detected: Debug mode is disabled
self.config_fixture.config(debug=False)
self.assertFalse(debug.symptom_debug_mode_is_enabled())
class FederationDoctorTests(unit.TestCase):
def test_symptom_comma_in_SAML_public_certificate_path(self):
# Symptom Detected: There is a comma in path to public cert file
self.config_fixture.config(group='saml', certfile='file,cert.pem')
self.assertTrue(
federation.symptom_comma_in_SAML_public_certificate_path())
# No Symptom Detected: There is no comma in the path
self.config_fixture.config(group='saml', certfile='signing_cert.pem')
self.assertFalse(
federation.symptom_comma_in_SAML_public_certificate_path())
def test_symptom_comma_in_SAML_private_key_file_path(self):
# Symptom Detected: There is a comma in path to private key file
self.config_fixture.config(group='saml', keyfile='file,key.pem')
self.assertTrue(
federation.symptom_comma_in_SAML_private_key_file_path())
# No Symptom Detected: There is no comma in the path
self.config_fixture.config(group='saml', keyfile='signing_key.pem')
self.assertFalse(
federation.symptom_comma_in_SAML_private_key_file_path())
class LdapDoctorTests(unit.TestCase):
def test_user_enabled_emulation_dn_ignored_raised(self):
# Symptom when user_enabled_emulation_dn is being ignored because the
# user did not enable the user_enabled_emulation
self.config_fixture.config(group='ldap', user_enabled_emulation=False)
self.config_fixture.config(
group='ldap',
user_enabled_emulation_dn='cn=enabled_users,dc=example,dc=com')
self.assertTrue(
ldap.symptom_LDAP_user_enabled_emulation_dn_ignored())
def test_user_enabled_emulation_dn_ignored_not_raised(self):
# No symptom when configuration set properly
self.config_fixture.config(group='ldap', user_enabled_emulation=True)
self.config_fixture.config(
group='ldap',
user_enabled_emulation_dn='cn=enabled_users,dc=example,dc=com')
self.assertFalse(
ldap.symptom_LDAP_user_enabled_emulation_dn_ignored())
# No symptom when both configurations disabled
self.config_fixture.config(group='ldap', user_enabled_emulation=False)
self.config_fixture.config(group='ldap',
user_enabled_emulation_dn=None)
self.assertFalse(
ldap.symptom_LDAP_user_enabled_emulation_dn_ignored())
def test_user_enabled_emulation_use_group_config_ignored_raised(self):
# Symptom when user enabled emulation isn't enabled but group_config is
# enabled
self.config_fixture.config(group='ldap', user_enabled_emulation=False)
self.config_fixture.config(
group='ldap',
user_enabled_emulation_use_group_config=True)
self.assertTrue(
ldap.
symptom_LDAP_user_enabled_emulation_use_group_config_ignored())
def test_user_enabled_emulation_use_group_config_ignored_not_raised(self):
# No symptom when configuration deactivated
self.config_fixture.config(group='ldap', user_enabled_emulation=False)
self.config_fixture.config(
group='ldap',
user_enabled_emulation_use_group_config=False)
self.assertFalse(
ldap.
symptom_LDAP_user_enabled_emulation_use_group_config_ignored())
# No symptom when configurations set properly
self.config_fixture.config(group='ldap', user_enabled_emulation=True)
self.config_fixture.config(
group='ldap',
user_enabled_emulation_use_group_config=True)
self.assertFalse(
ldap.
symptom_LDAP_user_enabled_emulation_use_group_config_ignored())
def test_group_members_are_ids_disabled_raised(self):
# Symptom when objectclass is set to posixGroup but members_are_ids are
# not enabled
self.config_fixture.config(group='ldap',
group_objectclass='posixGroup')
self.config_fixture.config(group='ldap',
group_members_are_ids=False)
self.assertTrue(ldap.symptom_LDAP_group_members_are_ids_disabled())
def test_group_members_are_ids_disabled_not_raised(self):
# No symptom when the configurations are set properly
self.config_fixture.config(group='ldap',
group_objectclass='posixGroup')
self.config_fixture.config(group='ldap',
group_members_are_ids=True)
self.assertFalse(ldap.symptom_LDAP_group_members_are_ids_disabled())
# No symptom when configuration deactivated
self.config_fixture.config(group='ldap',
group_objectclass='groupOfNames')
self.config_fixture.config(group='ldap',
group_members_are_ids=False)
self.assertFalse(ldap.symptom_LDAP_group_members_are_ids_disabled())
@mock.patch('os.listdir')
@mock.patch('os.path.isdir')
def test_file_based_domain_specific_configs_raised(self, mocked_isdir,
mocked_listdir):
self.config_fixture.config(
group='identity',
domain_specific_drivers_enabled=True)
self.config_fixture.config(
group='identity',
domain_configurations_from_database=False)
# Symptom if there is no existing directory
mocked_isdir.return_value = False
self.assertTrue(ldap.symptom_LDAP_file_based_domain_specific_configs())
# Symptom if there is an invalid filename inside the domain directory
mocked_isdir.return_value = True
mocked_listdir.return_value = ['openstack.domains.conf']
self.assertTrue(ldap.symptom_LDAP_file_based_domain_specific_configs())
@mock.patch('os.listdir')
@mock.patch('os.path.isdir')
def test_file_based_domain_specific_configs_not_raised(self, mocked_isdir,
mocked_listdir):
# No symptom if both configurations deactivated
self.config_fixture.config(
group='identity',
domain_specific_drivers_enabled=False)
self.config_fixture.config(
group='identity',
domain_configurations_from_database=False)
self.assertFalse(
ldap.symptom_LDAP_file_based_domain_specific_configs())
# No symptom if directory exists with no invalid filenames
self.config_fixture.config(
group='identity',
domain_specific_drivers_enabled=True)
self.config_fixture.config(
group='identity',
domain_configurations_from_database=False)
mocked_isdir.return_value = True
mocked_listdir.return_value = ['keystone.domains.conf']
self.assertFalse(
ldap.symptom_LDAP_file_based_domain_specific_configs())
@mock.patch('os.listdir')
@mock.patch('os.path.isdir')
@mock.patch('keystone.cmd.doctor.ldap.configparser.ConfigParser')
def test_file_based_domain_specific_configs_formatted_correctly_raised(
self, mocked_parser, mocked_isdir, mocked_listdir):
symptom = ('symptom_LDAP_file_based_domain_specific_configs'
'_formatted_correctly')
# Symptom Detected: Ldap domain specific configuration files are not
# formatted correctly
self.config_fixture.config(
group='identity',
domain_specific_drivers_enabled=True)
self.config_fixture.config(
group='identity',
domain_configurations_from_database=False)
mocked_isdir.return_value = True
mocked_listdir.return_value = ['keystone.domains.conf']
mock_instance = mock.MagicMock()
mock_instance.read.side_effect = configparser.Error('No Section')
mocked_parser.return_value = mock_instance
self.assertTrue(getattr(ldap, symptom)())
@mock.patch('os.listdir')
@mock.patch('os.path.isdir')
def test_file_based_domain_specific_configs_formatted_correctly_not_raised(
self, mocked_isdir, mocked_listdir):
symptom = ('symptom_LDAP_file_based_domain_specific_configs'
'_formatted_correctly')
# No Symptom Detected: Domain_specific drivers is not enabled
self.config_fixture.config(
group='identity',
domain_specific_drivers_enabled=False)
self.assertFalse(getattr(ldap, symptom)())
# No Symptom Detected: Domain configuration from database is enabled
self.config_fixture.config(
group='identity',
domain_specific_drivers_enabled=True)
self.assertFalse(getattr(ldap, symptom)())
self.config_fixture.config(
group='identity',
domain_configurations_from_database=True)
self.assertFalse(getattr(ldap, symptom)())
# No Symptom Detected: The directory in domain_config_dir doesn't exist
mocked_isdir.return_value = False
self.assertFalse(getattr(ldap, symptom)())
# No Symptom Detected: domain specific drivers are enabled, domain
# configurations from database are disabled, directory exists, and no
# exceptions found.
self.config_fixture.config(
group='identity',
domain_configurations_from_database=False)
mocked_isdir.return_value = True
# An empty directory should not raise this symptom
self.assertFalse(getattr(ldap, symptom)())
# Test again with a file inside the directory
mocked_listdir.return_value = ['keystone.domains.conf']
self.assertFalse(getattr(ldap, symptom)())
class SecurityComplianceDoctorTests(unit.TestCase):
def test_minimum_password_age_greater_than_password_expires_days(self):
# Symptom Detected: Minimum password age is greater than the password
# expires days. Both values are positive integers greater than zero.
self.config_fixture.config(group='security_compliance',
minimum_password_age=2)
self.config_fixture.config(group='security_compliance',
password_expires_days=1)
self.assertTrue(
security_compliance.
symptom_minimum_password_age_greater_than_expires_days())
def test_minimum_password_age_equal_to_password_expires_days(self):
# Symptom Detected: Minimum password age is equal to the password
# expires days. Both values are positive integers greater than zero.
self.config_fixture.config(group='security_compliance',
minimum_password_age=1)
self.config_fixture.config(group='security_compliance',
password_expires_days=1)
self.assertTrue(
security_compliance.
symptom_minimum_password_age_greater_than_expires_days())
def test_minimum_password_age_less_than_password_expires_days(self):
# No Symptom Detected: Minimum password age is less than password
# expires days. Both values are positive integers greater than zero.
self.config_fixture.config(group='security_compliance',
minimum_password_age=1)
self.config_fixture.config(group='security_compliance',
password_expires_days=2)
self.assertFalse(
security_compliance.
symptom_minimum_password_age_greater_than_expires_days())
def test_minimum_password_age_and_password_expires_days_deactivated(self):
# No Symptom Detected: when minimum_password_age's default value is 0
# and password_expires_days' default value is None
self.assertFalse(
security_compliance.
symptom_minimum_password_age_greater_than_expires_days())
def test_invalid_password_regular_expression(self):
# Symptom Detected: Regular expression is invalid
self.config_fixture.config(
group='security_compliance',
password_regex='^^(??=.*\d)$')
self.assertTrue(
security_compliance.symptom_invalid_password_regular_expression())
def test_valid_password_regular_expression(self):
# No Symptom Detected: Regular expression is valid
self.config_fixture.config(
group='security_compliance',
password_regex='^(?=.*\d)(?=.*[a-zA-Z]).{7,}$')
self.assertFalse(
security_compliance.symptom_invalid_password_regular_expression())
def test_password_regular_expression_deactivated(self):
# No Symptom Detected: Regular expression deactivated to None
self.config_fixture.config(
group='security_compliance',
password_regex=None)
self.assertFalse(
security_compliance.symptom_invalid_password_regular_expression())
def test_password_regular_expression_description_not_set(self):
# Symptom Detected: Regular expression is set but description is not
self.config_fixture.config(
group='security_compliance',
password_regex='^(?=.*\d)(?=.*[a-zA-Z]).{7,}$')
self.config_fixture.config(
group='security_compliance',
password_regex_description=None)
self.assertTrue(
security_compliance.
symptom_password_regular_expression_description_not_set())
def test_password_regular_expression_description_set(self):
# No Symptom Detected: Regular expression and description are set
desc = '1 letter, 1 digit, and a minimum length of 7 is required'
self.config_fixture.config(
group='security_compliance',
password_regex='^(?=.*\d)(?=.*[a-zA-Z]).{7,}$')
self.config_fixture.config(
group='security_compliance',
password_regex_description=desc)
self.assertFalse(
security_compliance.
symptom_password_regular_expression_description_not_set())
def test_password_regular_expression_description_deactivated(self):
# No Symptom Detected: Regular expression and description are
# deactivated to None
self.config_fixture.config(
group='security_compliance', password_regex=None)
self.config_fixture.config(
group='security_compliance', password_regex_description=None)
self.assertFalse(
security_compliance.
symptom_password_regular_expression_description_not_set())
class TokensDoctorTests(unit.TestCase):
def test_unreasonable_max_token_size_raised(self):
# Symptom Detected: the max_token_size for uuid is not 32
self.config_fixture.config(group='token', provider='uuid')
self.config_fixture.config(max_token_size=33)
self.assertTrue(tokens.symptom_unreasonable_max_token_size())
# Symptom Detected: the max_token_size for fernet is greater than 255
self.config_fixture.config(group='token', provider='fernet')
self.config_fixture.config(max_token_size=256)
self.assertTrue(tokens.symptom_unreasonable_max_token_size())
def test_unreasonable_max_token_size_not_raised(self):
# No Symptom Detected: the max_token_size for uuid is 32
self.config_fixture.config(group='token', provider='uuid')
self.config_fixture.config(max_token_size=32)
self.assertFalse(tokens.symptom_unreasonable_max_token_size())
# No Symptom Detected: the max_token_size for fernet is 255 or less
self.config_fixture.config(group='token', provider='fernet')
self.config_fixture.config(max_token_size=255)
self.assertFalse(tokens.symptom_unreasonable_max_token_size())
class TokenFernetDoctorTests(unit.TestCase):
@mock.patch('keystone.cmd.doctor.tokens_fernet.utils')
def test_usability_of_Fernet_key_repository_raised(self, mock_utils):
# Symptom Detected: Fernet key repo is world readable
self.config_fixture.config(group='token', provider='fernet')
mock_utils.FernetUtils().validate_key_repository.return_value = False
self.assertTrue(
tokens_fernet.symptom_usability_of_Fernet_key_repository())
@mock.patch('keystone.cmd.doctor.tokens_fernet.utils')
def test_usability_of_Fernet_key_repository_not_raised(self, mock_utils):
# No Symptom Detected: UUID is used instead of fernet
self.config_fixture.config(group='token', provider='uuid')
mock_utils.FernetUtils().validate_key_repository.return_value = False
self.assertFalse(
tokens_fernet.symptom_usability_of_Fernet_key_repository())
# No Symptom Detected: configs set properly, key repo is not world
# readable but is user readable
self.config_fixture.config(group='token', provider='fernet')
mock_utils.FernetUtils().validate_key_repository.return_value = True
self.assertFalse(
tokens_fernet.symptom_usability_of_Fernet_key_repository())
@mock.patch('keystone.cmd.doctor.tokens_fernet.utils')
def test_keys_in_Fernet_key_repository_raised(self, mock_utils):
# Symptom Detected: Fernet key repository is empty
self.config_fixture.config(group='token', provider='fernet')
mock_utils.FernetUtils().load_keys.return_value = False
self.assertTrue(
tokens_fernet.symptom_keys_in_Fernet_key_repository())
@mock.patch('keystone.cmd.doctor.tokens_fernet.utils')
def test_keys_in_Fernet_key_repository_not_raised(self, mock_utils):
# No Symptom Detected: UUID is used instead of fernet
self.config_fixture.config(group='token', provider='uuid')
mock_utils.FernetUtils().load_keys.return_value = True
self.assertFalse(
            tokens_fernet.symptom_keys_in_Fernet_key_repository())
# No Symptom Detected: configs set properly, key repo has been
# populated with keys
self.config_fixture.config(group='token', provider='fernet')
mock_utils.FernetUtils().load_keys.return_value = True
self.assertFalse(
            tokens_fernet.symptom_keys_in_Fernet_key_repository())
| apache-2.0 | 4,415,172,387,815,540,700 | 42.718631 | 79 | 0.627048 | false | 4.149105 | true | false | false |
geraldoandradee/pytest | _pytest/config.py | 1 | 32554 | """ command line options, ini-file and conftest.py processing. """
import py
import sys, os
from _pytest import hookspec # the extension point definitions
from _pytest.core import PluginManager
# pytest startup
def main(args=None, plugins=None):
""" return exit code, after performing an in-process test run.
:arg args: list of command line arguments.
:arg plugins: list of plugin objects to be auto-registered during
initialization.
"""
config = _prepareconfig(args, plugins)
exitstatus = config.hook.pytest_cmdline_main(config=config)
return exitstatus
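# Illustrative usage sketch (editor's addition, not from the original source):
# the docstring above describes calling main() with an argument list and a
# list of plugin objects, e.g.
#
#     import pytest
#     exit_code = pytest.main(["-q", "tests/"], plugins=[MyPlugin()])
#
# where MyPlugin is a hypothetical object implementing pytest hook functions.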
class cmdline: # compatibility namespace
main = staticmethod(main)
class UsageError(Exception):
""" error in py.test usage or invocation"""
_preinit = []
default_plugins = (
"mark main terminal runner python pdb unittest capture skipping "
"tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
"junitxml resultlog doctest").split()
def _preloadplugins():
assert not _preinit
_preinit.append(get_plugin_manager())
def get_plugin_manager():
if _preinit:
return _preinit.pop(0)
# subsequent calls to main will create a fresh instance
pluginmanager = PytestPluginManager()
pluginmanager.config = config = Config(pluginmanager) # XXX attr needed?
for spec in default_plugins:
pluginmanager.import_plugin(spec)
return pluginmanager
def _prepareconfig(args=None, plugins=None):
if args is None:
args = sys.argv[1:]
elif isinstance(args, py.path.local):
args = [str(args)]
elif not isinstance(args, (tuple, list)):
if not isinstance(args, str):
raise ValueError("not a string or argument list: %r" % (args,))
args = py.std.shlex.split(args)
pluginmanager = get_plugin_manager()
if plugins:
for plugin in plugins:
pluginmanager.register(plugin)
return pluginmanager.hook.pytest_cmdline_parse(
pluginmanager=pluginmanager, args=args)
class PytestPluginManager(PluginManager):
def __init__(self, hookspecs=[hookspec]):
super(PytestPluginManager, self).__init__(hookspecs=hookspecs)
self.register(self)
if os.environ.get('PYTEST_DEBUG'):
err = sys.stderr
encoding = getattr(err, 'encoding', 'utf8')
try:
err = py.io.dupfile(err, encoding=encoding)
except Exception:
pass
self.trace.root.setwriter(err.write)
def pytest_configure(self, config):
config.addinivalue_line("markers",
"tryfirst: mark a hook implementation function such that the "
"plugin machinery will try to call it first/as early as possible.")
config.addinivalue_line("markers",
"trylast: mark a hook implementation function such that the "
"plugin machinery will try to call it last/as late as possible.")
class Parser:
""" Parser for command line arguments and ini-file values. """
def __init__(self, usage=None, processopt=None):
self._anonymous = OptionGroup("custom options", parser=self)
self._groups = []
self._processopt = processopt
self._usage = usage
self._inidict = {}
self._ininames = []
self.hints = []
def processoption(self, option):
if self._processopt:
if option.dest:
self._processopt(option)
def getgroup(self, name, description="", after=None):
""" get (or create) a named option Group.
:name: name of the option group.
:description: long description for --help output.
:after: name of other group, used for ordering --help output.
The returned group object has an ``addoption`` method with the same
signature as :py:func:`parser.addoption
<_pytest.config.Parser.addoption>` but will be shown in the
        respective group in the output of ``pytest --help``.
"""
for group in self._groups:
if group.name == name:
return group
group = OptionGroup(name, description, parser=self)
i = 0
for i, grp in enumerate(self._groups):
if grp.name == after:
break
self._groups.insert(i+1, group)
return group
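    # Usage sketch (editor's illustration, not part of the original file): a
    # plugin's pytest_addoption hook typically creates a named group so that
    # its options are listed together in ``pytest --help``, e.g.
    #
    #     def pytest_addoption(parser):
    #         group = parser.getgroup("myplugin", "options for the hypothetical myplugin")
    #         group.addoption("--myplugin-level", dest="myplugin_level",
    #                         default="info", help="hypothetical example option")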
def addoption(self, *opts, **attrs):
""" register a command line option.
:opts: option names, can be short or long options.
:attrs: same attributes which the ``add_option()`` function of the
`optparse library
<http://docs.python.org/library/optparse.html#module-optparse>`_
accepts.
After command line parsing options are available on the pytest config
object via ``config.option.NAME`` where ``NAME`` is usually set
by passing a ``dest`` attribute, for example
``addoption("--long", dest="NAME", ...)``.
"""
self._anonymous.addoption(*opts, **attrs)
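    # Sketch of reading a registered option back (editor's illustration; the
    # "--long"/dest="NAME" option is the hypothetical example from the docstring):
    #
    #     def pytest_addoption(parser):
    #         parser.addoption("--long", dest="NAME", default="x",
    #                          help="hypothetical example option")
    #
    #     def pytest_configure(config):
    #         value = config.getoption("NAME")   # equivalent to config.option.NAME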
def parse(self, args):
from _pytest._argcomplete import try_argcomplete
self.optparser = self._getparser()
try_argcomplete(self.optparser)
return self.optparser.parse_args([str(x) for x in args])
def _getparser(self):
from _pytest._argcomplete import filescompleter
optparser = MyOptionParser(self)
groups = self._groups + [self._anonymous]
for group in groups:
if group.options:
desc = group.description or group.name
arggroup = optparser.add_argument_group(desc)
for option in group.options:
n = option.names()
a = option.attrs()
arggroup.add_argument(*n, **a)
# bash like autocompletion for dirs (appending '/')
optparser.add_argument(FILE_OR_DIR, nargs='*'
).completer=filescompleter
return optparser
def parse_setoption(self, args, option):
parsedoption = self.parse(args)
for name, value in parsedoption.__dict__.items():
setattr(option, name, value)
return getattr(parsedoption, FILE_OR_DIR)
def parse_known_args(self, args):
optparser = self._getparser()
args = [str(x) for x in args]
return optparser.parse_known_args(args)[0]
def addini(self, name, help, type=None, default=None):
""" register an ini-file option.
:name: name of the ini-variable
:type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``.
:default: default value if no ini-file option exists but is queried.
The value of ini-variables can be retrieved via a call to
:py:func:`config.getini(name) <_pytest.config.Config.getini>`.
"""
assert type in (None, "pathlist", "args", "linelist")
self._inidict[name] = (help, type, default)
self._ininames.append(name)
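    # Sketch (editor's illustration, not in the original): registering an ini
    # value and reading it back, assuming a hypothetical "timeout" key:
    #
    #     def pytest_addoption(parser):
    #         parser.addini("timeout", "per-test timeout in seconds", default="10")
    #
    #     # pytest.ini / tox.ini / setup.cfg:
    #     #     [pytest]
    #     #     timeout = 30
    #
    #     def pytest_configure(config):
    #         timeout = config.getini("timeout")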
class ArgumentError(Exception):
"""
Raised if an Argument instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class Argument:
"""class that mimics the necessary behaviour of py.std.optparse.Option """
_typ_map = {
'int': int,
'string': str,
}
# enable after some grace period for plugin writers
TYPE_WARN = False
def __init__(self, *names, **attrs):
"""store parms in private vars for use in add_argument"""
self._attrs = attrs
self._short_opts = []
self._long_opts = []
self.dest = attrs.get('dest')
if self.TYPE_WARN:
try:
help = attrs['help']
if '%default' in help:
py.std.warnings.warn(
'py.test now uses argparse. "%default" should be'
' changed to "%(default)s" ',
FutureWarning,
stacklevel=3)
except KeyError:
pass
try:
typ = attrs['type']
except KeyError:
pass
else:
# this might raise a keyerror as well, don't want to catch that
if isinstance(typ, py.builtin._basestring):
if typ == 'choice':
if self.TYPE_WARN:
py.std.warnings.warn(
'type argument to addoption() is a string %r.'
' For parsearg this is optional and when supplied '
' should be a type.'
' (options: %s)' % (typ, names),
FutureWarning,
stacklevel=3)
# argparse expects a type here take it from
# the type of the first element
attrs['type'] = type(attrs['choices'][0])
else:
if self.TYPE_WARN:
py.std.warnings.warn(
'type argument to addoption() is a string %r.'
' For parsearg this should be a type.'
' (options: %s)' % (typ, names),
FutureWarning,
stacklevel=3)
attrs['type'] = Argument._typ_map[typ]
# used in test_parseopt -> test_parse_defaultgetter
self.type = attrs['type']
else:
self.type = typ
try:
# attribute existence is tested in Config._processopt
self.default = attrs['default']
except KeyError:
pass
self._set_opt_strings(names)
if not self.dest:
if self._long_opts:
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
try:
self.dest = self._short_opts[0][1:]
except IndexError:
raise ArgumentError(
'need a long or short option', self)
def names(self):
return self._short_opts + self._long_opts
def attrs(self):
# update any attributes set by processopt
attrs = 'default dest help'.split()
if self.dest:
attrs.append(self.dest)
for attr in attrs:
try:
self._attrs[attr] = getattr(self, attr)
except AttributeError:
pass
if self._attrs.get('help'):
a = self._attrs['help']
a = a.replace('%default', '%(default)s')
#a = a.replace('%prog', '%(prog)s')
self._attrs['help'] = a
return self._attrs
def _set_opt_strings(self, opts):
"""directly from optparse
might not be necessary as this is passed to argparse later on"""
for opt in opts:
if len(opt) < 2:
raise ArgumentError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise ArgumentError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise ArgumentError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def __repr__(self):
retval = 'Argument('
if self._short_opts:
retval += '_short_opts: ' + repr(self._short_opts) + ', '
if self._long_opts:
retval += '_long_opts: ' + repr(self._long_opts) + ', '
retval += 'dest: ' + repr(self.dest) + ', '
if hasattr(self, 'type'):
retval += 'type: ' + repr(self.type) + ', '
if hasattr(self, 'default'):
retval += 'default: ' + repr(self.default) + ', '
if retval[-2:] == ', ': # always long enough to test ("Argument(" )
retval = retval[:-2]
retval += ')'
return retval
class OptionGroup:
def __init__(self, name, description="", parser=None):
self.name = name
self.description = description
self.options = []
self.parser = parser
def addoption(self, *optnames, **attrs):
""" add an option to this group.
if a shortened version of a long option is specified it will
be suppressed in the help. addoption('--twowords', '--two-words')
results in help showing '--two-words' only, but --twowords gets
accepted **and** the automatic destination is in args.twowords
"""
option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=False)
def _addoption(self, *optnames, **attrs):
option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=True)
def _addoption_instance(self, option, shortupper=False):
if not shortupper:
for opt in option._short_opts:
if opt[0] == '-' and opt[1].islower():
raise ValueError("lowercase shortoptions reserved")
if self.parser:
self.parser.processoption(option)
self.options.append(option)
class MyOptionParser(py.std.argparse.ArgumentParser):
def __init__(self, parser):
self._parser = parser
py.std.argparse.ArgumentParser.__init__(self, usage=parser._usage,
add_help=False, formatter_class=DropShorterLongHelpFormatter)
def format_epilog(self, formatter):
hints = self._parser.hints
if hints:
s = "\n".join(["hint: " + x for x in hints]) + "\n"
s = "\n" + s + "\n"
return s
return ""
def parse_args(self, args=None, namespace=None):
"""allow splitting of positional arguments"""
args, argv = self.parse_known_args(args, namespace)
if argv:
for arg in argv:
if arg and arg[0] == '-':
msg = py.std.argparse._('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
getattr(args, FILE_OR_DIR).extend(argv)
return args
class DropShorterLongHelpFormatter(py.std.argparse.HelpFormatter):
"""shorten help for long options that differ only in extra hyphens
- collapse **long** options that are the same except for extra hyphens
    - special action attribute map_long_option allows suppressing additional
long options
- shortcut if there are only two options and one of them is a short one
- cache result on action object as this is called at least 2 times
"""
def _format_action_invocation(self, action):
orgstr = py.std.argparse.HelpFormatter._format_action_invocation(self, action)
if orgstr and orgstr[0] != '-': # only optional arguments
return orgstr
res = getattr(action, '_formatted_action_invocation', None)
if res:
return res
options = orgstr.split(', ')
if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
# a shortcut for '-h, --help' or '--abc', '-a'
action._formatted_action_invocation = orgstr
return orgstr
return_list = []
option_map = getattr(action, 'map_long_option', {})
if option_map is None:
option_map = {}
short_long = {}
for option in options:
if len(option) == 2 or option[2] == ' ':
continue
if not option.startswith('--'):
raise ArgumentError('long optional argument without "--": [%s]'
% (option), self)
xxoption = option[2:]
if xxoption.split()[0] not in option_map:
shortened = xxoption.replace('-', '')
if shortened not in short_long or \
len(short_long[shortened]) < len(xxoption):
short_long[shortened] = xxoption
# now short_long has been filled out to the longest with dashes
# **and** we keep the right option ordering from add_argument
for option in options: #
if len(option) == 2 or option[2] == ' ':
return_list.append(option)
if option[2:] == short_long.get(option.replace('-', '')):
return_list.append(option)
action._formatted_action_invocation = ', '.join(return_list)
return action._formatted_action_invocation
class Conftest(object):
""" the single place for accessing values and interacting
        with conftest modules from py.test objects.
"""
def __init__(self, onimport=None, confcutdir=None):
self._path2confmods = {}
self._onimport = onimport
self._conftestpath2mod = {}
self._confcutdir = confcutdir
def setinitial(self, args):
""" try to find a first anchor path for looking up global values
from conftests. This function is usually called _before_
argument parsing. conftest files may add command line options
and we thus have no completely safe way of determining
which parts of the arguments are actually related to options
and which are file system paths. We just try here to get
bootstrapped ...
"""
current = py.path.local()
opt = '--confcutdir'
for i in range(len(args)):
opt1 = str(args[i])
if opt1.startswith(opt):
if opt1 == opt:
if len(args) > i:
p = current.join(args[i+1], abs=True)
elif opt1.startswith(opt + "="):
p = current.join(opt1[len(opt)+1:], abs=1)
self._confcutdir = p
break
foundanchor = False
for arg in args:
if hasattr(arg, 'startswith') and arg.startswith("--"):
continue
anchor = current.join(arg, abs=1)
if exists(anchor): # we found some file object
self._try_load_conftest(anchor)
foundanchor = True
if not foundanchor:
self._try_load_conftest(current)
def _try_load_conftest(self, anchor):
self._path2confmods[None] = self.getconftestmodules(anchor)
# let's also consider test* subdirs
if anchor.check(dir=1):
for x in anchor.listdir("test*"):
if x.check(dir=1):
self.getconftestmodules(x)
def getconftestmodules(self, path):
try:
clist = self._path2confmods[path]
except KeyError:
if path is None:
raise ValueError("missing default conftest.")
clist = []
for parent in path.parts():
if self._confcutdir and self._confcutdir.relto(parent):
continue
conftestpath = parent.join("conftest.py")
if conftestpath.check(file=1):
clist.append(self.importconftest(conftestpath))
self._path2confmods[path] = clist
return clist
def rget(self, name, path=None):
mod, value = self.rget_with_confmod(name, path)
return value
def rget_with_confmod(self, name, path=None):
modules = self.getconftestmodules(path)
modules.reverse()
for mod in modules:
try:
return mod, getattr(mod, name)
except AttributeError:
continue
raise KeyError(name)
def importconftest(self, conftestpath):
assert conftestpath.check(), conftestpath
try:
return self._conftestpath2mod[conftestpath]
except KeyError:
pkgpath = conftestpath.pypkgpath()
if pkgpath is None:
_ensure_removed_sysmodule(conftestpath.purebasename)
self._conftestpath2mod[conftestpath] = mod = conftestpath.pyimport()
dirpath = conftestpath.dirpath()
if dirpath in self._path2confmods:
for path, mods in self._path2confmods.items():
if path and path.relto(dirpath) or path == dirpath:
assert mod not in mods
mods.append(mod)
self._postimport(mod)
return mod
def _postimport(self, mod):
if self._onimport:
self._onimport(mod)
return mod
def _ensure_removed_sysmodule(modname):
try:
del sys.modules[modname]
except KeyError:
pass
class CmdOptions(object):
""" holds cmdline options as attributes."""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
return "<CmdOptions %r>" %(self.__dict__,)
FILE_OR_DIR = 'file_or_dir'
class Config(object):
""" access to configuration values, pluginmanager and plugin hooks. """
def __init__(self, pluginmanager):
#: access to command line option as attributes.
#: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead
self.option = CmdOptions()
_a = FILE_OR_DIR
self._parser = Parser(
usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a),
processopt=self._processopt,
)
#: a pluginmanager instance
self.pluginmanager = pluginmanager
self.trace = self.pluginmanager.trace.root.get("config")
self._conftest = Conftest(onimport=self._onimportconftest)
self.hook = self.pluginmanager.hook
self._inicache = {}
self._opt2dest = {}
self._cleanup = []
self.pluginmanager.register(self, "pytestconfig")
self.pluginmanager.set_register_callback(self._register_plugin)
self._configured = False
def _register_plugin(self, plugin, name):
call_plugin = self.pluginmanager.call_plugin
call_plugin(plugin, "pytest_addhooks",
{'pluginmanager': self.pluginmanager})
self.hook.pytest_plugin_registered(plugin=plugin,
manager=self.pluginmanager)
dic = call_plugin(plugin, "pytest_namespace", {}) or {}
if dic:
import pytest
setns(pytest, dic)
call_plugin(plugin, "pytest_addoption", {'parser': self._parser})
if self._configured:
call_plugin(plugin, "pytest_configure", {'config': self})
def do_configure(self):
assert not self._configured
self._configured = True
self.hook.pytest_configure(config=self)
def do_unconfigure(self):
assert self._configured
self._configured = False
self.hook.pytest_unconfigure(config=self)
self.pluginmanager.ensure_shutdown()
def pytest_cmdline_parse(self, pluginmanager, args):
assert self == pluginmanager.config, (self, pluginmanager.config)
self.parse(args)
return self
def pytest_unconfigure(config):
while config._cleanup:
fin = config._cleanup.pop()
fin()
def notify_exception(self, excinfo, option=None):
if option and option.fulltrace:
style = "long"
else:
style = "native"
excrepr = excinfo.getrepr(funcargs=True,
showlocals=getattr(option, 'showlocals', False),
style=style,
)
res = self.hook.pytest_internalerror(excrepr=excrepr,
excinfo=excinfo)
if not py.builtin.any(res):
for line in str(excrepr).split("\n"):
sys.stderr.write("INTERNALERROR> %s\n" %line)
sys.stderr.flush()
@classmethod
def fromdictargs(cls, option_dict, args):
""" constructor useable for subprocesses. """
pluginmanager = get_plugin_manager()
config = pluginmanager.config
config._preparse(args, addopts=False)
config.option.__dict__.update(option_dict)
for x in config.option.plugins:
config.pluginmanager.consider_pluginarg(x)
return config
def _onimportconftest(self, conftestmodule):
self.trace("loaded conftestmodule %r" %(conftestmodule,))
self.pluginmanager.consider_conftest(conftestmodule)
def _processopt(self, opt):
for name in opt._short_opts + opt._long_opts:
self._opt2dest[name] = opt.dest
if hasattr(opt, 'default') and opt.dest:
if not hasattr(self.option, opt.dest):
setattr(self.option, opt.dest, opt.default)
def _getmatchingplugins(self, fspath):
allconftests = self._conftest._conftestpath2mod.values()
plugins = [x for x in self.pluginmanager.getplugins()
if x not in allconftests]
plugins += self._conftest.getconftestmodules(fspath)
return plugins
def pytest_load_initial_conftests(self, parser, args):
self._conftest.setinitial(args)
pytest_load_initial_conftests.trylast = True
def _initini(self, args):
self.inicfg = getcfg(args, ["pytest.ini", "tox.ini", "setup.cfg"])
self._parser.addini('addopts', 'extra command line options', 'args')
self._parser.addini('minversion', 'minimally required pytest version')
def _preparse(self, args, addopts=True):
self._initini(args)
if addopts:
args[:] = self.getini("addopts") + args
self._checkversion()
self.pluginmanager.consider_preparse(args)
self.pluginmanager.consider_setuptools_entrypoints()
self.pluginmanager.consider_env()
self.hook.pytest_load_initial_conftests(early_config=self,
args=args, parser=self._parser)
def _checkversion(self):
import pytest
minver = self.inicfg.get('minversion', None)
if minver:
ver = minver.split(".")
myver = pytest.__version__.split(".")
if myver < ver:
raise pytest.UsageError(
"%s:%d: requires pytest-%s, actual pytest-%s'" %(
self.inicfg.config.path, self.inicfg.lineof('minversion'),
minver, pytest.__version__))
def parse(self, args):
# parse given cmdline arguments into this config object.
# Note that this can only be called once per testing process.
assert not hasattr(self, 'args'), (
"can only parse cmdline args at most once per Config object")
self._origargs = args
self._preparse(args)
# XXX deprecated hook:
self.hook.pytest_cmdline_preparse(config=self, args=args)
self._parser.hints.extend(self.pluginmanager._hints)
args = self._parser.parse_setoption(args, self.option)
if not args:
args.append(py.std.os.getcwd())
self.args = args
def addinivalue_line(self, name, line):
""" add a line to an ini-file option. The option must have been
declared but might not yet be set in which case the line becomes the
        first line in its value. """
x = self.getini(name)
assert isinstance(x, list)
x.append(line) # modifies the cached list inline
def getini(self, name):
""" return configuration value from an :ref:`ini file <inifiles>`. If the
specified name hasn't been registered through a prior
:py:func:`parser.addini <pytest.config.Parser.addini>`
call (usually from a plugin), a ValueError is raised. """
try:
return self._inicache[name]
except KeyError:
self._inicache[name] = val = self._getini(name)
return val
def _getini(self, name):
try:
description, type, default = self._parser._inidict[name]
except KeyError:
raise ValueError("unknown configuration value: %r" %(name,))
try:
value = self.inicfg[name]
except KeyError:
if default is not None:
return default
if type is None:
return ''
return []
if type == "pathlist":
dp = py.path.local(self.inicfg.config.path).dirpath()
l = []
for relpath in py.std.shlex.split(value):
l.append(dp.join(relpath, abs=True))
return l
elif type == "args":
return py.std.shlex.split(value)
elif type == "linelist":
return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
else:
assert type is None
return value
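    # Editor's illustration (not part of the original file): given the handling
    # above, hypothetical ini options declared with type "args", "pathlist" and
    # "linelist" would be parsed roughly as follows:
    #
    #     my_args_opt = -q --tb=short   ->  ['-q', '--tb=short']
    #     my_path_opt = tests doc       ->  py.path.local paths joined to the ini file's directory
    #     my_line_opt = first entry     ->  ['first entry', 'second entry'] (one item per non-empty line)
    #         second entry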
def _getconftest_pathlist(self, name, path=None):
try:
mod, relroots = self._conftest.rget_with_confmod(name, path)
except KeyError:
return None
modpath = py.path.local(mod.__file__).dirpath()
l = []
for relroot in relroots:
if not isinstance(relroot, py.path.local):
relroot = relroot.replace("/", py.path.local.sep)
relroot = modpath.join(relroot, abs=True)
l.append(relroot)
return l
def _getconftest(self, name, path=None, check=False):
if check:
self._checkconftest(name)
return self._conftest.rget(name, path)
def getoption(self, name):
""" return command line option value.
:arg name: name of the option. You may also specify
the literal ``--OPT`` option instead of the "dest" option name.
"""
name = self._opt2dest.get(name, name)
try:
return getattr(self.option, name)
except AttributeError:
raise ValueError("no option named %r" % (name,))
def getvalue(self, name, path=None):
""" return command line option value.
:arg name: name of the command line option
(deprecated) if we can't find the option also lookup
the name in a matching conftest file.
"""
try:
return getattr(self.option, name)
except AttributeError:
return self._getconftest(name, path, check=False)
def getvalueorskip(self, name, path=None):
""" (deprecated) return getvalue(name) or call
py.test.skip if no value exists. """
__tracebackhide__ = True
try:
val = self.getvalue(name, path)
if val is None:
raise KeyError(name)
return val
except KeyError:
py.test.skip("no %r value found" %(name,))
def exists(path, ignore=EnvironmentError):
try:
return path.check()
except ignore:
return False
def getcfg(args, inibasenames):
args = [x for x in args if not str(x).startswith("-")]
if not args:
args = [py.path.local()]
for arg in args:
arg = py.path.local(arg)
for base in arg.parts(reverse=True):
for inibasename in inibasenames:
p = base.join(inibasename)
if exists(p):
iniconfig = py.iniconfig.IniConfig(p)
if 'pytest' in iniconfig.sections:
return iniconfig['pytest']
return {}
def setns(obj, dic):
import pytest
for name, value in dic.items():
if isinstance(value, dict):
mod = getattr(obj, name, None)
if mod is None:
modname = "pytest.%s" % name
mod = py.std.types.ModuleType(modname)
sys.modules[modname] = mod
mod.__all__ = []
setattr(obj, name, mod)
obj.__all__.append(name)
setns(mod, value)
else:
setattr(obj, name, value)
obj.__all__.append(name)
#if obj != pytest:
# pytest.__all__.append(name)
setattr(pytest, name, value)
| mit | -1,276,906,123,637,224,400 | 36.765661 | 93 | 0.561068 | false | 4.272178 | true | false | false |
florentchandelier/zipline | zipline/data/data_portal_live.py | 2 | 3669 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zipline.data.data_portal import DataPortal
from logbook import Logger
log = Logger('DataPortalLive')
class DataPortalLive(DataPortal):
def __init__(self, broker, *args, **kwargs):
self.broker = broker
super(DataPortalLive, self).__init__(*args, **kwargs)
def get_last_traded_dt(self, asset, dt, data_frequency):
return self.broker.get_last_traded_dt(asset)
def get_spot_value(self, assets, field, dt, data_frequency):
return self.broker.get_spot_value(assets, field, dt, data_frequency)
def get_history_window(self,
assets,
end_dt,
bar_count,
frequency,
field,
data_frequency,
ffill=True):
# This method is responsible for merging the ingested historical data
# with the real-time collected data through the Broker.
# DataPortal.get_history_window() is called with ffill=False to mark
# the missing fields with NaNs. After merge on the historical and
# real-time data the missing values (NaNs) are filled based on their
# next available values in the requested time window.
#
# Warning: setting ffill=True in DataPortal.get_history_window() call
        # results in wrong behavior: the last available value reported by
# get_spot_value() will be used to fill the missing data - which is
# always representing the current spot price presented by Broker.
historical_bars = super(DataPortalLive, self).get_history_window(
assets, end_dt, bar_count, frequency, field, data_frequency,
ffill=False)
realtime_bars = self.broker.get_realtime_bars(
assets, frequency)
        # Broker.get_realtime_bars() returns the asset as the level 0 column;
        # open, high, low, close and volume are returned as level 1 columns.
        # To filter for field, the levels need to be swapped.
realtime_bars = realtime_bars.swaplevel(0, 1, axis=1)
ohlcv_field = 'close' if field == 'price' else field
# TODO: end_dt is ignored when historical & realtime bars are merged.
# Should not cause issues as end_dt is set to current time in live
# trading, but would be more proper if merge would make use of it.
combined_bars = historical_bars.combine_first(
realtime_bars[ohlcv_field])
if ffill and field == 'price':
# Simple forward fill is not enough here as the last ingested
            # value might be outside of the requested time window. In that case
# the time series starts with NaN and forward filling won't help.
# To provide values for such cases we backward fill.
# Backward fill as a second operation will have no effect if the
# forward-fill was successful.
combined_bars.fillna(method='ffill', inplace=True)
combined_bars.fillna(method='bfill', inplace=True)
return combined_bars[-bar_count:]
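    # Editor's sketch (not part of the original class) of the merge behaviour
    # described above: combine_first patches the NaN-marked historical bars
    # with real-time values, then ffill/bfill close any remaining gaps.
    #
    #     import pandas as pd
    #     hist = pd.Series([1.0, None, None], index=[1, 2, 3])
    #     live = pd.Series([2.5, 2.6], index=[2, 3])
    #     merged = hist.combine_first(live)           # 1.0, 2.5, 2.6
    #     merged.fillna(method='ffill', inplace=True)
    #     merged.fillna(method='bfill', inplace=True)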
| apache-2.0 | 4,738,107,394,206,212,000 | 43.743902 | 77 | 0.644317 | false | 4.326651 | false | false | false |
tdaylan/tdgu | radt_6dim.py | 1 | 2203 | from __init__ import *
import json, requests
import h5py
def query(lon, lat, coordsys='gal', mode='full'):
url = 'http://argonaut.skymaps.info/gal-lb-query-light'
payload = {'mode': mode}
if coordsys.lower() in ['gal', 'g']:
payload['l'] = lon
payload['b'] = lat
elif coordsys.lower() in ['equ', 'e']:
payload['ra'] = lon
payload['dec'] = lat
else:
raise ValueError("coordsys '{0}' not understood.".format(coordsys))
headers = {'content-type': 'application/json'}
r = requests.post(url, data=json.dumps(payload), headers=headers)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
print('Response received from Argonaut:')
print(r.text)
raise e
return json.loads(r.text)
def plot_dust3dim():
magv = arange(10, 22)
prlxerrr = array([4., 4., 4.2, 6.0, 9.1, 14.3, 23.1, 38.8, 69.7, 138., 312., 1786.])
fig, ax = plt.subplots()
ax.plot(magv, prlxerrr)
ax.set_ylabel(r'$\sigma_\pi [\mu$as]')
ax.set_xlabel('V [mag]')
ax.set_yscale('log')
plt.show()
# paths
pathimag, pathdata = tdpy.util.retr_path('radt_6dim')
numbside = 256
maxmgang = 10.
lghp, bghp, numbpixl, apix = tdpy.util.retr_healgrid(numbside)
mpixl = where((abs(lghp) < maxmgang) & (abs(90. - bghp) < maxmgang))[0]
qresult = query(list(lghp[mpixl]), list(bghp[mpixl]))
for key in qresult.keys():
fig, ax = plt.subplots()
ax.hist(qresult[key], 100)
ax.set_title(key)
plt.savefig(pathimag + 'full.pdf')
plt.close()
qresult = query(list(lghp[mpixl]), list(bghp[mpixl]), mode='sfnd')
for key in qresult.keys():
fig, ax = plt.subplots()
ax.hist(qresult[key])
ax.set_title(key)
plt.savefig(pathimag + 'sfnd.pdf')
plt.close()
qresult = query(list(lghp[mpixl]), list(bghp[mpixl]), mode='lite')
for key in qresult.keys():
fig, ax = plt.subplots()
ax.hist(qresult[key])
ax.set_title(key)
plt.savefig(pathimag + 'lite.pdf')
plt.close()
plot_dust3dim()
| mit | 1,658,613,054,913,812,500 | 26.886076 | 88 | 0.563323 | false | 2.981055 | false | false | false |
twilio/twilio-python | twilio/rest/preview/trusted_comms/__init__.py | 1 | 2274 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base.version import Version
from twilio.rest.preview.trusted_comms.branded_channel import BrandedChannelList
from twilio.rest.preview.trusted_comms.brands_information import BrandsInformationList
from twilio.rest.preview.trusted_comms.cps import CpsList
from twilio.rest.preview.trusted_comms.current_call import CurrentCallList
class TrustedComms(Version):
def __init__(self, domain):
"""
Initialize the TrustedComms version of Preview
:returns: TrustedComms version of Preview
:rtype: twilio.rest.preview.trusted_comms.TrustedComms.TrustedComms
"""
super(TrustedComms, self).__init__(domain)
self.version = 'TrustedComms'
self._branded_channels = None
self._brands_information = None
self._cps = None
self._current_calls = None
@property
def branded_channels(self):
"""
:rtype: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelList
"""
if self._branded_channels is None:
self._branded_channels = BrandedChannelList(self)
return self._branded_channels
@property
def brands_information(self):
"""
:rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationList
"""
if self._brands_information is None:
self._brands_information = BrandsInformationList(self)
return self._brands_information
@property
def cps(self):
"""
:rtype: twilio.rest.preview.trusted_comms.cps.CpsList
"""
if self._cps is None:
self._cps = CpsList(self)
return self._cps
@property
def current_calls(self):
"""
:rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallList
"""
if self._current_calls is None:
self._current_calls = CurrentCallList(self)
return self._current_calls
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.TrustedComms>'
| mit | -597,316,505,879,555,100 | 29.32 | 90 | 0.627968 | false | 3.679612 | false | false | false |
csaldias/python-usm | Certámenes resueltos/Certamen 1 2013-1/pregunta-2.py | 1 | 2188 | #First, ask the user for the arrival time
tiempo_llegada = raw_input("Ingrese hora: ")
#Now check whether we are in normal or peak hours (or the service is closed)
if tiempo_llegada >= "06:30" and tiempo_llegada <= "22:30":
    #In operation
if (tiempo_llegada >= "07:30" and tiempo_llegada <= "09:00") or (tiempo_llegada >= "18:00" and tiempo_llegada <= "19:00"):
        #Peak hours
        #Long trains (4 cars) and short trains (2 cars) every 6 minutes (in that order)
print "Se encuentra en horario punta"
hora_llegada = int(tiempo_llegada[:2])
minutos_llegada = int(tiempo_llegada[3:])
        #Determine which train will arrive at the station and in how long.
contador_largo_vagones = 0
contador_vagones = 0
while contador_largo_vagones < minutos_llegada:
contador_largo_vagones += 6
contador_vagones += 1
if contador_vagones % 2 == 0:
print "El tren tiene 4 vagones"
else:
print "El tren tiene 2 vagones"
if minutos_llegada % 6 == 0:
print "El tren se encuentra en el anden!"
else:
print "Debe esperar "+str(6 - (minutos_llegada % 6))+" minutos"
else:
        #Normal hours
        #Long trains (4 cars) every 12 minutes
print "Se encuentra en horario normal"
        #Determine how long until the train arrives at the station.
hora_llegada = int(tiempo_llegada[:2])
minutos_llegada = int(tiempo_llegada[3:])
print "El tren tiene 4 vagones"
if minutos_llegada % 12 == 0:
print "El tren se encuentra en el anden!"
else:
print "Debe esperar "+str(12 - (minutos_llegada % 12))+" minutos"
else:
    #The service is not operating
print "Ya no se encuentran trenes en este horario"
| mit | 6,007,246,369,956,435,000 | 43.653061 | 123 | 0.494059 | false | 3.285285 | false | false | false |
Psycojoker/geholparser | examples/test_student_calendar.py | 1 | 2532 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Frederic'
import os
import sys
sys.path.append("../src")
from gehol import GeholProxy
from gehol.studentsetcalendar import StudentSetCalendar
from gehol.converters.utils import write_content_to_file
from gehol.converters.rfc5545icalwriter import convert_geholcalendar_to_ical
from pprint import pprint
DATA_DIR = "../data/student/"
DATA_FILE = "../data/student-2012/SOCO_BA3.html"
first_monday = '19/09/2011'
def make_ical_from_local_file(filename):
f = open(filename)
cal = StudentSetCalendar(f)
print cal.header_data
pprint(cal.events)
ical_content = convert_geholcalendar_to_ical(cal, first_monday)
write_content_to_file(ical_content, "%s.ics" % cal.description)
URLs = [("MA1 en sciences informatiques - Spécialisée - Multimedia", "http://164.15.72.157:8081/Reporting/Individual;Student%20Set%20Groups;id;%23SPLUS0FACD0?&template=Ann%E9e%20d%27%E9tude&weeks=1-14&days=1-6&periods=5-33&width=0&height=0"),
("BA1 en sciences de l'ingénieur, orientation ingénieur civil - Série 2B", "http://164.15.72.157:8081/Reporting/Individual;Student%20Set%20Groups;id;%23SPLUSA6299F?&template=Ann%E9e%20d%27%E9tude&weeks=1-14&days=1-6&periods=5-33&width=0&height=0"),
("BA3 en information et communication", "http://164.15.72.157:8081/Reporting/Individual;Student%20Set%20Groups;id;%23SPLUS35F074?&template=Ann%E9e%20d%27%E9tude&weeks=1-14&days=1-6&periods=5-33&width=0&height=0"),
]
def make_ical_from_url(name, url):
gehol_proxy = GeholProxy()
cal = gehol_proxy.get_studentset_calendar_from_url(url)
ical = convert_geholcalendar_to_ical(cal, first_monday)
ical_data = ical.as_string()
outfile = "%s.ics" % name
print "writing ical file : %s" % outfile
write_content_to_file(ical_data, outfile)
GROUP_IDs = ["%23SPLUS0FACD0", "%23SPLUSA6299D", "%23SPLUS35F0CB", "%23SPLUS35F0CA", "%23SPLUS4BCCBA", "%23SPLUSA6299B"]
def make_ical_from_groupid(group_id):
gehol_proxy = GeholProxy()
cal = gehol_proxy.get_studentset_calendar(group_id, "1-14")
ical = convert_geholcalendar_to_ical(cal, first_monday)
ical_data = ical.as_string()
outfile = "%s.ics" % ical.name
print "writing ical file : %s" % outfile
write_content_to_file(ical_data, outfile)
if __name__ == "__main__":
for (profile, url) in URLs:
make_ical_from_url(profile, url)
for id in GROUP_IDs:
make_ical_from_groupid(id) | mit | -4,477,545,330,042,107,400 | 37.515625 | 256 | 0.687772 | false | 2.711373 | false | false | false |
GitOnion/climate-commander | django-server/climate_commander/jobs/migrations/0020_auto_20160916_0053.py | 1 | 1258 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('jobs', '0019_auto_20160824_1516'),
]
operations = [
migrations.AddField(
model_name='server',
name='crdntl_instanceip',
field=models.GenericIPAddressField(null=True),
),
migrations.AddField(
model_name='server',
name='crdntl_loginnode',
field=models.CharField(max_length=40, null=True),
),
migrations.AlterField(
model_name='server',
name='crdntl_domain',
field=models.CharField(max_length=50, null=True),
),
migrations.AlterField(
model_name='server',
name='crdntl_password',
field=models.CharField(max_length=20, null=True),
),
migrations.AlterField(
model_name='server',
name='crdntl_user',
field=models.CharField(max_length=50, null=True),
),
migrations.AlterField(
model_name='server',
name='server_cpus',
field=models.IntegerField(null=True),
),
]
| mit | -7,196,496,385,899,133,000 | 27.590909 | 61 | 0.5469 | false | 4.278912 | false | false | false |
weiweihuanghuang/wei-glyphs-scripts | Spacing/Show Kerning Pairs For Space.py | 1 | 2909 | #MenuTitle: Show Kerning Pairs for Space
# -*- coding: utf-8 -*-
__doc__="""
Show Kerning Pairs for this glyph in a new tab.
"""
import GlyphsApp
import traceback
thisFont = Glyphs.font
Doc = Glyphs.currentDocument
selectedLayers = thisFont.selectedLayers
selectedMaster = thisFont.selectedFontMaster
masterID = selectedMaster.id
kernDict = thisFont.kerningDict()
leftGroups = {}
rightGroups = {}
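# Build lookups from kerning-group ID to member glyph names, so that group
# references found in the kerning data can be mapped back to a concrete glyph
# (see nameMaker below).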
for g in thisFont.glyphs:
if g.rightKerningGroup:
group_name = g.rightKerningGroupId()
try:
leftGroups[group_name].append(g.name)
except:
leftGroups[group_name] = [g.name]
if g.leftKerningGroup:
group_name = g.leftKerningGroupId()
try:
rightGroups[group_name].append(g.name)
except:
rightGroups[group_name] = [g.name]
def nameMaker(kernGlyphOrGroup, side):
# if this is a kerning group
if kernGlyphOrGroup[0] == "@":
# right glyph, left kerning group
if side == "right":
try:
return rightGroups[kernGlyphOrGroup][0]
except:
pass
elif side == "left":
# left glyph, right kerning group
try:
return leftGroups[kernGlyphOrGroup][0]
except:
pass
else:
return thisFont.glyphForId_(kernGlyphOrGroup).name
editString = u""""""
editStringL = u""""""
editStringR = u""""""
thisGlyph = thisFont.glyphs["space"]
thisGlyphName = thisGlyph.name
rGroupName = str(thisGlyph.rightKerningGroup)
lGroupName = str(thisGlyph.leftKerningGroup)
# print "\t", rGroupName, lGroupName
kernPairListL = []
kernPairListSortedL = []
kernPairListR = []
kernPairListSortedR = []
for L in thisFont.kerning[ masterID ]:
try:
		# if this kerning-pair's left glyph matches rGroupName (right side kerning group of thisGlyph)
if rGroupName == L[7:] or rGroupName == thisFont.glyphForId_(L).name or thisFont.glyphForId_(L).name == thisGlyph.name:
# for every R counterpart to L in the kerning pairs of rGroupName
for R in thisFont.kerning[masterID][L]:
if thisFont.kerning[masterID][L][R] != 0:
kernPairListL += [nameMaker(R, "right")]
except:
# print traceback.format_exc()
pass
for R in thisFont.kerning[masterID][L]:
try:
# if the R counterpart (class glyph) of L glyph is the selectedGlyph
if lGroupName == R[7:] or lGroupName == thisFont.glyphForId_(R).name or thisFont.glyphForId_(R).name == thisGlyph.name:
if thisFont.kerning[masterID][L][R] != 0:
kernPairListR += [nameMaker(L, "left")]
except:
pass
kernPairListSortedL = [g.name for g in Font.glyphs if g.name in kernPairListL]
for everyGlyph in kernPairListSortedL:
editStringL += "/%s/%s/bar" % (thisGlyphName, everyGlyph)
kernPairListSortedR = [g.name for g in Font.glyphs if g.name in kernPairListR]
for everyGlyph in kernPairListSortedR:
editStringR += "/%s/%s/bar" % (everyGlyph, thisGlyphName)
# editString = "/bar" + editStringL + "\n\n" + editStringR + "/bar"
editString = "/bar%s\n\n/bar%s" % (editStringL, editStringR)
thisFont.newTab(editString) | apache-2.0 | -4,223,721,100,003,747,000 | 28.393939 | 122 | 0.71296 | false | 2.877349 | false | false | false |
epage/The-One-Ring | src/contacts.py | 1 | 2892 | import logging
import dbus
import telepathy
import util.misc as misc_utils
_moduleLogger = logging.getLogger(__name__)
class ContactsMixin(telepathy.server.ConnectionInterfaceContacts):
ATTRIBUTES = {
telepathy.CONNECTION : 'contact-id',
telepathy.CONNECTION_INTERFACE_SIMPLE_PRESENCE : 'presence',
telepathy.CONNECTION_INTERFACE_ALIASING : 'alias',
telepathy.CONNECTION_INTERFACE_AVATARS : 'token',
telepathy.CONNECTION_INTERFACE_CAPABILITIES : 'caps',
telepathy.CONNECTION_INTERFACE_CONTACT_CAPABILITIES : 'capabilities'
}
def __init__(self):
telepathy.server.ConnectionInterfaceContacts.__init__(self)
dbus_interface = telepathy.CONNECTION_INTERFACE_CONTACTS
self._implement_property_get(
dbus_interface,
{'ContactAttributeInterfaces' : self.get_contact_attribute_interfaces}
)
def HoldHandles(self, *args):
"""
@abstract
"""
raise NotImplementedError("Abstract function called")
# Overwrite the dbus attribute to get the sender argument
@misc_utils.log_exception(_moduleLogger)
@dbus.service.method(telepathy.CONNECTION_INTERFACE_CONTACTS, in_signature='auasb',
out_signature='a{ua{sv}}', sender_keyword='sender')
def GetContactAttributes(self, handles, interfaces, hold, sender):
#InspectHandle already checks we're connected, the handles and handle type.
supportedInterfaces = set()
for interface in interfaces:
if interface in self.ATTRIBUTES:
supportedInterfaces.add(interface)
else:
_moduleLogger.debug("Ignoring unsupported interface %s" % interface)
handle_type = telepathy.HANDLE_TYPE_CONTACT
ret = dbus.Dictionary(signature='ua{sv}')
for handle in handles:
ret[handle] = dbus.Dictionary(signature='sv')
functions = {
telepathy.CONNECTION:
lambda x: zip(x, self.InspectHandles(handle_type, x)),
telepathy.CONNECTION_INTERFACE_SIMPLE_PRESENCE:
lambda x: self.GetPresences(x).items(),
telepathy.CONNECTION_INTERFACE_ALIASING:
lambda x: self.GetAliases(x).items(),
telepathy.CONNECTION_INTERFACE_AVATARS :
lambda x: self.GetKnownAvatarTokens(x).items(),
telepathy.CONNECTION_INTERFACE_CAPABILITIES:
lambda x: self.GetCapabilities(x).items(),
telepathy.CONNECTION_INTERFACE_CONTACT_CAPABILITIES :
lambda x: self.GetContactCapabilities(x).items()
}
#Hold handles if needed
if hold:
self.HoldHandles(handle_type, handles, sender)
# Attributes from the interface org.freedesktop.Telepathy.Connection
# are always returned, and need not be requested explicitly.
supportedInterfaces.add(telepathy.CONNECTION)
for interface in supportedInterfaces:
interface_attribute = interface + '/' + self.ATTRIBUTES[interface]
results = functions[interface](handles)
for handle, value in results:
ret[int(handle)][interface_attribute] = value
return ret
def get_contact_attribute_interfaces(self):
return self.ATTRIBUTES.keys()
| lgpl-2.1 | -985,817,404,625,239 | 32.241379 | 84 | 0.75242 | false | 3.496977 | false | false | false |
m-ober/byceps | byceps/services/party/models/party.py | 1 | 2070 | """
byceps.services.party.models.party
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
from typing import Optional
from ....database import db
from ....typing import BrandID, PartyID
from ....util.instances import ReprBuilder
from ...brand.models.brand import Brand
from ...shop.shop.transfer.models import ShopID
class Party(db.Model):
"""A party."""
__tablename__ = 'parties'
id = db.Column(db.UnicodeText, primary_key=True)
brand_id = db.Column(db.UnicodeText, db.ForeignKey('brands.id'), index=True, nullable=False)
brand = db.relationship(Brand, backref='parties')
title = db.Column(db.UnicodeText, unique=True, nullable=False)
starts_at = db.Column(db.DateTime, nullable=False)
ends_at = db.Column(db.DateTime, nullable=False)
max_ticket_quantity = db.Column(db.Integer, nullable=True)
shop_id = db.Column(db.UnicodeText, db.ForeignKey('shops.id'), index=True, nullable=True)
ticket_management_enabled = db.Column(db.Boolean, default=False, nullable=False)
seat_management_enabled = db.Column(db.Boolean, default=False, nullable=False)
archived = db.Column(db.Boolean, default=False, nullable=False)
def __init__(
self,
party_id: PartyID,
brand_id: BrandID,
title: str,
starts_at: datetime,
ends_at: datetime,
*,
max_ticket_quantity: Optional[int] = None,
shop_id: Optional[ShopID] = None,
) -> None:
self.id = party_id
self.brand_id = brand_id
self.title = title
self.starts_at = starts_at
self.ends_at = ends_at
self.max_ticket_quantity = max_ticket_quantity
self.shop_id = shop_id
@property
def is_over(self) -> bool:
"""Returns true if the party has ended."""
return self.ends_at < datetime.utcnow()
def __repr__(self) -> str:
return ReprBuilder(self) \
.add_with_lookup('id') \
.build()
| bsd-3-clause | -7,402,205,738,868,657,000 | 31.34375 | 96 | 0.636715 | false | 3.514431 | false | false | false |
boland1992/SeisSuite | seissuite/sort_later/waveloc/gen_ctrl_file.py | 2 | 2551 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 6 08:56:57 2015
@author: boland
The following script is used to generate a control file for
the programme NonLinLoc. The format is such:
Source description (multiple sources can be specified)
GTSRCE stat_name loc_type x_srce y_srce z_srce elev
Examples:
GTSRCE STA XYZ 27.25 -67.78 0.0 1.242
GTSRCE CALF LATLON 43.753 6.922 0.0 1.242
GTSRCE JOU LATLONDM 43 38.00 N 05 39.52 E 0.0 0.300
For each control file, the location types should be consistent. Elevation is
measured in km.
For Example:
GTSRCE 101 LATLON 49.9520 12.5110 0.0 0.677
GTSRCE 102 LATLON 49.9660 12.5440 0.0 0.595
GTSRCE 103 LATLON 49.9780 12.5860 0.0 0.548
GTSRCE 104 LATLON 49.9910 12.6120 0.0 0.531
GTSRCE 105 LATLON 50.0070 12.6490 0.0 0.733
"""
import sys; sys.path.append('../..')
import os
from obspy import read
from classes.dataless import Dataless
import numpy as np
# set input parameters.
# this script can take either station XML, dataless SEED metadata or MSEED
# set one import path to the type of file that you want to import for your
# metadata
in_path = 'S.BHZ.01.2014.696174.dataless'
use_dataless = True
# set output file location and name
outfolder = os.getcwd()
outfile = 'station_info'
outpath = os.path.join(outfolder, outfile)
if use_dataless:
obj_dataless = Dataless(in_path)
coords = obj_dataless.locs_from_dataless()
stats = obj_dataless.stats_from_dataless()
#combine stations IDs with location data
info = np.column_stack((stats, coords))
if os.path.exists(outpath):
# generate a searchable object
search = True
search_list = []
else:
search = False
# now construct the control file line syntax and output to outpath
with open(outpath, "a+") as f:
if search:
# if the path already exists, then make sure to only append new lines!
for line in f:
search_list.append(line)
for row in info:
line_str = 'GTSRCE %s LATLON %0.4f %0.4f 0.0 %0.3f\n'\
%(str(row[0].split('.')[-1]),
float(row[2]),
float(row[1]),
float(row[3])/1e3)
if search:
if not line_str in search_list:
f.writelines(line_str)
| gpl-3.0 | 6,378,401,050,146,038,000 | 27.344444 | 78 | 0.588397 | false | 3.241423 | false | false | false |
DesertBot/DesertBot | desertbot/modules/urlfollow/Mastodon.py | 1 | 6216 | """
Created on Aug 16, 2018
@author: StarlitGhost
"""
import json
import re
from urllib.parse import urlparse
import dateutil.parser
import dateutil.tz
from bs4 import BeautifulSoup
from twisted.plugin import IPlugin
from twisted.words.protocols.irc import assembleFormattedText as colour, attributes as A
from zope.interface import implementer
from desertbot.message import IRCMessage
from desertbot.moduleinterface import IModule
from desertbot.modules.commandinterface import BotCommand
from desertbot.response import IRCResponse
@implementer(IPlugin, IModule)
class Mastodon(BotCommand):
def actions(self):
return super(Mastodon, self).actions() + [('urlfollow', 2, self.followURL)]
def triggers(self):
return ['cw']
def help(self, query):
if query and query[0].lower() in self.triggers():
return {
'cw': 'cw <toot URL> - displays the contents of toots with content warnings'
}[query[0].lower()]
else:
return ('Automatic module that fetches toots from Mastodon URLs. '
'Also {}cw to display contents of toots with content warnings'
.format(self.bot.commandChar))
def execute(self, message: IRCMessage):
# display contents of a toot with a content warning
if message.command == 'cw':
if not message.parameters:
return IRCResponse(self.help(['cw']), message.replyTo)
match = re.search(r'(?P<url>(https?://|www\.)[^\s]+)',
message.parameters,
re.IGNORECASE)
if not match:
return IRCResponse('{!r} is not a recognized URL format'.format(message.parameters), message.replyTo)
follow = self.followURL(message, url=message.parameters, showContents=True)
if not follow:
return IRCResponse("Couldn't find a toot at {!r}".format(message.parameters), message.replyTo)
toot, _ = follow
return IRCResponse(toot, message.replyTo)
def followURL(self, _: IRCMessage, url: str, showContents: bool=False) -> [str, None]:
# check this is actually a Mastodon instance we're looking at
hostname = urlparse(url).hostname
endpoint = 'https://{domain}/api/v1/instance'.format(domain=hostname)
endpointResponse = self.bot.moduleHandler.runActionUntilValue('fetch-url', endpoint)
if not endpointResponse:
return
try:
endpointJSON = endpointResponse.json()
except json.decoder.JSONDecodeError:
return
if 'uri' not in endpointJSON:
return
response = self.bot.moduleHandler.runActionUntilValue('fetch-url',
'{}/embed'.format(url))
if not response:
return
soup = BeautifulSoup(response.content, 'lxml')
toot = soup.find(class_='entry')
if not toot:
# presumably not a toot, ignore
return
date = toot.find(class_='dt-published')['value']
date = dateutil.parser.parse(date)
date = date.astimezone(dateutil.tz.UTC)
date = date.strftime('%Y/%m/%d %H:%M')
name = toot.find(class_='p-name')
name = self.translateEmojo(name).text.strip()
user = toot.find(class_='display-name__account').text.strip()
user = '{} ({})'.format(name, user)
content = toot.find(class_='status__content')
summary = content.find(class_='p-summary')
if summary:
summary = self.translateEmojo(summary).text.strip()
text = content.find(class_='e-content')
text = self.translateEmojo(text)
# if there's no p tag, add one wrapping everything
if not text.find_all('p'):
text_children = list(text.children)
wrapper_p = soup.new_tag('p')
text.clear()
text.append(wrapper_p)
for child in text_children:
wrapper_p.append(child)
# replace <br /> tags with a newline
for br in text.find_all("br"):
br.replace_with('\n')
# then replace consecutive <p> tags with a double newline
lines = [line.text for line in text.find_all('p')]
text = '\n\n'.join(lines)
# strip empty lines, strip leading/ending whitespace,
# and replace newlines with gray pipes
graySplitter = colour(A.normal[' ', A.fg.gray['|'], ' '])
lines = [l.strip() for l in text.splitlines() if l.strip()]
text = graySplitter.join(lines)
media = toot.find('div', {'data-component': 'MediaGallery'})
if media:
media = json.loads(media['data-props'])
media = media['media']
numMedia = len(media)
if numMedia == 1:
medType = media[0]['type']
#size = media[0]['meta']['original']['size']
description = media[0]['description']
description = ': {}'.format(description) if description else ''
media = '(attached {medType}{description})'.format(medType=medType,
description=description)
else:
media = '({} media attached)'.format(numMedia)
formatString = str(colour(A.normal[A.fg.gray['[{date}]'],
A.bold[' {user}:'],
A.fg.red[' [{summary}]'] if summary else '',
' {text}' if not summary or showContents else '',
A.fg.gray[' {media}'] if media else '']))
return formatString.format(date=date,
user=user,
summary=summary,
text=text,
media=media), ''
def translateEmojo(self, tagTree):
for img in tagTree.find_all('img', class_='emojione'):
img.replace_with(img['title'])
return tagTree
mastodon = Mastodon()
| mit | 7,755,254,143,440,010,000 | 38.846154 | 117 | 0.556789 | false | 4.30173 | false | false | false |
UBC-Victorious-410/project | tools/pmd_parser.py | 1 | 1800 | __author__ = 'Austin'
import os,os.path
import xml.etree.ElementTree as ET
#get the path to the pmd commits
#getcwd = get current working directory
#print os.getcwd()
def parse(logpath):
rootdir = os.path.abspath(os.path.dirname(os.getcwd()))
#print rootdir
LoR= []
pmd_folder = logpath
print pmd_folder
i = 0
completeName = os.path.join(pmd_folder, "CommitResult.txt")
with open(completeName, "w") as output:
for file in os.listdir(pmd_folder):
Result = dict()
currentfile = ""
num_viol = 0
i = i + 1
if os.path.isfile(pmd_folder +"\\"+ "commit"+str(i)+".xml"):
output.write("commit"+str(i)+".xml: \n")
f = open(pmd_folder +"\\"+ "commit"+str(i)+".xml")
lines = f.readlines()
for line in lines:
if '<file name=' in line:
temp = line.split("\\")
if currentfile == "":
currentfile = temp[-1][:-3]
Result[currentfile] = num_viol
else:
if currentfile not in Result:
Result[currentfile] = num_viol
else:
Result[currentfile] += num_viol
num_viol = 0
currentfile = temp[-1][:-3]
if '</violation>' in line:
num_viol = num_viol + 1
for key in Result.keys():
output.write("\t" +key + " : " + str(Result[key]) + "\n")
# print num_viol
f.close()
LoR.append(Result)
return LoR
| mit | -5,457,313,715,963,984,000 | 29 | 77 | 0.437222 | false | 4.137931 | false | false | false |
Ruide/angr-dev | angr-management/angrmanagement/ui/dialogs/load_binary.py | 1 | 5343 |
import os
import logging
from PySide.QtGui import QDialog, QVBoxLayout, QHBoxLayout, QLabel, QTabWidget, QPushButton, QCheckBox, QFrame, \
QGroupBox, QListWidgetItem, QListWidget
from PySide.QtCore import Qt
import angr
l = logging.getLogger('dialogs.load_binary')
class LoadBinary(QDialog):
def __init__(self, file_path, parent=None):
super(LoadBinary, self).__init__(parent)
# initialization
self.file_path = file_path
self.option_widgets = { }
# return values
self.cfg_args = None
self.load_options = None
self.setWindowTitle('Load a new binary')
self.main_layout = QVBoxLayout()
self._init_widgets()
self._try_loading()
self.setLayout(self.main_layout)
self.show()
@property
def filename(self):
return os.path.basename(self.file_path)
#
# Private methods
#
def _try_loading(self):
try:
proj = angr.Project(self.file_path)
deps = [ i for i in list(proj.loader._satisfied_deps)
if i not in { 'angr syscalls', 'angr externs', '##cle_tls##', self.filename }
]
dep_list = self.option_widgets['dep_list'] # type: QListWidget
for dep in deps:
dep_item = QListWidgetItem(dep)
dep_item.setData(Qt.CheckStateRole, Qt.Unchecked)
dep_list.addItem(dep_item)
except Exception:
# I guess we will have to load it as a blob?
l.warning("Preloading of the binary fails due to an exception.", exc_info=True)
def _init_widgets(self):
# filename
filename_caption = QLabel(self)
filename_caption.setText('File name:')
filename = QLabel(self)
filename.setText(self.filename)
filename_layout = QHBoxLayout()
filename_layout.addWidget(filename_caption)
filename_layout.addWidget(filename)
self.main_layout.addLayout(filename_layout)
# central tab
tab = QTabWidget()
self._init_central_tab(tab)
self.main_layout.addWidget(tab)
# buttons
ok_button = QPushButton(self)
ok_button.setText('OK')
ok_button.clicked.connect(self._on_ok_clicked)
cancel_button = QPushButton(self)
cancel_button.setText('Cancel')
cancel_button.clicked.connect(self._on_cancel_clicked)
buttons_layout = QHBoxLayout()
buttons_layout.addWidget(ok_button)
buttons_layout.addWidget(cancel_button)
self.main_layout.addLayout(buttons_layout)
def _init_central_tab(self, tab):
self._init_load_options_tab(tab)
self._init_cfg_options_tab(tab)
def _init_load_options_tab(self, tab):
# auto load libs
auto_load_libs = QCheckBox(self)
auto_load_libs.setText("Automatically load all libraries")
auto_load_libs.setChecked(False)
self.option_widgets['auto_load_libs'] = auto_load_libs
# dependencies list
dep_group = QGroupBox("Dependencies")
dep_list = QListWidget(self)
self.option_widgets['dep_list'] = dep_list
sublayout = QVBoxLayout()
sublayout.addWidget(dep_list)
dep_group.setLayout(sublayout)
layout = QVBoxLayout()
layout.addWidget(auto_load_libs)
layout.addWidget(dep_group)
layout.addStretch(0)
frame = QFrame(self)
frame.setLayout(layout)
tab.addTab(frame, "Loading Options")
def _init_cfg_options_tab(self, tab):
resolve_indirect_jumps = QCheckBox(self)
resolve_indirect_jumps.setText('Resolve indirect jumps')
resolve_indirect_jumps.setChecked(True)
self.option_widgets['resolve_indirect_jumps'] = resolve_indirect_jumps
collect_data_refs = QCheckBox(self)
collect_data_refs.setText('Collect cross-references and infer data types')
collect_data_refs.setChecked(True)
self.option_widgets['collect_data_refs'] = collect_data_refs
layout = QVBoxLayout()
layout.addWidget(resolve_indirect_jumps)
layout.addWidget(collect_data_refs)
layout.addStretch(0)
frame = QFrame(self)
frame.setLayout(layout)
tab.addTab(frame, 'CFG Options')
#
# Event handlers
#
def _on_ok_clicked(self):
force_load_libs = [ ]
skip_libs = set()
dep_list = self.option_widgets['dep_list'] # type: QListWidget
for i in xrange(dep_list.count()):
item = dep_list.item(i) # type: QListWidgetItem
if item.checkState() == Qt.Checked:
force_load_libs.append(item.text())
else:
skip_libs.add(item.text())
self.load_options = {
'auto_load_libs': self.option_widgets['auto_load_libs'].isChecked(),
'force_load_libs': force_load_libs,
'skip_libs': skip_libs,
}
self.cfg_args = {
'resolve_indirect_jumps': self.option_widgets['resolve_indirect_jumps'].isChecked(),
'collect_data_references': self.option_widgets['collect_data_refs'].isChecked(),
}
self.close()
def _on_cancel_clicked(self):
self.cfg_args = None
self.close()
| bsd-2-clause | 3,316,653,497,871,381,000 | 27.725806 | 113 | 0.604155 | false | 3.852199 | false | false | false |
animekita/selvbetjening | selvbetjening/core/events/models/payment.py | 1 | 1331 | import logging
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from selvbetjening.core.events.models import Attend
logger = logging.getLogger('selvbetjening.events')
class Payment(models.Model):
class Meta:
app_label = 'events'
user = models.ForeignKey(get_user_model())
attendee = models.ForeignKey(Attend, null=True, on_delete=models.SET_NULL) # store abandoned payments
amount = models.DecimalField(max_digits=6, decimal_places=2)
signee = models.ForeignKey(get_user_model(), null=True, blank=True, related_name='signee_payment_set')
note = models.CharField(max_length=256, blank=True)
created_date = models.DateTimeField(auto_now_add=True)
@receiver(post_save, sender=Payment)
def payment_save_handler(sender, **kwargs):
instance = kwargs.get('instance')
created = kwargs.get('created')
try:
if created:
logger.info('Payment registered (%s,-) -- %s', instance.amount, instance.note,
extra={
'related_user': instance.user,
'related_attendee': instance.attendee
})
except ObjectDoesNotExist:
pass | mit | -56,545,008,254,815,176 | 29.272727 | 106 | 0.686702 | false | 3.869186 | false | false | false |
eyaler/tensorpack | tensorpack/dataflow/dftools.py | 1 | 1915 | # -*- coding: utf-8 -*-
# File: dftools.py
import multiprocessing as mp
from six.moves import range
from ..utils.concurrency import DIE
from ..utils.develop import deprecated
from .serialize import LMDBSerializer, TFRecordSerializer
__all__ = ['dump_dataflow_to_process_queue',
'dump_dataflow_to_lmdb', 'dump_dataflow_to_tfrecord']
def dump_dataflow_to_process_queue(df, size, nr_consumer):
"""
Convert a DataFlow to a :class:`multiprocessing.Queue`.
The DataFlow will only be reset in the spawned process.
Args:
df (DataFlow): the DataFlow to dump.
size (int): size of the queue
nr_consumer (int): number of consumer of the queue.
The producer will add this many of ``DIE`` sentinel to the end of the queue.
Returns:
tuple(queue, process):
The process will take data from ``df`` and fill
the queue, once you start it. Each element in the queue is (idx,
dp). idx can be the ``DIE`` sentinel when ``df`` is exhausted.
"""
q = mp.Queue(size)
class EnqueProc(mp.Process):
def __init__(self, df, q, nr_consumer):
super(EnqueProc, self).__init__()
self.df = df
self.q = q
def run(self):
self.df.reset_state()
try:
for idx, dp in enumerate(self.df):
self.q.put((idx, dp))
finally:
for _ in range(nr_consumer):
self.q.put((DIE, None))
proc = EnqueProc(df, q, nr_consumer)
return q, proc
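# Minimal usage sketch (names are illustrative): start the producer process,
# then have each consumer call q.get() until it receives the (DIE, None) sentinel.
#   q, proc = dump_dataflow_to_process_queue(df, size=50, nr_consumer=4)
#   proc.start()
#   idx, dp = q.get()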
@deprecated("Use LMDBSerializer.save instead!", "2019-01-31")
def dump_dataflow_to_lmdb(df, lmdb_path, write_frequency=5000):
LMDBSerializer.save(df, lmdb_path, write_frequency)
@deprecated("Use TFRecordSerializer.save instead!", "2019-01-31")
def dump_dataflow_to_tfrecord(df, path):
TFRecordSerializer.save(df, path)
| apache-2.0 | 3,301,600,712,247,803,000 | 29.396825 | 88 | 0.610444 | false | 3.620038 | false | false | false |
kashev/kashev.rocks | manage.py | 1 | 1238 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# kashev.rocks
# Kashev Dalmia - [email protected]
from flask.ext.script import Manager
from flask.ext.assets import ManageAssets
from src.kashevrocks import app
from src.assets import register_assets
manager = Manager(app)
assets_env = register_assets(app)
manager.add_command("assets", ManageAssets(assets_env))
@manager.command
def liveserver(debug=True):
""" Runs a live reloading server which watches non-python code as well. """
import livereload
app.debug = debug
assets_env.debug = debug
server = livereload.Server(app.wsgi_app)
server.watch('src/')
server.serve()
@manager.command
def clean():
""" Cleans up all generated and cache files from the project. """
import shutil
import os
paths_to_clean = ['src/static/.webassets-cache',
'src/static/generated',
'debug.log']
for path in paths_to_clean:
try:
shutil.rmtree(path)
except NotADirectoryError:
os.remove(path) # It's a file, not a directory
except FileNotFoundError:
pass # They're not there, that's fine.
if __name__ == "__main__":
manager.run()
| mit | -2,425,773,170,606,357,000 | 22.807692 | 79 | 0.642973 | false | 3.598837 | false | false | false |
sonus89/FIPER | host/bridge.py | 1 | 5953 | from __future__ import print_function, absolute_import, unicode_literals
# stdlib imports
import time
from datetime import datetime
# project imports
from FIPER.generic.subsystem import StreamDisplayer
from FIPER.generic.util import Table
from FIPER.generic.probeclient import Probe
from FIPER.host.component import Listener, Console
# noinspection PyUnusedLocal
class FleetHandler(object):
"""
Class of the main server.
Groups together the following concepts:
- Console is run in the main thread, waiting for and parsing input commands.
    - Listener is listening for incoming car connections in a separate thread.
It also coordinates the creation and validation of new car interfaces.
- CarInterface instances are stored in the .cars dictionary.
- StreamDisplayer objects can be attached to CarInterface objects and
are run in a separate thread each.
- FleetHandler itself is responsible for sending commands to CarInterfaces
and to coordinate the shutdown of the cars on this side, etc.
"""
the_one = None
def __init__(self, myIP):
self.clients = {}
self.ip = myIP
self.cars = {}
self.watchers = {}
self.since = datetime.now()
self.status = "Idle"
self.console = Console(
master_name="FIPER-Server",
status_tag=self.status,
commands_dict={
"cars": self.printout_cars,
"kill": self.kill_car,
"watch": self.watch_car,
"unwatch": self.stop_watch,
"shutdown": self.shutdown,
"status": self.report,
"message": self.message,
"probe": self.probe,
"connect": Probe.initiate,
"sweep": self.sweep
}
)
self.listener = Listener(self)
self.listener.start()
print("SERVER: online")
def mainloop(self):
self.console.mainloop()
def printout_cars(self, *args):
"""List the current car-connections"""
print("Cars online:\n{}\n".format("\n".join(self.cars)))
@staticmethod
def probe(*ips):
"""Probe the supplied ip address(es)"""
IDs = dict(Probe.probe(*ips))
for ID, IP in IDs.iteritems():
print("{:<15}: {}".format(IP, ID if ID else "-"))
def message(self, ID, *msgs):
"""Just supply the car ID, and then the message to send."""
self.cars[ID].send(" ".join(msgs).encode())
@staticmethod
def sweep(*ips):
"""Probe the supplied ip addresses and print the formatted results"""
def get_status(dID):
status = ""
if dID is None:
status = "offline"
else:
status = "available"
return status
if not ips:
print("[sweep]: please specify an IP address range!")
return
IDs = dict(Probe.probe(*ips))
tab = Table(["IP", "ID", "status"],
[3*5, max(len(unicode(v)) for v in IDs.itervalues()), 11])
for IP, ID in IDs.iteritems():
tab.add(IP, ID, get_status(ID))
print(tab.get())
def kill_car(self, ID, *args):
"""Sends a shutdown message to a remote car, then tears down the connection"""
if ID not in self.cars:
print("SERVER: no such car:", ID)
return
if ID in self.watchers:
self.stop_watch(ID)
success = self.cars[ID].teardown(sleep=1)
if success:
del self.cars[ID]
def watch_car(self, ID, *args):
"""Launches the stream display in a separate thread"""
if ID not in self.cars:
print("SERVER: no such car:", ID)
return
if ID in self.watchers:
print("SERVER: already watching", ID)
return
self.cars[ID].send(b"stream on")
time.sleep(1)
self.watchers[ID] = StreamDisplayer(self.cars[ID])
def stop_watch(self, ID, *args):
"""Tears down the StreamDisplayer and shuts down a stream"""
if ID not in self.watchers:
print("SERVER: {} is not being watched!".format(ID))
return
self.cars[ID].send(b"stream off")
self.watchers[ID].teardown(sleep=1)
del self.watchers[ID]
def shutdown(self, *args):
"""Shuts the server down, terminating all threads nicely"""
self.listener.teardown(1)
rounds = 0
while self.cars:
print("SERVER: Car corpse collection round {}/{}".format(rounds+1, 4))
            for ID in list(self.cars.keys()):  # copy; kill_car() mutates self.cars
if ID in self.watchers:
self.stop_watch(ID)
self.kill_car(ID)
if rounds >= 3:
print("SERVER: cars: [{}] didn't shut down correctly"
.format(", ".join(self.cars.keys())))
break
rounds += 1
else:
print("SERVER: All cars shut down correctly!")
print("SERVER: Exiting...")
def report(self, *args):
"""
Prints a nice server status report
"""
repchain = "FIPER Server\n"
repchain += "-" * (len(repchain) - 1) + "\n"
repchain += "Up since " + self.since.strftime("%Y.%m.%d %H:%M:%S") + "\n"
repchain += "Cars online: {}\n".format(len(self.cars))
print("\n" + repchain + "\n")
    def __enter__(self):
        """Context enter method"""
        if FleetHandler.the_one is None:
            FleetHandler.the_one = self
        else:
            raise RuntimeError("Only one can remain!")
        return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Context exit method, ensures proper shutdown"""
if FleetHandler.the_one is not None:
FleetHandler.the_one.shutdown()
FleetHandler.the_one = None
| mit | 6,569,932,496,502,960,000 | 32.256983 | 86 | 0.560726 | false | 4.000672 | false | false | false |
shravan-shandilya/web3.py | web3/utils/address.py | 1 | 2834 | # Address utilities
from __future__ import absolute_import
import re
from .crypto import (
sha3,
)
from .encoding import (
encode_hex,
)
from .string import (
force_text,
coerce_args_to_text,
coerce_return_to_text,
)
from .types import (
is_string,
)
from .formatting import (
add_0x_prefix,
remove_0x_prefix,
is_prefixed,
)
@coerce_args_to_text
def is_address(address):
"""
Checks if the given string is an address
"""
if not is_string(address):
return False
if not re.match(r"^(0x)?[0-9a-fA-F]{40}$", address):
return False
elif re.match(r"^(0x)?[0-9a-f]{40}", address) or re.match(r"(0x)?[0-9A-F]{40}$", address):
return True
else:
return is_checksum_address(address)
@coerce_args_to_text
def is_checksum_address(address):
"""
Checks if the given string is a checksummed address
"""
if not is_string(address):
return False
checksum_address = to_checksum_address(address)
return force_text(address) == force_text(checksum_address)
@coerce_args_to_text
def is_strict_address(address):
"""
Checks if the given string is strictly an address
"""
if not is_string(address):
return False
return re.match(r"^0x[0-9a-fA-F]{40}$", address) is not None
@coerce_args_to_text
@coerce_return_to_text
def to_checksum_address(address):
"""
Makes a checksum address
"""
if not is_string(address):
return False
address = remove_0x_prefix(address.lower())
addressHash = sha3(address)
checksumAddress = "0x"
for i in range(len(address)):
if int(addressHash[i], 16) > 7:
checksumAddress += address[i].upper()
else:
checksumAddress += address[i]
return checksumAddress
@coerce_args_to_text
@coerce_return_to_text
def to_address(address):
"""
    Transforms the given string to a valid 20-byte address with 0x prefix
"""
if is_string(address):
if len(address) == 42:
return address.lower()
elif len(address) == 40:
return add_0x_prefix(address.lower())
elif len(address) == 20:
return encode_hex(address)
elif len(address) in {66, 64}:
long_address = remove_0x_prefix(address.lower())
if is_prefixed(long_address, '000000000000000000000000'):
return add_0x_prefix(address[-40:])
raise ValueError("Unknown address format")
@coerce_args_to_text
def is_same_address(addr1, addr2):
"""
Checks if both addresses are same or not
"""
if is_address(addr1) & is_address(addr2):
return to_checksum_address(addr1) == to_checksum_address(addr2)
else:
return to_checksum_address(to_address(addr1)) == to_checksum_address(to_address(addr2))
| mit | -3,592,311,787,607,494,700 | 22.229508 | 95 | 0.6235 | false | 3.481572 | false | false | false |
dylanseago/IdeaHub | ideahub/apps/accounts/forms.py | 1 | 4165 | from crispy_forms.bootstrap import AppendedText, PrependedText
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Div, Field
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth.models import User
from ideahub import settings
from ideahub.utils import glyphicon, field_max_len
class LoginForm(AuthenticationForm):
"""
A form that logs a user in
"""
remember_me = forms.BooleanField(
label = 'Remember Me',
required = False,
widget = forms.CheckboxInput
)
def remember_user(self):
try:
if self.cleaned_data.get('remember_me'):
return True
except AttributeError:
pass
return False
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_action = 'login'
self.helper.label_class = 'sr-only'
self.helper.layout = Layout(
PrependedText('username', glyphicon('user'), placeholder='Username'),
PrependedText('password', glyphicon('lock'), placeholder='Password'),
Field('remember_me', css_class='checkbox-inline'),
Submit('submit', 'Login'),
)
class SignupForm(UserCreationForm):
"""
A form that creates a user, with no privileges, from the given username, email,
password, first name and last name.
"""
first_name = forms.CharField(
label = 'First Name',
max_length = field_max_len(User, 'first_name'),
required = True,
widget = forms.TextInput,
)
last_name = forms.CharField(
label = 'Last Name',
max_length = field_max_len(User, 'last_name'),
required = True,
widget = forms.TextInput,
)
email = forms.EmailField(
label = 'Email',
max_length = field_max_len(User, 'email'),
required = True,
widget = forms.EmailInput,
)
username = forms.RegexField(
label = "Username",
max_length = field_max_len(User, 'username'),
required = True,
regex = r'^[\w.@+-]+$',
help_text = "{} characters or fewer. Letters, digits and @/./+/-/_ only.".format(field_max_len(User, 'username')),
error_messages = {
'invalid': "This value may contain only letters, numbers and @/./+/-/_ characters.",
},
)
password1 = forms.CharField(
label = "Password",
min_length = 8,
required = True,
widget = forms.PasswordInput,
help_text = "8 characters minimum.",
)
password2 = forms.CharField(
label = "Repeat Password",
required = True,
widget = forms.PasswordInput,
help_text = "Enter the same password as above, for verification.",
)
class Meta:
model = User
fields = ['first_name', 'last_name', 'email', 'username']
def save(self, commit=True, auth_after_save=True):
user = super(SignupForm, self).save(commit)
if commit and auth_after_save:
user = authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password1'])
return user
def __init__(self, *args, **kwargs):
super(SignupForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.form_method = 'post'
self.helper.form_action = 'signup'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-10'
self.helper.layout = Layout(
Field('first_name', placeholder='First Name'),
Field('last_name', placeholder='Last Name'),
Field('email', placeholder='Email'),
Field('username', placeholder='Username'),
Field('password1', placeholder='Password'),
Field('password2', placeholder='Repeat Password'),
Submit('submit', 'Sign Up'),
) | mit | 3,287,142,277,204,960,000 | 33.147541 | 122 | 0.598319 | false | 4.095379 | false | false | false |
Adamssss/projectEuler | Problem 001-150 Python/pb060.py | 1 | 1366 | import math
import time
t1 = time.time()
prime = []
def primeSieve(n):
global prime
n = (n+1)//2
p = [True]*(n)
i = 1
prime.append(2)
while i < n:
if p[i]:
t = 2*i+1
prime.append(t)
p[i] = False
j = 2*i*i+2*i
while j < n:
p[j] = False
j += t
i += 1
return prime
primeSieve(10000)
# concatenate two numbers digit-wise
def seq(a,b):
dig = math.floor(math.log10(b)+1)
return a*math.pow(10,dig)+b
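# e.g. seq(109, 673) -> 109673.0 (a float, since math.pow returns float)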
def isPrime(item):
root = math.floor(math.sqrt(item))
i = 0
t = prime[i]
while t <= root:
if item%t == 0:
return False
if t < prime[-1]:
i += 1
t = prime[i]
else:
t += 2
return True
ps = [[3]]
def canadd(tset,num):
for i in tset:
if not isPrime(seq(i,num)):
return False
if not isPrime(seq(num,i)):
return False
return True
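# Greedily extend existing prime sets: a prime joins a set only if it forms a
# prime when concatenated with every member, in both orders. The first set to
# reach five members is printed as the sum of its primes.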
def getanswer():
global ps
for j in range(3,len(prime)):
for k in ps:
if canadd(k,prime[j]):
ps.append(k+[prime[j]])
if len(k) == 4:
print(sum(ps[-1]))
return
if prime[j] < 20:
ps.append([prime[j]])
getanswer()
print("time:",time.time()-t1)
| mit | 3,745,079,951,915,523,000 | 18.239437 | 39 | 0.444363 | false | 3.184149 | false | false | false |
slipstream/SlipStreamClient | client/src/main/scripts/slipstream-prepare-bootstrap.py | 1 | 3230 | #!/usr/bin/env python
"""
SlipStream Client
=====
Copyright (C) 2013 SixSq Sarl (sixsq.com)
=====
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import shutil
def process(filename, cmd=None):
''' Append at the end of the file (e.g. rc.local) the SlipStream bootstrap
file to trigger the execution of the node (e.g. package generation,
image creation, deployment)
Do the following:
- Open target file
- Reverse the lines
- Process empty lines, if any
- Look for exit or not empty line
- If starts with exit, prepend the bootstrap line
- If starts with none empty line (but not exit),
prepend the bootstrap script
- Copy rest of lines
- Move original file to <filename>.sav
- Replace old file
Option: The 'cmd' field can be used to customize the command that will be inserted in the file'''
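    # Illustrative example: if the target rc.local ends with an "exit 0" line,
    # the two bootstrap lines end up inserted immediately before that "exit 0".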
bootstrap = 'mkdir -p /tmp/slipstream/reports\n'
if cmd is None:
bootstrap += os.path.join(os.sep, 'etc', 'slipstream.bootstrap.sh') \
+ ' > ' + os.path.join(os.sep, 'tmp', 'slipstream', 'reports', 'node-execution.log') \
+ ' 2>&1 &\n'
else:
bootstrap += cmd + '\n'
# Backup the file if it was not done before
originalFilename = filename + '.orig'
if not os.path.exists(originalFilename):
shutil.copyfile(filename, originalFilename)
file = open(filename)
lines = file.readlines()
newlines = []
gotit = False
lines.reverse()
for line in lines:
# Simply copy empty lines
if gotit:
newlines.append(line)
continue
if line.strip() == '':
newlines.append(line)
continue
if line.strip().startswith('exit'):
gotit = True
newlines.append(line)
newlines.append(bootstrap)
continue
gotit = True
newlines.append(bootstrap)
newlines.append(line)
savedfilename = filename + '.sav'
if os.path.exists(savedfilename):
os.remove(savedfilename)
shutil.move(filename, savedfilename)
newfile = open(filename, 'w')
# reverse the lines
newlines.reverse()
newfile.writelines(newlines)
newfile.close()
os.chmod(filename, 0755)
if __name__ == '__main__':
if len(sys.argv) < 2 or len(sys.argv) > 3:
sys.stderr.write('Error, usage is: %s <filename> [<command-string>], got: %s\n' %
(sys.argv[0], ' '.join(sys.argv)))
sys.exit(1)
cmd = None
if len(sys.argv) == 3:
cmd = sys.argv[2]
process(sys.argv[1], cmd)
print 'Done!'
| apache-2.0 | -7,584,596,654,818,867,000 | 32.645833 | 105 | 0.608359 | false | 4.012422 | false | false | false |
alcor-dtu/ml-poster | code/ImageDenoising.py | 1 | 17727 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 01 15:52:10 2016
@author: aluo
"""
from __future__ import print_function
import os
import sys
import errno
import timeit
import pickle
import numpy
from matplotlib import pyplot
from generate_patches import recombine_image
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from logistic_sgd import load_data
from utils import tile_raster_images
try:
import PIL.Image as Image
except ImportError:
import Image
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
class dA(object):
def __init__(
self,
numpy_rng,
theano_rng=None,
input=None,
noiseInput=None,
n_visible=32*32,
n_hidden=800,
W=None,
bhid=None,
bvis=None
):
"""
Initialize the dA class by specifying the number of visible units (the
dimension d of the input ), the number of hidden units ( the dimension
d' of the latent or hidden space ) and the corruption level. The
constructor also receives symbolic variables for the input, weights and
bias. Such a symbolic variables are useful when, for example the input
is the result of some computations, or when weights are shared between
the dA and an MLP layer. When dealing with SdAs this always happens,
the dA on layer 2 gets as input the output of the dA on layer 1,
and the weights of the dA are used in the second stage of training
to construct an MLP.
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: number random generator used to generate weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng`
:type input: theano.tensor.TensorType
:param input: a symbolic description of the input or None for
standalone dA
:type n_visible: int
:param n_visible: number of visible units
:type n_hidden: int
:param n_hidden: number of hidden units
:type W: theano.tensor.TensorType
:param W: Theano variable pointing to a set of weights that should be
shared belong the dA and another architecture; if dA should
be standalone set this to None
:type bhid: theano.tensor.TensorType
:param bhid: Theano variable pointing to a set of biases values (for
hidden units) that should be shared belong dA and another
architecture; if dA should be standalone set this to None
:type bvis: theano.tensor.TensorType
:param bvis: Theano variable pointing to a set of biases values (for
visible units) that should be shared belong dA and another
architecture; if dA should be standalone set this to None
"""
self.n_visible = n_visible
self.n_hidden = n_hidden
# create a Theano random generator that gives symbolic random values
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# note : W' was written as `W_prime` and b' as `b_prime`
if not W:
# W is initialized with `initial_W` which is uniformely sampled
# from -4*sqrt(6./(n_visible+n_hidden)) and
# 4*sqrt(6./(n_hidden+n_visible))the output of uniform if
# converted using asarray to dtype
# theano.config.floatX so that the code is runable on GPU
initial_W = numpy.asarray(
numpy_rng.uniform(
low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
size=(n_visible, n_hidden)
),
dtype=theano.config.floatX
)
W = theano.shared(value=initial_W, name='W', borrow=True)
if not bvis:
bvis = theano.shared(
value=numpy.zeros(
n_visible,
dtype=theano.config.floatX
),
name='b_prime',
borrow=True
)
if not bhid:
bhid = theano.shared(
value=numpy.zeros(
n_hidden,
dtype=theano.config.floatX
),
name='b',
borrow=True
)
self.W = W
# b corresponds to the bias of the hidden
self.b = bhid
# b_prime corresponds to the bias of the visible
self.b_prime = bvis
# tied weights, therefore W_prime is W transpose
self.W_prime = self.W.T
self.theano_rng = theano_rng
# if no input is given, generate a variable representing the input
if input is None:
# we use a matrix because we expect a minibatch of several
# examples, each example being a row
self.x = T.dmatrix(name='input')
else:
self.x = input
if noiseInput is None:
# we use a matrix because we expect a minibatch of several
# examples, each example being a row
self.noise_x = T.dmatrix(name='noiseInput')
else:
self.noise_x = noiseInput
self.params = [self.W, self.b, self.b_prime]
def get_corrupted_input(self, input, corruption_level):
"""This function keeps ``1-corruption_level`` entries of the inputs the
same and zero-out randomly selected subset of size ``coruption_level``
Note : first argument of theano.rng.binomial is the shape(size) of
random numbers that it should produce
second argument is the number of trials
third argument is the probability of success of any trial
this will produce an array of 0s and 1s where 1 has a
probability of 1 - ``corruption_level`` and 0 with
``corruption_level``
The binomial function return int64 data type by
default. int64 multiplicated by the input
type(floatX) always return float64. To keep all data
in floatX when floatX is float32, we set the dtype of
the binomial to floatX. As in our case the value of
the binomial is always 0 or 1, this don't change the
result. This is needed to allow the gpu to work
correctly as it only support float32 for now.
"""
return self.theano_rng.binomial(size=input.shape, n=1,
p=1 - corruption_level,
dtype=theano.config.floatX) * input
def get_hidden_values(self, input):
""" Computes the values of the hidden layer """
return T.nnet.sigmoid(T.dot(input, self.W) + self.b)
def get_reconstructed_input(self, hidden):
"""Computes the reconstructed input given the values of the
hidden layer
"""
return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)
def get_denoised_patch_function(self, patch):
y = self.get_hidden_values(patch)
z = self.get_reconstructed_input(y)
return z
def get_cost_updates(self, learning_rate):
""" This function computes the cost and the updates for one trainng
step of the dA """
# tilde_x = self.get_corrupted_input(self.x, corruption_level)
tilde_x=self.noise_x
y = self.get_hidden_values(tilde_x)
z = self.get_reconstructed_input(y)
# note : we sum over the size of a datapoint; if we are using
# minibatches, L will be a vector, with one entry per
# example in minibatch
L = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
# note : L is now a vector, where each element is the
# cross-entropy cost of the reconstruction of the
# corresponding example of the minibatch. We need to
# compute the average of all these to get the cost of
# the minibatch
cost = T.mean(L)
# cost = L
# square_param = numpy.multiply(self.params[0],self.params[0])
# regularization = learning_rate* 0.5 * T.mean(T.sum(T.sum(square_param,axis = 0),axis=0))
# compute the gradients of the cost of the `dA` with respect
# to its parameters
gparams = T.grad(cost, self.params)
#gparams[0] = gparams[0] + learning_rate * self.params[0] / self.params[0].size
# generate the list of updates
updates = [
(param, param - learning_rate * gparam)
for param, gparam in zip(self.params, gparams)
]
return (cost, updates)
def test_dA(Width = 32, Height = 32, hidden = 800, learning_rate=0.1, training_epochs=15,
dataset = None, noise_dataset=None,
batch_size=20, output_folder='dA_plots'):
"""
This demo is tested on MNIST
:type learning_rate: float
:param learning_rate: learning rate used for training the DeNosing
AutoEncoder
:type training_epochs: int
:param training_epochs: number of epochs used for training
:type dataset: string
:param dataset: path to the picked dataset
"""
train_set_x = theano.shared(dataset)
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x', dtype='float32') # the data is presented as rasterized images
noise_x = T.matrix('noise_x', dtype='float32')
#####################################
# BUILDING THE MODEL CORRUPTION 30% #
#####################################
rng = numpy.random.RandomState(1)
theano_rng = RandomStreams(rng.randint(2 ** 30))
noise_train_set_x = theano.shared(noise_dataset)
da = dA(
numpy_rng=rng,
theano_rng=theano_rng,
input=x,
noiseInput=noise_x,
n_visible=Width * Height,
n_hidden=hidden
)
cost, updates = da.get_cost_updates(
learning_rate=learning_rate
)
train_da = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
noise_x: noise_train_set_x[index * batch_size: (index + 1) * batch_size]
}
)
start_time = timeit.default_timer()
############
# TRAINING #
############
# go through training epochs
for epoch in range(training_epochs):
# go through trainng set
c = []
for batch_index in range(n_train_batches):
c.append(train_da(batch_index))
if epoch % 100 == 0:
print('Training epoch %d, cost ' % epoch, numpy.mean(c))
end_time = timeit.default_timer()
training_time = (end_time - start_time)
print(('The training code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % (training_time / 60.)), file=sys.stderr)
W_corruption = da.W
bhid_corruption = da.b
bvis_corruption = da.b_prime
results = (W_corruption,
bhid_corruption, bvis_corruption)
return results
def unpickle(file):
fo = open(file, 'rb')
d = pickle.load(fo)
fo.close()
return d
def showRGBImage(array_data, W, H):
array_data = array_data.reshape(3,W, H).transpose()
array_data = numpy.swapaxes(array_data,0,1)
pyplot.axis('off')
array_data = pyplot.imshow(array_data)
def showGrayImage(data, W, H):
data = data.reshape(W,H)
pyplot.axis('off')
pyplot.imshow(data,cmap='Greys_r')
def showEncodeImage(data, autoEncoder, W, H):
X = data
tilde_X = X
Y = autoEncoder.get_hidden_values(tilde_X)
Z = autoEncoder.get_reconstructed_input(Y)
Y = Y.eval()
Z = Z.eval()
# tilde_X = tilde_X.eval()
showGrayImage(tilde_X, W, H)
pyplot.figure()
showGrayImage(Z, W, H)
pyplot.figure()
pyplot.show()
def saveTrainedData(path,noise_W, noise_b, noise_b_p,hidden, Width, Height ):
d = {}
d["noise_W"] = {"data" : noise_W}
d["noise_b"] = {"data" : noise_b}
d["noise_b_p"] = {"data" : noise_b_p}
d["hidden"] = {"data" : hidden}
d["Width"] = {"data" : Width}
d["Height"] = {"data" : Height}
ff = open(path, "wb")
pickle.dump(d, ff, protocol=pickle.HIGHEST_PROTOCOL)
ff.close()
def loadTrainedData(path):
d = unpickle(path)
noise_W = d["noise_W"]["data"]
noise_b = d["noise_b"]["data"]
noise_b_p = d["noise_b_p"]["data"]
hidden = d["hidden"]["data"]
Width = d["Width"]["data"]
Height = d["Height"]["data"]
results =(noise_W,noise_b,noise_b_p,hidden,Width,Height)
return results
def filterImages(noise_datasets, autoEncoder):
d = noise_datasets.copy()
rgb = ('r', 'g', 'b')
x = T.matrix('x', dtype='float32')
evaluate = theano.function(
[x],
autoEncoder.get_denoised_patch_function(x)
)
for c in rgb:
imgs = numpy.array(d[c]['data'], dtype='float32')
X = imgs
Z = evaluate(X)
d[c]['data'] = Z
return d
def saveImage(image_dict, image_file_name, results_folder="./result_images"):
make_sure_path_exists(results_folder)
recombine_image(image_dict, results_folder + os.sep +image_file_name + '.png')
def loadDataset(name, source_folder = "./image_patch_data"):
make_sure_path_exists(source_folder)
dataset_path = source_folder + os.sep + name + '.dat'
datasets = unpickle(dataset_path)
patches = numpy.concatenate((datasets['r']['data'], datasets['g']['data'], datasets['b']['data']),axis=0)
patches_f = numpy.array(patches, dtype='float32')
return patches_f, datasets
def loadDatasets(reference_name_list, noisy_dataset_name_list,source_folder = "./image_patch_data"):
assert len(reference_name_list) == len(noisy_dataset_name_list)
make_sure_path_exists(source_folder)
clean_datasets = []
noisy_datasets = []
noisy_patches_f = numpy.zeros(1)
clean_patches_f = numpy.zeros(1)
for i in range(len(reference_name_list)):
ref_name = reference_name_list[i]
noise_name = noisy_dataset_name_list[i]
clean_patches_f_i, clean_dataset_i = loadDataset(ref_name, source_folder)
noisy_patches_f_i, noisy_dataset_i = loadDataset(noise_name, source_folder)
if i == 0:
clean_patches_f = clean_patches_f_i
noisy_patches_f = noisy_patches_f_i
else:
clean_patches_f = numpy.concatenate((clean_patches_f, clean_patches_f_i), axis = 0)
noisy_patches_f = numpy.concatenate((noisy_patches_f, noisy_patches_f_i), axis = 0)
clean_datasets.append(clean_dataset_i)
noisy_datasets.append(noisy_dataset_i)
patch_size = noisy_datasets[0]['patch_size']
return clean_patches_f, noisy_patches_f, clean_datasets, noisy_datasets, patch_size
if __name__ == '__main__':
dataset_base = "sponzat_0"
dataset_name = dataset_base + "_10000"
result_folder = "./result_images"
noise_dataset_samples = 5
noise_dataset_name = dataset_base +'_'+ str(noise_dataset_samples)
clean_patches_f, noisy_patches_f, clean_datasets, noisy_datasets, patch_size = loadDatasets(dataset_name, noise_dataset_name)
Width = patch_size[0]
Height = patch_size[1]
#PARAMETERS TO PLAY WITH
hidden_fraction = 0.5
hidden = int(hidden_fraction*Width * Height)
training_epochs = 100
learning_rate =0.01
batch_size = clean_patches_f.shape[0]
parameters_string = '_dA_epochs' + str(training_epochs) +'_hidden' + str(hidden) + '_lrate' + str(learning_rate) +'_W' +str(Width)
path = 'training/trained_variables_' + noise_dataset_name + parameters_string + '.dat'
isTrained = os.path.isfile(path)
if not isTrained:
noise_W, noise_b, noise_b_p = test_dA(dataset=clean_patches_f,learning_rate=learning_rate,
training_epochs=training_epochs,hidden=hidden,
Width = Width, Height = Height,
batch_size = batch_size,
noise_dataset=noisy_patches_f)
saveTrainedData(path, noise_W, noise_b, noise_b_p,hidden, Width, Height )
else:
noise_W, noise_b, noise_b_p,hidden, Width, Height = loadTrainedData(path)
rng = numpy.random.RandomState(123)
theano_rng = RandomStreams(rng.randint(2 ** 30))
rng = numpy.random.RandomState(123)
theano_rng = RandomStreams(rng.randint(2 ** 30))
noiseDA = dA(
numpy_rng=rng,
theano_rng=theano_rng,
input=clean_patches_f,
noiseInput=noisy_patches_f,
n_visible=Width * Height,
n_hidden=hidden,
W=noise_W,
bhid=noise_b,
bvis=noise_b_p
)
denoised_datasets = filterImages(noisy_datasets,noiseDA)
saveImage(denoised_datasets, noise_dataset_name + parameters_string,
result_folder)
| gpl-3.0 | -6,087,468,138,474,883,000 | 34.312749 | 134 | 0.586224 | false | 3.72416 | false | false | false |
F5Networks/f5-common-python | f5/bigip/tm/shared/licensing.py | 1 | 3467 | # coding=utf-8
#
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BIG-IP® system failover module
REST URI
``http://localhost/mgmt/tm/shared/license``
GUI Path
``System --> License``
REST Kind
``tm:shared:licensing:*``
"""
from f5.bigip.resource import PathElement
from f5.bigip.resource import UnnamedResource
from f5.sdk_exception import UnsupportedMethod
class Licensing(PathElement):
"""BIG-IP® licensing stats and states.
Licensing objects themselves do not support any methods and are just
containers for lower level objects.
.. note::
This is an unnamed resource so it has not ~Partition~Name pattern
at the end of its URI.
"""
def __init__(self, shared):
super(Licensing, self).__init__(shared)
self._meta_data['allowed_lazy_attributes'] = [
Activation,
Registration,
]
self._meta_data['attribute_registry'] = {
'tm:shared:licensing:activation:activatelicenseresponse':
Activation,
'tm:shared:licensing:registration:registrationlicenseresponse':
Registration,
}
class Activation(UnnamedResource):
"""BIG-IP® license activation status
Activation state objects only support the
:meth:`~f5.bigip.resource.Resource.load` method because they cannot be
modified via the API.
.. note::
This is an unnamed resource so it has not ~Partition~Name pattern
at the end of its URI.
"""
def __init__(self, licensing):
super(Activation, self).__init__(licensing)
self._meta_data['required_load_parameters'] = set()
self._meta_data['required_json_kind'] =\
'tm:shared:licensing:activation:activatelicenseresponse'
def update(self, **kwargs):
'''Update is not supported for License Activation
        :raises: UnsupportedMethod
'''
raise UnsupportedMethod(
"%s does not support the update method" % self.__class__.__name__
)
class Registration(UnnamedResource):
"""BIG-IP® license registration status
Registration state objects only support the
:meth:`~f5.bigip.resource.Resource.load` method because they cannot be
modified via the API.
.. note::
        This is an unnamed resource so it has no ~Partition~Name pattern
at the end of its URI.
"""
def __init__(self, licensing):
super(Registration, self).__init__(licensing)
self._meta_data['required_load_parameters'] = set()
self._meta_data['required_json_kind'] =\
            'tm:shared:licensing:registration:registrationlicenseresponse'
def update(self, **kwargs):
'''Update is not supported for License Registration
        :raises: UnsupportedMethod
'''
raise UnsupportedMethod(
"%s does not support the update method" % self.__class__.__name__
)
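
# Illustrative sketch (assumption-heavy, not part of the original module):
# how the activation state would typically be loaded through the SDK. The
# ManagementRoot entry point, the host/credentials and the lowercase
# attribute path are assumptions and may differ between SDK versions.
def _example_load_activation(hostname, username, password):
    """Hypothetical helper showing a typical Activation load."""
    from f5.bigip import ManagementRoot
    mgmt = ManagementRoot(hostname, username, password)
    # Activation is an unnamed resource, so load() takes no name/partition
    return mgmt.tm.shared.licensing.activation.load()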
| apache-2.0 | -455,912,475,897,392,100 | 29.377193 | 77 | 0.656367 | false | 4.291202 | false | false | false |
Gimpneek/exclusive-raid-gym-tracker | app/features/steps/page_object_models/gym_management.py | 1 | 2044 | """ Page Object Model - Gym Management Page """
from selenium.webdriver.common.by import By
from .selectors.gym_management import TRACKED_GYMS_TABLE_ROW, REMOVE_BUTTON, \
TABLE_COLUMNS
from .selectors.listing import SEARCH_BAR
from .common import BasePage
from .listing import ListingPage
class GymManagementPage(BasePage):
"""
Page Object Model for Gym Management Page
"""
def find_tracked_gym(self, gym_name):
"""
Locate the gym in the tracked gyms list
:return: WebElement of row with Gym in it or None
"""
gyms = self.driver.find_elements(*TRACKED_GYMS_TABLE_ROW)
for gym in gyms:
if gym_name in gym.text:
return gym
return None
def remove_tracked_gym(self, gym_row):
"""
Press the Remove button the supplied table row containing the Gym
to stop tracking
:param gym_row: WebElement of the table row that the Gym is in
"""
columns = gym_row.find_elements(*TABLE_COLUMNS)
remove_button = columns[1].find_element(*REMOVE_BUTTON)
button_selector = (
By.CSS_SELECTOR,
'a[href="{}"]'.format(remove_button.get_attribute('href'))
)
self.click_and_verify_change(
remove_button,
button_selector,
hidden=True
)
def enter_search_term(self, term):
"""
Enter a search term into the search bar
:param term: Term to enter
"""
page = ListingPage(self.driver)
page.enter_search_term(term)
def press_suggested_gym(self, gym):
"""
Press the suggested gym in the dropdown
:param gym: Gym to find
"""
page = ListingPage(self.driver)
suggestions = page.get_search_suggestions()
option = None
for suggestion in suggestions:
if gym in suggestion.text:
option = suggestion
assert option
page.click_and_verify_change(option, SEARCH_BAR)
| gpl-3.0 | 4,339,768,182,742,312,400 | 29.058824 | 78 | 0.600294 | false | 3.938343 | false | false | false |
simphony/tornado-webapi | tornadowebapi/exceptions.py | 1 | 2025 | from .http import httpstatus
class WebAPIException(Exception):
"""Base exception for the REST infrastructure
These are exceptions that can be raised by the handlers.
"""
#: HTTP code generally associated to this exception.
#: Missing any better info, default is a server error.
http_code = httpstatus.INTERNAL_SERVER_ERROR
def __init__(self, message=None, **kwargs):
"""Initializes the exception. keyword arguments will become
part of the representation as key/value pairs."""
super().__init__(message)
self.message = message
self.info = kwargs if len(kwargs) else None
class NotFound(WebAPIException):
"""Exception raised when the resource is not found.
Raise this exception in your handlers when you can't
find the resource the identifier refers to.
"""
http_code = httpstatus.NOT_FOUND
class Exists(WebAPIException):
"""Represents a case where the resource could not be created
because it already exists. This is generally raised in the
create() method if the resource has uniqueness constraints on
things other than the exposed id."""
http_code = httpstatus.CONFLICT
class BadRepresentation(WebAPIException):
"""Exception raised when the resource representation is
invalid or does not contain the appropriate keys.
Raise this exception in your handlers when the received
representation is ill-formed
"""
http_code = httpstatus.BAD_REQUEST
class BadQueryArguments(WebAPIException):
"""Exception raised when the query arguments do not conform to the
expected format.
"""
http_code = httpstatus.BAD_REQUEST
class BadRequest(WebAPIException):
"""Deprecated. Kept for compatibility. Use BadRepresentation."""
http_code = httpstatus.BAD_REQUEST
class Unable(WebAPIException):
"""Exception raised when the request cannot be performed
for whatever reason that is not dependent on the client.
"""
http_code = httpstatus.INTERNAL_SERVER_ERROR
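
# Illustrative sketch (not part of the original module): handlers would
# typically raise these exceptions, with keyword arguments becoming part of
# the error representation. The names below are assumptions for illustration.
def _example_lookup(collection, identifier):
    """Hypothetical helper demonstrating NotFound and BadRepresentation."""
    if not isinstance(identifier, str):
        raise BadRepresentation("identifier must be a string",
                                received=type(identifier).__name__)
    try:
        return collection[identifier]
    except KeyError:
        raise NotFound("no such resource", identifier=identifier)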
| bsd-3-clause | 4,556,857,852,830,923,000 | 31.66129 | 70 | 0.719012 | false | 4.753521 | false | false | false |
vgupta6/Project-2 | modules/s3/s3widgets.py | 1 | 139828 | # -*- coding: utf-8 -*-
""" Custom UI Widgets
@requires: U{B{I{gluon}} <http://web2py.com>}
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3HiddenWidget",
"S3DateWidget",
"S3DateTimeWidget",
"S3BooleanWidget",
#"S3UploadWidget",
"S3AutocompleteWidget",
"S3LocationAutocompleteWidget",
"S3LatLonWidget",
"S3OrganisationAutocompleteWidget",
"S3OrganisationHierarchyWidget",
"S3PersonAutocompleteWidget",
"S3HumanResourceAutocompleteWidget",
"S3SiteAutocompleteWidget",
"S3LocationSelectorWidget",
"S3LocationDropdownWidget",
#"S3CheckboxesWidget",
"S3MultiSelectWidget",
"S3ACLWidget",
"CheckboxesWidgetS3",
"S3AddPersonWidget",
"S3AutocompleteOrAddWidget",
"S3AddObjectWidget",
"S3SearchAutocompleteWidget",
"S3TimeIntervalWidget",
"S3EmbedComponentWidget",
"S3KeyValueWidget",
"S3SliderWidget",
"S3InvBinWidget",
"s3_comments_widget",
"s3_richtext_widget",
"s3_checkboxes_widget",
"s3_grouped_checkboxes_widget"
]
import datetime
try:
from lxml import etree
except ImportError:
import sys
print >> sys.stderr, "ERROR: lxml module needed for XML handling"
raise
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
# Here are dependencies listed for reference:
#from gluon import current
#from gluon.dal import Field
#from gluon.html import *
#from gluon.http import HTTP
#from gluon.validators import *
from gluon.sqlhtml import *
from gluon.storage import Storage
from s3utils import *
from s3validators import *
repr_select = lambda l: len(l.name) > 48 and "%s..." % l.name[:44] or l.name
# =============================================================================
class S3HiddenWidget(StringWidget):
"""
Standard String widget, but with a class of hide
- currently unused
"""
def __call__(self, field, value, **attributes):
default = dict(
_type = "text",
value = (value != None and str(value)) or "",
)
attr = StringWidget._attributes(field, default, **attributes)
attr["_class"] = "hide %s" % attr["_class"]
return TAG[""](
INPUT(**attr),
requires = field.requires
)
# =============================================================================
class S3DateWidget(FormWidget):
"""
Standard Date widget, but with a modified yearRange to support Birth dates
"""
def __init__(self,
format = None,
past=1440, # how many months into the past the date can be set to
future=1440 # how many months into the future the date can be set to
):
self.format = format
self.past = past
self.future = future
def __call__(self, field, value, **attributes):
# Need to convert value into ISO-format
# (widget expects ISO, but value comes in custom format)
_format = current.deployment_settings.get_L10n_date_format()
v, error = IS_DATE_IN_RANGE(format=_format)(value)
if not error:
value = v.isoformat()
if self.format:
# default: "yy-mm-dd"
format = str(self.format)
else:
format = _format.replace("%Y", "yy").replace("%y", "y").replace("%m", "mm").replace("%d", "dd").replace("%b", "M")
default = dict(
_type = "text",
value = (value != None and str(value)) or "",
)
attr = StringWidget._attributes(field, default, **attributes)
attr["_class"] = "date"
selector = str(field).replace(".", "_")
current.response.s3.jquery_ready.append(
'''$('#%(selector)s').datepicker('option',{
minDate:'-%(past)sm',
maxDate:'+%(future)sm',
yearRange:'c-100:c+100',
dateFormat:'%(format)s'})''' % \
dict(selector = selector,
past = self.past,
future = self.future,
format = format))
return TAG[""](
INPUT(**attr),
requires = field.requires
)
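
# -----------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original widget set):
# how a model would typically wire S3DateWidget to a date field. The table and
# field names below are assumptions.
def _example_s3date_widget(table):
    """ Illustrative only: typical S3DateWidget configuration """
    # Allow dates up to 12 months either side of today
    table.date_due.widget = S3DateWidget(past=12, future=12)
    table.date_due.requires = IS_DATE_IN_RANGE(
        format=current.deployment_settings.get_L10n_date_format())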
# =============================================================================
class S3DateTimeWidget(FormWidget):
"""
Standard DateTime widget, based on the widget above, but instead of using
jQuery datepicker we use Anytime.
"""
def __init__(self,
format = None,
past=876000, # how many hours into the past the date can be set to
future=876000 # how many hours into the future the date can be set to
):
self.format = format
self.past = past
self.future = future
def __call__(self, field, value, **attributes):
if self.format:
# default: "%Y-%m-%d %T"
format = str(self.format)
else:
format = str(current.deployment_settings.get_L10n_datetime_format())
request = current.request
s3 = current.response.s3
if isinstance(value, datetime.datetime):
value = value.strftime(format)
elif value is None:
value = ""
default = dict(_type = "text",
# Prevent default "datetime" calendar from showing up:
_class = "anytime",
value = value,
old_value = value)
attr = StringWidget._attributes(field, default, **attributes)
selector = str(field).replace(".", "_")
now = request.utcnow
offset = IS_UTC_OFFSET.get_offset_value(current.session.s3.utc_offset)
if offset:
now = now + datetime.timedelta(seconds=offset)
timedelta = datetime.timedelta
earliest = now - timedelta(hours = self.past)
latest = now + timedelta(hours = self.future)
earliest = earliest.strftime(format)
latest = latest.strftime(format)
script_dir = "/%s/static/scripts" % request.application
if s3.debug and \
"%s/anytime.js" % script_dir not in s3.scripts:
s3.scripts.append("%s/anytime.js" % script_dir)
s3.stylesheets.append("plugins/anytime.css")
elif "%s/anytimec.js" % script_dir not in s3.scripts:
s3.scripts.append("%s/anytimec.js" % script_dir)
s3.stylesheets.append("plugins/anytimec.css")
s3.jquery_ready.append(
'''$('#%(selector)s').AnyTime_picker({
askSecond:false,
firstDOW:1,
earliest:'%(earliest)s',
latest:'%(latest)s',
format:'%(format)s',
})
clear_button=$('<input type="button" value="clear"/>').click(function(e){
$('#%(selector)s').val('')
})
$('#%(selector)s').after(clear_button)''' % \
dict(selector=selector,
earliest=earliest,
latest=latest,
format=format.replace("%M", "%i")))
return TAG[""](
INPUT(**attr),
requires = field.requires
)
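
# -----------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original widget set):
# S3DateTimeWidget bounds are expressed in hours. The table and field names
# below are assumptions.
def _example_s3datetime_widget(table):
    """ Illustrative only: restrict the picker to -1 week .. +72 hours """
    table.start_date.widget = S3DateTimeWidget(past=168, future=72)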
# =============================================================================
class S3BooleanWidget(BooleanWidget):
"""
Standard Boolean widget, with an option to hide/reveal fields conditionally.
"""
def __init__(self,
fields = [],
click_to_show = True
):
self.fields = fields
self.click_to_show = click_to_show
def __call__(self, field, value, **attributes):
response = current.response
fields = self.fields
click_to_show = self.click_to_show
default = dict(
_type="checkbox",
value=value,
)
attr = BooleanWidget._attributes(field, default, **attributes)
tablename = field.tablename
hide = ""
show = ""
for _field in fields:
fieldname = "%s_%s" % (tablename, _field)
hide += '''
$('#%s__row1').hide()
$('#%s__row').hide()
''' % (fieldname, fieldname)
show += '''
$('#%s__row1').show()
$('#%s__row').show()
''' % (fieldname, fieldname)
if fields:
checkbox = "%s_%s" % (tablename, field.name)
click_start = '''
$('#%s').click(function(){
if(this.checked){
''' % checkbox
middle = "} else {\n"
click_end = "}})"
if click_to_show:
# Hide by default
script = "%s\n%s\n%s\n%s\n%s\n%s" % (hide, click_start, show, middle, hide, click_end)
else:
# Show by default
script = "%s\n%s\n%s\n%s\n%s\n%s" % (show, click_start, hide, middle, show, click_end)
response.s3.jquery_ready.append(script)
return TAG[""](
INPUT(**attr),
requires = field.requires
)
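
# -----------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original widget set):
# S3BooleanWidget can reveal related rows when the box is ticked. The field
# names listed are assumptions and must belong to the same table.
def _example_s3boolean_widget(table):
    """ Illustrative only: show insurer details only when 'insured' is checked """
    table.insured.widget = S3BooleanWidget(fields=["insurer", "policy_number"],
                                           click_to_show=True)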
# =============================================================================
class S3UploadWidget(UploadWidget):
"""
Subclassed to not show the delete checkbox when field is mandatory
- This now been included as standard within Web2Py from r2867
- Leaving this unused example in the codebase so that we can easily
amend this if we wish to later
"""
@staticmethod
def widget(field, value, download_url=None, **attributes):
"""
generates a INPUT file tag.
Optionally provides an A link to the file, including a checkbox so
the file can be deleted.
All is wrapped in a DIV.
@see: :meth:`FormWidget.widget`
@param download_url: Optional URL to link to the file (default = None)
"""
default=dict(
_type="file",
)
attr = UploadWidget._attributes(field, default, **attributes)
inp = INPUT(**attr)
if download_url and value:
url = "%s/%s" % (download_url, value)
(br, image) = ("", "")
if UploadWidget.is_image(value):
br = BR()
image = IMG(_src = url, _width = UploadWidget.DEFAULT_WIDTH)
requires = attr["requires"]
if requires == [] or isinstance(requires, IS_EMPTY_OR):
inp = DIV(inp, "[",
A(UploadWidget.GENERIC_DESCRIPTION, _href = url),
"|",
INPUT(_type="checkbox",
_name=field.name + UploadWidget.ID_DELETE_SUFFIX),
UploadWidget.DELETE_FILE,
"]", br, image)
else:
inp = DIV(inp, "[",
A(UploadWidget.GENERIC_DESCRIPTION, _href = url),
"]", br, image)
return inp
# =============================================================================
class S3AutocompleteWidget(FormWidget):
"""
Renders a SELECT as an INPUT field with AJAX Autocomplete
"""
def __init__(self,
module,
resourcename,
fieldname = "name",
filter = "", # REST filter
link_filter = "",
#new_items = False, # Whether to make this a combo box
post_process = "",
delay = 450, # milliseconds
min_length = 2): # Increase this for large deployments
self.module = module
self.resourcename = resourcename
self.fieldname = fieldname
self.filter = filter
self.link_filter = link_filter
#self.new_items = new_items
self.delay = delay
self.min_length = min_length
# @ToDo: Refreshes all dropdowns as-necessary
self.post_process = post_process or ""
def __call__(self, field, value, **attributes):
default = dict(
_type = "text",
value = (value != None and str(value)) or "",
)
attr = StringWidget._attributes(field, default, **attributes)
# Hide the real field
attr["_class"] = attr["_class"] + " hide"
real_input = str(field).replace(".", "_")
dummy_input = "dummy_%s" % real_input
# Script defined in static/scripts/S3/S3.js
js_autocomplete = '''S3.autocomplete('%s','%s','%s','%s','%s','%s',\"%s\",%s,%s)''' % \
(self.fieldname, self.module, self.resourcename, real_input, self.filter,
self.link_filter, self.post_process, self.delay, self.min_length)
if value:
text = str(field.represent(default["value"]))
if "<" in text:
# Strip Markup
try:
markup = etree.XML(text)
text = markup.xpath(".//text()")
if text:
text = " ".join(text)
else:
text = ""
except etree.XMLSyntaxError:
pass
represent = text
else:
represent = ""
current.response.s3.jquery_ready.append(js_autocomplete)
return TAG[""](
INPUT(_id=dummy_input,
_class="string",
_value=represent),
IMG(_src="/%s/static/img/ajax-loader.gif" % \
current.request.application,
_height=32, _width=32,
_id="%s_throbber" % dummy_input,
_class="throbber hide"),
INPUT(**attr),
requires = field.requires
)
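
# -----------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original widget set):
# the module/resourcename arguments point at the controller/function that
# serves <module>/<resourcename>/search.json. The office example below is an
# assumption.
def _example_s3autocomplete_widget(table):
    """ Illustrative only: autocomplete office names via /org/office/search.json """
    table.office_id.widget = S3AutocompleteWidget("org", "office",
                                                  fieldname="name")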
# =============================================================================
class S3LocationAutocompleteWidget(FormWidget):
"""
Renders a gis_location SELECT as an INPUT field with AJAX Autocomplete
@note: differs from the S3AutocompleteWidget:
- needs to have deployment_settings passed-in
- excludes unreliable imported records (Level 'XX')
Appropriate when the location has been previously created (as is the
case for location groups or other specialized locations that need
the location create form).
S3LocationSelectorWidget may be more appropriate for specific locations.
Currently used for selecting the region location in gis_config
and for project/location.
@todo: .represent for the returned data
@todo: Refreshes any dropdowns as-necessary (post_process)
"""
def __init__(self,
prefix="gis",
resourcename="location",
fieldname="name",
level="",
hidden = False,
post_process = "",
delay = 450, # milliseconds
min_length = 2): # Increase this for large deployments
self.prefix = prefix
self.resourcename = resourcename
self.fieldname = fieldname
self.level = level
self.hidden = hidden
self.post_process = post_process
self.delay = delay
self.min_length = min_length
def __call__(self, field, value, **attributes):
fieldname = self.fieldname
level = self.level
if level:
if isinstance(level, list):
                # Join the levels into the "|"-separated list expected by the search
                levels = "|".join(level)
url = URL(c=self.prefix,
f=self.resourcename,
args="search.json",
vars={"filter":"~",
"field":fieldname,
"level":levels,
"simple":1,
})
else:
url = URL(c=self.prefix,
f=self.resourcename,
args="search.json",
vars={"filter":"~",
"field":fieldname,
"level":level,
"simple":1,
})
else:
url = URL(c=self.prefix,
f=self.resourcename,
args="search.json",
vars={"filter":"~",
"field":fieldname,
"simple":1,
})
# Which Levels do we have in our hierarchy & what are their Labels?
#location_hierarchy = current.deployment_settings.gis.location_hierarchy
#try:
# # Ignore the bad bulk-imported data
# del location_hierarchy["XX"]
#except:
# pass
# @ToDo: Something nicer (i.e. server-side formatting within S3LocationSearch)
name_getter = \
'''function(item){
if(item.level=="L0"){return item.name+" (%(country)s)"
}else if(item.level=="L1"){return item.name+" ("+item.L0+")"
}else if(item.level=="L2"){return item.name+" ("+item.L1+","+item.L0+")"
}else if(item.level=="L3"){return item.name+" ("+item.L2+","+item.L1+","+item.L0+")"
}else if(item.level=="L4"){return item.name+" ("+item.L3+","+item.L2+","+item.L1+","+item.L0+")"
}else{return item.name}}''' % dict(country = current.messages.COUNTRY)
return S3GenericAutocompleteTemplate(
self.post_process,
self.delay,
self.min_length,
field,
value,
attributes,
source = repr(url),
name_getter = name_getter,
)
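
# -----------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original widget set):
# the level argument (a string or list of Lx codes) restricts the matches
# offered by the autocomplete. Table/field names are assumptions.
def _example_location_autocomplete(table):
    """ Illustrative only: match only first-level administrative areas """
    table.location_id.widget = S3LocationAutocompleteWidget(level="L1")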
# =============================================================================
class S3OrganisationAutocompleteWidget(FormWidget):
"""
Renders an org_organisation SELECT as an INPUT field with AJAX Autocomplete.
Differs from the S3AutocompleteWidget in that it can default to the setting in the profile.
@ToDo: Add an option to hide the widget completely when using the Org from the Profile
- i.e. prevent user overrides
"""
def __init__(self,
post_process = "",
default_from_profile = False,
new_items = False, # Whether to make this a combo box
delay = 450, # milliseconds
min_length = 2): # Increase this for large deployments
self.post_process = post_process
self.delay = delay
self.min_length = min_length
self.new_items = new_items
self.default_from_profile = default_from_profile
def __call__(self, field, value, **attributes):
def transform_value(value):
if not value and self.default_from_profile:
auth = current.session.auth
if auth and auth.user:
value = auth.user.organisation_id
return value
return S3GenericAutocompleteTemplate(
self.post_process,
self.delay,
self.min_length,
field,
value,
attributes,
transform_value = transform_value,
new_items = self.new_items,
tablename = "org_organisation",
source = repr(
URL(c="org", f="org_search",
args="search.json",
vars={"filter":"~"})
)
)
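
# -----------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original widget set):
# default_from_profile pre-selects the organisation of the logged-in user.
# Table/field names are assumptions.
def _example_organisation_autocomplete(table):
    """ Illustrative only: default the widget to the user's organisation """
    table.organisation_id.widget = \
        S3OrganisationAutocompleteWidget(default_from_profile=True)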
# =============================================================================
class S3OrganisationHierarchyWidget(OptionsWidget):
""" Renders an organisation_id SELECT as a menu """
_class = "widget-org-hierarchy"
def __init__(self, primary_options=None):
"""
[{"id":12, "pe_id":4, "name":"Organisation Name"}]
"""
self.primary_options = primary_options
def __call__(self, field, value, **attributes):
options = self.primary_options
name = attributes.get("_name", field.name)
if options is None:
requires = field.requires
if isinstance(requires, (list, tuple)) and \
len(requires):
requires = requires[0]
if requires is not None:
if isinstance(requires, IS_EMPTY_OR):
requires = requires.other
if hasattr(requires, "options"):
table = current.s3db.org_organisation
options = requires.options()
ids = [option[0] for option in options if option[0]]
rows = current.db(table.id.belongs(ids)).select(table.id,
table.pe_id,
table.name,
orderby=table.name)
options = []
for row in rows:
options.append(row.as_dict())
else:
raise SyntaxError, "widget cannot determine options of %s" % field
javascript_array = '''%s_options=%s''' % (name,
json.dumps(options))
s3 = current.response.s3
s3.js_global.append(javascript_array)
s3.scripts.append("/%s/static/scripts/S3/s3.orghierarchy.js" % \
current.request.application)
s3.stylesheets.append("jquery-ui/jquery.ui.menu.css")
return self.widget(field, value, **attributes)
# =============================================================================
class S3PersonAutocompleteWidget(FormWidget):
"""
Renders a pr_person SELECT as an INPUT field with AJAX Autocomplete.
Differs from the S3AutocompleteWidget in that it uses 3 name fields
To make this widget use the HR table, set the controller to "hrm"
@ToDo: Migrate to template (initial attempt failed)
"""
def __init__(self,
controller = "pr",
function = "person_search",
post_process = "",
delay = 450, # milliseconds
min_length=2): # Increase this for large deployments
self.post_process = post_process
self.delay = delay
self.min_length = min_length
self.c = controller
self.f = function
def __call__(self, field, value, **attributes):
default = dict(
_type = "text",
value = (value != None and str(value)) or "",
)
attr = StringWidget._attributes(field, default, **attributes)
# Hide the real field
attr["_class"] = "%s hide" % attr["_class"]
real_input = str(field).replace(".", "_")
dummy_input = "dummy_%s" % real_input
url = URL(c=self.c,
f=self.f,
args="search.json")
js_autocomplete = "".join((
'''var %(real_input)s={val:$('#%(dummy_input)s').val(),accept:false}
$('#%(dummy_input)s').autocomplete({
source:'%(url)s',
delay:%(delay)d,
minLength:%(min_length)d,
search:function(event,ui){
$('#%(dummy_input)s_throbber').removeClass('hide').show()
return true
},
response:function(event,ui,content){
$('#%(dummy_input)s_throbber').hide()
return content
},
focus:function(event,ui){
var name=ui.item.first
if(ui.item.middle){
name+=' '+ui.item.middle
}
if(ui.item.last){
name+=' '+ui.item.last
}
$('#%(dummy_input)s').val(name)
return false
},
select:function(event,ui){
var name=ui.item.first
if(ui.item.middle){
name+=' '+ui.item.middle
}
if(ui.item.last){
name+=' '+ui.item.last
}
$('#%(dummy_input)s').val(name)
$('#%(real_input)s').val(ui.item.id).change()
''' % dict(dummy_input = dummy_input,
url = url,
delay = self.delay,
min_length = self.min_length,
real_input = real_input),
self.post_process, '''
%(real_input)s.accept=true
return false
}
}).data('autocomplete')._renderItem=function(ul,item){
var name=item.first
if(item.middle){
name+=' '+item.middle
}
if(item.last){
name+=' '+item.last
}
return $('<li></li>').data('item.autocomplete',item).append('<a>'+name+'</a>').appendTo(ul)
}
$('#%(dummy_input)s').blur(function(){
if(!$('#%(dummy_input)s').val()){
$('#%(real_input)s').val('').change()
%(real_input)s.accept=true
}
if(!%(real_input)s.accept){
$('#%(dummy_input)s').val(%(real_input)s.val)
}else{
%(real_input)s.val=$('#%(dummy_input)s').val()
}
%(real_input)s.accept=false
})''' % dict(dummy_input = dummy_input,
real_input = real_input)))
if value:
# Provide the representation for the current/default Value
text = str(field.represent(default["value"]))
if "<" in text:
# Strip Markup
try:
markup = etree.XML(text)
text = markup.xpath(".//text()")
if text:
text = " ".join(text)
else:
text = ""
except etree.XMLSyntaxError:
pass
represent = text
else:
represent = ""
current.response.s3.jquery_ready.append(js_autocomplete)
return TAG[""](
INPUT(_id=dummy_input,
_class="string",
_value=represent),
IMG(_src="/%s/static/img/ajax-loader.gif" % \
current.request.application,
_height=32, _width=32,
_id="%s_throbber" % dummy_input,
_class="throbber hide"),
INPUT(**attr),
requires = field.requires
)
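
# -----------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original widget set):
# as noted in the docstring above, pointing the controller at "hrm" makes the
# autocomplete search the HR table instead of all persons. Table/field names
# are assumptions.
def _example_person_autocomplete(table):
    """ Illustrative only: person autocomplete backed by the HRM controller """
    table.person_id.widget = S3PersonAutocompleteWidget(controller="hrm")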
# =============================================================================
class S3HumanResourceAutocompleteWidget(FormWidget):
"""
Renders an hrm_human_resource SELECT as an INPUT field with
AJAX Autocomplete.
Differs from the S3AutocompleteWidget in that it uses:
3 name fields
Organisation
Job Role
"""
def __init__(self,
post_process = "",
delay = 450, # milliseconds
min_length=2, # Increase this for large deployments
group=None, # Filter to staff/volunteers
):
self.post_process = post_process
self.delay = delay
self.min_length = min_length
self.group = group
def __call__(self, field, value, **attributes):
request = current.request
response = current.response
default = dict(
_type = "text",
value = (value != None and str(value)) or "",
)
attr = StringWidget._attributes(field, default, **attributes)
# Hide the real field
attr["_class"] = "%s hide" % attr["_class"]
real_input = str(field).replace(".", "_")
dummy_input = "dummy_%s" % real_input
group = self.group
if group == "staff":
# Search Staff using S3HRSearch
url = URL(c="hrm",
f="person_search",
args="search.json",
vars={"group":"staff"})
elif group == "volunteer":
# Search Volunteers using S3HRSearch
url = URL(c="vol",
f="person_search",
args="search.json")
else:
# Search all HRs using S3HRSearch
url = URL(c="hrm",
f="person_search",
args="search.json")
js_autocomplete = "".join((
'''var %(real_input)s={val:$('#%(dummy_input)s').val(),accept:false}
$('#%(dummy_input)s').autocomplete({
source:'%(url)s',
delay:%(delay)d,
minLength:%(min_length)d,
search:function(event,ui){
$('#%(dummy_input)s_throbber').removeClass('hide').show()
return true
},
response:function(event,ui,content){
$('#%(dummy_input)s_throbber').hide()
return content
},
focus:function(event,ui){
var name=ui.item.first
if(ui.item.middle){
name+=' '+ui.item.middle
}
if(ui.item.last){
name+=' '+ui.item.last
}
var org=ui.item.org
var job=ui.item.job
if(org||job){
if(job){
name+=' ('+job
if(org){
name+=', '+org
}
name+=')'
}else{
name+=' ('+org+')'
}
}
$('#%(dummy_input)s').val(name)
return false
},
select:function(event,ui){
var name=ui.item.first
if(ui.item.middle){
name+=' '+ui.item.middle
}
if(ui.item.last){
name+=' '+ui.item.last
}
var org=ui.item.org
var job=ui.item.job
if(org||job){
if(job){
name+=' ('+job
if(org){
name+=', '+org
}
name+=')'
}else{
name+=' ('+org+')'
}
}
$('#%(dummy_input)s').val(name)
$('#%(real_input)s').val(ui.item.id).change()
''' % dict(dummy_input = dummy_input,
url = url,
delay = self.delay,
min_length = self.min_length,
real_input = real_input),
self.post_process, '''
%(real_input)s.accept=true
return false
}
}).data('autocomplete')._renderItem=function(ul,item){
var name=item.first
if(item.middle){
name+=' '+item.middle
}
if(item.last){
name+=' '+item.last
}
var org=item.org
var job=item.job
if(org||job){
if(job){
name+=' ('+job
if(org){
name+=', '+org
}
name+=')'
}else{
name+=' ('+org+')'
}
}
return $('<li></li>').data('item.autocomplete',item).append('<a>'+name+'</a>').appendTo(ul)
}
$('#%(dummy_input)s').blur(function(){
if(!$('#%(dummy_input)s').val()){
$('#%(real_input)s').val('').change()
%(real_input)s.accept=true
}
if(!%(real_input)s.accept){
$('#%(dummy_input)s').val(%(real_input)s.val)
}else{
%(real_input)s.val=$('#%(dummy_input)s').val()
}
%(real_input)s.accept=false
})''' % dict(dummy_input = dummy_input,
real_input = real_input)))
if value:
# Provide the representation for the current/default Value
text = str(field.represent(default["value"]))
if "<" in text:
# Strip Markup
try:
markup = etree.XML(text)
text = markup.xpath(".//text()")
if text:
text = " ".join(text)
else:
text = ""
except etree.XMLSyntaxError:
pass
represent = text
else:
represent = ""
response.s3.jquery_ready.append(js_autocomplete)
return TAG[""](
INPUT(_id=dummy_input,
_class="string",
_value=represent),
IMG(_src="/%s/static/img/ajax-loader.gif" % \
request.application,
_height=32, _width=32,
_id="%s_throbber" % dummy_input,
_class="throbber hide"),
INPUT(**attr),
requires = field.requires
)
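
# -----------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original widget set):
# the group argument filters the matches to staff or volunteers. Table/field
# names are assumptions.
def _example_human_resource_autocomplete(table):
    """ Illustrative only: restrict matches to volunteers """
    table.human_resource_id.widget = \
        S3HumanResourceAutocompleteWidget(group="volunteer")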
# =============================================================================
class S3SiteAutocompleteWidget(FormWidget):
"""
Renders an org_site SELECT as an INPUT field with AJAX Autocomplete.
Differs from the S3AutocompleteWidget in that it uses name & type fields
in the represent
"""
def __init__(self,
post_process = "",
delay = 450, # milliseconds
min_length = 2):
self.auth = current.auth
self.post_process = post_process
self.delay = delay
self.min_length = min_length
def __call__(self, field, value, **attributes):
default = dict(
_type = "text",
value = (value != None and str(value)) or "",
)
attr = StringWidget._attributes(field, default, **attributes)
# Hide the real field
attr["_class"] = "%s hide" % attr["_class"]
real_input = str(field).replace(".", "_")
dummy_input = "dummy_%s" % real_input
url = URL(c="org", f="site",
args="search.json",
vars={"filter":"~",
"field":"name"})
# Provide a Lookup Table for Site Types
cases = ""
case = -1
org_site_types = current.auth.org_site_types
for instance_type in org_site_types.keys():
case = case + 1
cases += '''case '%s':
return '%s'
''' % (instance_type,
org_site_types[instance_type])
js_autocomplete = "".join(('''function s3_site_lookup(instance_type){
switch(instance_type){
%s}
}''' % cases, '''
var %(real_input)s={val:$('#%(dummy_input)s').val(),accept:false}
$('#%(dummy_input)s').autocomplete({
source:'%(url)s',
delay:%(delay)d,
minLength:%(min_length)d,
search:function(event,ui){
$('#%(dummy_input)s_throbber').removeClass('hide').show()
return true
},
response:function(event,ui,content){
$('#%(dummy_input)s_throbber').hide()
return content
},
focus:function(event,ui){
var name=''
if(ui.item.name!=null){
name+=ui.item.name
}
if(ui.item.instance_type!=''){
name+=' ('+s3_site_lookup(ui.item.instance_type)+')'
}
$('#%(dummy_input)s').val(name)
return false
},
select:function(event,ui){
var name=''
if(ui.item.name!=null){
name+=ui.item.name
}
if(ui.item.instance_type!=''){
name+=' ('+s3_site_lookup(ui.item.instance_type)+')'
}
$('#%(dummy_input)s').val(name)
$('#%(real_input)s').val(ui.item.site_id).change()
''' % dict(dummy_input=dummy_input,
real_input=real_input,
url=url,
delay=self.delay,
min_length=self.min_length),
self.post_process, '''
%(real_input)s.accept=true
return false
}
}).data('autocomplete')._renderItem=function(ul,item){
var name=''
if(item.name!=null){
name+=item.name
}
if(item.instance_type!=''){
name+=' ('+s3_site_lookup(item.instance_type)+')'
}
return $('<li></li>').data('item.autocomplete',item).append('<a>'+name+'</a>').appendTo(ul)
}
$('#%(dummy_input)s').blur(function(){
if(!$('#%(dummy_input)s').val()){
$('#%(real_input)s').val('').change()
%(real_input)s.accept=true
}
if(!%(real_input)s.accept){
$('#%(dummy_input)s').val(%(real_input)s.val)
}else{
%(real_input)s.val=$('#%(dummy_input)s').val()
}
%(real_input)s.accept=false
})''' % dict(dummy_input=dummy_input,
real_input=real_input)))
if value:
# Provide the representation for the current/default Value
text = str(field.represent(default["value"]))
if "<" in text:
# Strip Markup
try:
markup = etree.XML(text)
text = markup.xpath(".//text()")
if text:
text = " ".join(text)
else:
text = ""
except etree.XMLSyntaxError:
pass
represent = text
else:
represent = ""
current.response.s3.jquery_ready.append(js_autocomplete)
return TAG[""](
INPUT(_id=dummy_input,
_class="string",
_value=represent),
IMG(_src="/%s/static/img/ajax-loader.gif" % \
current.request.application,
_height=32, _width=32,
_id="%s_throbber" % dummy_input,
_class="throbber hide"),
INPUT(**attr),
requires = field.requires
)
# -----------------------------------------------------------------------------
def S3GenericAutocompleteTemplate(post_process,
delay,
min_length,
field,
value,
attributes,
source,
name_getter = "function(item){return item.name}",
id_getter = "function(item){return item.id}",
transform_value = lambda value: value,
new_items = False, # Allow new items
tablename = None, # Needed if new_items=True
):
"""
Renders a SELECT as an INPUT field with AJAX Autocomplete
"""
value = transform_value(value)
default = dict(
_type = "text",
value = (value != None and s3_unicode(value)) or "",
)
attr = StringWidget._attributes(field, default, **attributes)
# Hide the real field
attr["_class"] = attr["_class"] + " hide"
real_input = str(field).replace(".", "_")
dummy_input = "dummy_%s" % real_input
js_autocomplete = "".join(('''
var %(real_input)s={val:$('#%(dummy_input)s').val(),accept:false}
var get_name=%(name_getter)s
var get_id=%(id_getter)s
$('#%(dummy_input)s').autocomplete({
source:%(source)s,
delay:%(delay)d,
minLength:%(min_length)d,
search:function(event,ui){
$('#%(dummy_input)s_throbber').removeClass('hide').show()
return true
},
response:function(event,ui,content){
$('#%(dummy_input)s_throbber').hide()
return content
},
focus:function(event,ui){
$('#%(dummy_input)s').val(get_name(ui.item))
return false
},
select:function(event,ui){
var item=ui.item
$('#%(dummy_input)s').val(get_name(ui.item))
$('#%(real_input)s').val(get_id(ui.item)).change()
''' % locals(),
post_process or "",
'''
%(real_input)s.accept=true
return false
}
}).data('autocomplete')._renderItem=function(ul,item){
return $('<li></li>').data('item.autocomplete',item).append('<a>'+get_name(item)+'</a>').appendTo(ul)
}''' % locals(),
'''
$('#%(dummy_input)s').blur(function(){
$('#%(real_input)s').val($('#%(dummy_input)s').val())
})''' % locals() if new_items else
'''
$('#%(dummy_input)s').blur(function(){
if(!$('#%(dummy_input)s').val()){
$('#%(real_input)s').val('').change()
%(real_input)s.accept=true
}
if(!%(real_input)s.accept){
$('#%(dummy_input)s').val(%(real_input)s.val)
}else{
%(real_input)s.val=$('#%(dummy_input)s').val()
}
%(real_input)s.accept=false
})''' % locals()))
if value:
# Provide the representation for the current/default Value
text = s3_unicode(field.represent(default["value"]))
if "<" in text:
# Strip Markup
try:
markup = etree.XML(text)
text = markup.xpath(".//text()")
if text:
text = " ".join(text)
else:
text = ""
except etree.XMLSyntaxError:
pass
represent = text
else:
represent = ""
current.response.s3.jquery_ready.append(js_autocomplete)
return TAG[""](
INPUT(_id=dummy_input,
_class="string",
value=represent),
IMG(_src="/%s/static/img/ajax-loader.gif" % \
current.request.application,
_height=32, _width=32,
_id="%s_throbber" % dummy_input,
_class="throbber hide"),
INPUT(**attr),
requires = field.requires
)
# =============================================================================
class S3LocationDropdownWidget(FormWidget):
"""
Renders a dropdown for an Lx level of location hierarchy
"""
def __init__(self, level="L0", default=None, empty=False):
""" Set Defaults """
self.level = level
self.default = default
self.empty = empty
def __call__(self, field, value, **attributes):
level = self.level
default = self.default
empty = self.empty
s3db = current.s3db
table = s3db.gis_location
query = (table.level == level)
locations = current.db(query).select(table.name,
table.id,
cache=s3db.cache)
opts = []
for location in locations:
opts.append(OPTION(location.name, _value=location.id))
if not value and default and location.name == default:
value = location.id
locations = locations.as_dict()
attr_dropdown = OptionsWidget._attributes(field,
dict(_type = "int",
value = value))
requires = IS_IN_SET(locations)
if empty:
requires = IS_NULL_OR(requires)
attr_dropdown["requires"] = requires
attr_dropdown["represent"] = \
lambda id: locations["id"]["name"] or UNKNOWN_OPT
return TAG[""](
SELECT(*opts, **attr_dropdown),
requires=field.requires
)
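
# -----------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original widget set):
# a plain dropdown over one level of the location hierarchy. The default is
# matched against the location name; "Nepal" is an assumption.
def _example_location_dropdown(table):
    """ Illustrative only: country dropdown with a preselected default """
    table.location_id.widget = S3LocationDropdownWidget(level="L0",
                                                        default="Nepal",
                                                        empty=False)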
# =============================================================================
class S3LocationSelectorWidget(FormWidget):
"""
Renders a gis_location Foreign Key to allow inline display/editing of linked fields.
        Designed for use with Resources which require a Specific Location, such as Sites, Persons, Assets, Incidents, etc.
Not currently suitable for Resources which require a Hierarchical Location, such as Projects, Assessments, Plans, etc
- S3LocationAutocompleteWidget is more appropriate for these.
Can also be used to transparently wrap simple sites (such as project_site) using the IS_SITE_SELECTOR() validator
It uses s3.locationselector.widget.js to do all client-side functionality.
It requires the IS_LOCATION_SELECTOR() validator to process Location details upon form submission.
Create form
Active Tab: 'Create New Location'
Country Dropdown (to set the Number & Labels of Hierarchy)
Building Name (deployment_setting to hide)
Street Address (Line1/Line2?)
@ToDo: Trigger a geocoder lookup onblur
Postcode
@ToDo: Mode Strict:
Lx as dropdowns. Default label is 'Select previous to populate this dropdown' (Fixme!)
Mode not Strict (default):
L2-L5 as Autocompletes which create missing locations automatically
@ToDo: L1 as Dropdown? (Have a gis_config setting to inform whether this is populated for a given L0)
Map:
@ToDo: Inline or Popup? (Deployment Option?)
Set Map Viewport to default on best currently selected Hierarchy
@ToDo: L1+
Lat Lon
Inactive Tab: 'Select Existing Location'
Needs 2 modes:
Specific Locations only - for Sites/Incidents
@ToDo: Hierarchies ok (can specify which) - for Projects/Documents
@ToDo: Hierarchical Filters above the Search Box
Search is filtered to values shown
Filters default to any hierarchy selected on the Create tab?
Button to explicitly say 'Select this Location' which sets all the fields (inc hidden ID) & the UUID var
Tabs then change to View/Edit
Update form
Update form has uuid set server-side & hence S3.gis.uuid set client-side
Assume location is shared by other resources
Active Tab: 'View Location Details' (Fields are read-only)
Inactive Tab: 'Edit Location Details' (Fields are writable)
@ToDo: Inactive Tab: 'Move Location': Defaults to Searching for an Existing Location, with a button to 'Create New Location'
@see: http://eden.sahanafoundation.org/wiki/BluePrintGISLocationSelector
"""
def __init__(self,
hide_address=False,
site_type=None,
polygon=False):
self.hide_address = hide_address
self.site_type = site_type
self.polygon = polygon
def __call__(self, field, value, **attributes):
T = current.T
db = current.db
s3db = current.s3db
gis = current.gis
auth = current.auth
settings = current.deployment_settings
response = current.response
s3 = current.response.s3
appname = current.request.application
locations = s3db.gis_location
ctable = s3db.gis_config
requires = field.requires
# Main Input
defaults = dict(_type = "text",
value = (value != None and str(value)) or "")
attr = StringWidget._attributes(field, defaults, **attributes)
# Hide the real field
attr["_class"] = "hide"
# Is this a Site?
site = ""
if self.site_type:
# We are acting on a site_id not location_id
# Store the real variables
#site_value = value
#site_field = field
# Ensure that we have a name for the Location visible
settings.gis.building_name = True
# Set the variables to what they would be for a Location
stable = s3db[self.site_type]
field = stable.location_id
if value:
query = (stable.id == value)
record = db(query).select(stable.location_id,
limitby=(0, 1)).first()
if record:
value = record.location_id
defaults = dict(_type = "text",
value = str(value))
else:
raise HTTP(404)
else:
# Check for being a location_id on a site type
# If so, then the JS defaults the Building Name to Site Name
tablename = field.tablename
if tablename in auth.org_site_types:
site = tablename
# Full list of countries (by ID)
countries = gis.get_countries()
# Countries we should select from
_countries = settings.get_gis_countries()
if _countries:
__countries = gis.get_countries(key_type="code")
countrynames = []
append = countrynames.append
for k, v in __countries.iteritems():
if k in _countries:
append(v)
            # Iterate over a copy of the items since we delete from the dict
            for k, v in countries.items():
if v not in countrynames:
del countries[k]
# Read Options
config = gis.get_config()
default_L0 = Storage()
country_snippet = ""
if value:
default_L0.id = gis.get_parent_country(value)
elif config.default_location_id:
# Populate defaults with IDs & Names of ancestors at each level
defaults = gis.get_parent_per_level(defaults,
config.default_location_id,
feature=None,
ids=True,
names=True)
query = (locations.id == config.default_location_id)
default_location = db(query).select(locations.level,
locations.name).first()
if default_location.level:
# Add this one to the defaults too
defaults[default_location.level] = Storage(name = default_location.name,
id = config.default_location_id)
if "L0" in defaults:
default_L0 = defaults["L0"]
if default_L0:
id = default_L0.id
if id not in countries:
# Add the default country to the list of possibles
countries[id] = defaults["L0"].name
country_snippet = "S3.gis.country = '%s';\n" % \
gis.get_default_country(key_type="code")
elif len(countries) == 1:
default_L0.id = countries.keys()[0]
# Should we use a Map-based selector?
map_selector = settings.get_gis_map_selector()
if map_selector:
no_map = ""
else:
no_map = "S3.gis.no_map = true;\n"
# Should we display LatLon boxes?
latlon_selector = settings.get_gis_latlon_selector()
        # Should we display Polygons?
polygon = self.polygon
# Navigate Away Confirm?
if settings.get_ui_navigate_away_confirm():
navigate_away_confirm = '''
S3.navigate_away_confirm=true'''
else:
navigate_away_confirm = ""
# Which tab should the widget open to by default?
# @ToDo: Act on this server-side instead of client-side
if s3.gis.tab:
tab = '''
S3.gis.tab="%s"''' % s3.gis.tab
else:
# Default to Create
tab = ""
# Which Levels do we have in our hierarchy & what are their initial Labels?
# If we have a default country or one from the value then we can lookup
# the labels we should use for that location
country = None
if default_L0:
country = default_L0.id
location_hierarchy = gis.get_location_hierarchy(location=country)
# This is all levels to start, but L0 will be dropped later.
levels = gis.hierarchy_level_keys
map_popup = ""
if value:
# Read current record
if auth.s3_has_permission("update", locations, record_id=value):
# Update mode
# - we assume this location could be shared by other resources
create = "hide" # Hide sections which are meant for create forms
update = ""
query = (locations.id == value)
this_location = db(query).select(locations.uuid,
locations.name,
locations.level,
locations.inherited,
locations.lat,
locations.lon,
locations.addr_street,
locations.addr_postcode,
locations.parent,
locations.path,
locations.wkt,
limitby=(0, 1)).first()
if this_location:
uid = this_location.uuid
level = this_location.level
defaults[level] = Storage()
defaults[level].id = value
if this_location.inherited:
lat = None
lon = None
wkt = None
else:
lat = this_location.lat
lon = this_location.lon
wkt = this_location.wkt
addr_street = this_location.addr_street or ""
#addr_street_encoded = ""
#if addr_street:
# addr_street_encoded = addr_street.replace("\r\n",
# "%0d").replace("\r",
# "%0d").replace("\n",
# "%0d")
postcode = this_location.addr_postcode
parent = this_location.parent
path = this_location.path
# Populate defaults with IDs & Names of ancestors at each level
defaults = gis.get_parent_per_level(defaults,
value,
feature=this_location,
ids=True,
names=True)
# If we have a non-specific location then not all keys will be populated.
# Populate these now:
for l in levels:
try:
defaults[l]
except:
defaults[l] = None
if level and not level == "XX":
# If within the locations hierarchy then don't populate the visible name box
represent = ""
else:
represent = this_location.name
if map_selector:
zoom = config.zoom
if zoom == None:
zoom = 1
if lat is None or lon is None:
map_lat = config.lat
map_lon = config.lon
else:
map_lat = lat
map_lon = lon
query = (locations.id == value)
row = db(query).select(locations.lat,
locations.lon,
limitby=(0, 1)).first()
if row:
feature = {"lat" : row.lat,
"lon" : row.lon }
features = [feature]
else:
features = []
map_popup = gis.show_map(
lat = map_lat,
lon = map_lon,
# Same as a single zoom on a cluster
zoom = zoom + 2,
features = features,
add_feature = True,#False,#True
add_feature_active = not polygon,#False,#not polygon,
add_polygon = polygon,#False,#polygon
add_polygon_active = polygon,#False,#polygon,
toolbar = True,
collapsed = True,
search = True,
window = True,
window_hide = True,
location_selector = True
)
else:
# Bad location_id
response.error = T("Invalid Location!")
value = None
elif auth.s3_has_permission("read", locations, record_id=value):
# Read mode
# @ToDo
return ""
else:
# No Permission to read location, so don't render a row
return ""
if not value:
# No default value
# Check that we're allowed to create records
if auth.s3_has_permission("update", locations):
# Create mode
create = ""
update = "hide" # Hide sections which are meant for update forms
uuid = ""
represent = ""
level = None
lat = None
lon = None
wkt = None
addr_street = ""
#addr_street_encoded = ""
postcode = ""
if map_selector:
map_popup = gis.show_map(
add_feature = True,
add_feature_active =not polygon, #False,#not polygon,
add_polygon =polygon, #False,#polygon,
add_polygon_active = polygon,#False,#polygon,
toolbar = True,
collapsed = True,
search = True,
window = True,
window_hide = True,
location_selector = True
)
else:
# No Permission to create a location, so don't render a row
return ""
# JS snippets of config
# (we only include items with data)
s3_gis_lat_lon = ""
# Components to inject into Form
divider = TR(TD(_class="subheading"),
_class="box_bottom locselect")
expand_button = DIV(_id="gis_location_expand", _class="expand")
label_row = TR(TD(expand_button, B("%s:" % field.label)),
_id="gis_location_label_row",
_class="box_top")
# Tabs to select between the modes
# @ToDo: Move Location tab
view_button = A(T("View Location Details"),
_style="cursor:pointer; cursor:hand",
_id="gis_location_view-btn")
edit_button = A(T("Edit Location Details"),
_style="cursor:pointer; cursor:hand",
_id="gis_location_edit-btn")
add_button = A(T("Create New Location"),
_style="cursor:pointer; cursor:hand",
_id="gis_location_add-btn")
search_button = A(T("Select Existing Location"),
_style="cursor:pointer; cursor:hand",
_id="gis_location_search-btn")
tabs = DIV(SPAN(add_button, _id="gis_loc_add_tab",
_class="tab_here %s" % create),
SPAN(search_button, _id="gis_loc_search_tab",
_class="tab_last %s" % create),
SPAN(view_button, _id="gis_loc_view_tab",
_class="tab_here %s" % update),
SPAN(edit_button, _id="gis_loc_edit_tab",
_class="tab_last %s" % update),
_class="tabs")
tab_rows = TR(TD(tabs), TD(),
_id="gis_location_tabs_row",
_class="locselect box_middle")
# L0 selector
SELECT_COUNTRY = T("Choose country")
level = "L0"
L0_rows = ""
if len(countries) == 1:
# Hard-coded country
id = countries.items()[0][0]
L0_rows = INPUT(value = id,
_id="gis_location_%s" % level,
_name="gis_location_%s" % level,
_class="hide box_middle")
else:
if default_L0:
attr_dropdown = OptionsWidget._attributes(field,
dict(_type = "int",
value = default_L0.id),
**attributes)
else:
attr_dropdown = OptionsWidget._attributes(field,
dict(_type = "int",
value = ""),
**attributes)
attr_dropdown["requires"] = \
IS_NULL_OR(IS_IN_SET(countries,
zero = SELECT_COUNTRY))
attr_dropdown["represent"] = \
lambda id: gis.get_country(id) or UNKNOWN_OPT
opts = [OPTION(SELECT_COUNTRY, _value="")]
if countries:
for (id, name) in countries.iteritems():
opts.append(OPTION(name, _value=id))
attr_dropdown["_id"] = "gis_location_%s" % level
## Old: Need to blank the name to prevent it from appearing in form.vars & requiring validation
#attr_dropdown["_name"] = ""
attr_dropdown["_name"] = "gis_location_%s" % level
if value:
# Update form => read-only
attr_dropdown["_disabled"] = "disabled"
try:
attr_dropdown["value"] = defaults[level].id
except:
pass
widget = SELECT(*opts, **attr_dropdown)
label = LABEL("%s:" % location_hierarchy[level])
L0_rows = DIV(TR(TD(label), TD(),
_class="locselect box_middle",
_id="gis_location_%s_label__row" % level),
TR(TD(widget), TD(),
_class="locselect box_middle",
_id="gis_location_%s__row" % level))
row = TR(INPUT(_id="gis_location_%s_search" % level,
_disabled="disabled"), TD(),
_class="hide locselect box_middle",
_id="gis_location_%s_search__row" % level)
L0_rows.append(row)
if self.site_type:
NAME_LABEL = T("Site Name")
else:
NAME_LABEL = T("Building Name")
STREET_LABEL = T("Street Address")
POSTCODE_LABEL = settings.get_ui_label_postcode()
LAT_LABEL = T("Latitude")
LON_LABEL = T("Longitude")
AUTOCOMPLETE_HELP = T("Enter some characters to bring up a list of possible matches")
NEW_HELP = T("If not found, you can have a new location created.")
def ac_help_widget(level):
try:
label = location_hierarchy[level]
except:
label = level
return DIV(_class="tooltip",
_title="%s|%s|%s" % (label, AUTOCOMPLETE_HELP, NEW_HELP))
hidden = ""
throbber = "/%s/static/img/ajax-loader.gif" % appname
Lx_rows = DIV()
if value:
# Display Read-only Fields
name_widget = INPUT(value=represent,
_id="gis_location_name",
_name="gis_location_name",
_disabled="disabled")
street_widget = TEXTAREA(value=addr_street,
_id="gis_location_street",
_class="text",
_name="gis_location_street",
_disabled="disabled")
postcode_widget = INPUT(value=postcode,
_id="gis_location_postcode",
_name="gis_location_postcode",
_disabled="disabled")
lat_widget = S3LatLonWidget("lat",
disabled=True).widget(value=lat)
lon_widget = S3LatLonWidget("lon",
switch_button=True,
disabled=True).widget(value=lon)
for level in levels:
if level == "L0":
# L0 has been handled as special case earlier
continue
elif level not in location_hierarchy:
# Skip levels not in hierarchy
continue
if defaults[level]:
id = defaults[level].id
name = defaults[level].name
else:
# Hide empty levels
hidden = "hide"
id = ""
name = ""
try:
label = LABEL("%s:" % location_hierarchy[level])
except:
label = LABEL("%s:" % level)
row = TR(TD(label), TD(),
_id="gis_location_%s_label__row" % level,
_class="%s locselect box_middle" % hidden)
Lx_rows.append(row)
widget = DIV(INPUT(value=id,
_id="gis_location_%s" % level,
_name="gis_location_%s" % level,
_class="hide"),
INPUT(value=name,
_id="gis_location_%s_ac" % level,
_disabled="disabled"),
IMG(_src=throbber,
_height=32, _width=32,
_id="gis_location_%s_throbber" % level,
_class="throbber hide"))
row = TR(TD(widget), TD(),
_id="gis_location_%s__row" % level,
_class="%s locselect box_middle" % hidden)
Lx_rows.append(row)
else:
name_widget = INPUT(_id="gis_location_name",
_name="gis_location_name")
street_widget = TEXTAREA(_id="gis_location_street",
_class="text",
_name="gis_location_street")
postcode_widget = INPUT(_id="gis_location_postcode",
_name="gis_location_postcode")
lat_widget = S3LatLonWidget("lat").widget()
lon_widget = S3LatLonWidget("lon", switch_button=True).widget()
for level in levels:
hidden = ""
if level == "L0":
# L0 has been handled as special case earlier
continue
elif level not in location_hierarchy:
# Hide unused levels
# (these can then be enabled for other regions)
hidden = "hide"
try:
label = LABEL("%s:" % location_hierarchy[level])
except:
label = LABEL("%s:" % level)
row = TR(TD(label), TD(),
_class="%s locselect box_middle" % hidden,
_id="gis_location_%s_label__row" % level)
Lx_rows.append(row)
if level in defaults and defaults[level]:
default = defaults[level]
default_id = default.id
default_name = default.name
else:
default_id = ""
default_name = ""
widget = DIV(INPUT(value=default_id,
_id="gis_location_%s" % level,
_name="gis_location_%s" % level,
_class="hide"),
INPUT(value=default_name,
_id="gis_location_%s_ac" % level,
_class="%s" % hidden),
IMG(_src=throbber,
_height=32, _width=32,
_id="gis_location_%s_throbber" % level,
_class="throbber hide"))
row = TR(TD(widget),
TD(ac_help_widget(level)),
_class="%s locselect box_middle" % hidden,
_id="gis_location_%s__row" % level)
Lx_rows.append(row)
row = TR(INPUT(_id="gis_location_%s_search" % level,
_disabled="disabled"), TD(),
_class="hide locselect box_middle",
_id="gis_location_%s_search__row" % level)
Lx_rows.append(row)
hide_address = self.hide_address
if settings.get_gis_building_name():
hidden = ""
if hide_address:
hidden = "hide"
elif value and not represent:
hidden = "hide"
name_rows = DIV(TR(LABEL("%s:" % NAME_LABEL), TD(),
_id="gis_location_name_label__row",
_class="%s locselect box_middle" % hidden),
TR(name_widget, TD(),
_id="gis_location_name__row",
_class="%s locselect box_middle" % hidden),
TR(INPUT(_id="gis_location_name_search",
_disabled="disabled"), TD(),
_id="gis_location_name_search__row",
_class="hide locselect box_middle"))
else:
name_rows = ""
hidden = ""
if hide_address:
hidden = "hide"
elif value and not addr_street:
hidden = "hide"
street_rows = DIV(TR(LABEL("%s:" % STREET_LABEL), TD(),
_id="gis_location_street_label__row",
_class="%s locselect box_middle" % hidden),
TR(street_widget, TD(),
_id="gis_location_street__row",
_class="%s locselect box_middle" % hidden),
TR(INPUT(_id="gis_location_street_search",
_disabled="disabled"), TD(),
_id="gis_location_street_search__row",
_class="hide locselect box_middle"))
if config.geocoder:
geocoder = '''
S3.gis.geocoder=true'''
else:
geocoder = ""
hidden = ""
if hide_address:
hidden = "hide"
elif value and not postcode:
hidden = "hide"
postcode_rows = DIV(TR(LABEL("%s:" % POSTCODE_LABEL), TD(),
_id="gis_location_postcode_label__row",
_class="%s locselect box_middle" % hidden),
TR(postcode_widget, TD(),
_id="gis_location_postcode__row",
_class="%s locselect box_middle" % hidden),
TR(INPUT(_id="gis_location_postcode_search",
_disabled="disabled"), TD(),
_id="gis_location_postcode_search__row",
_class="hide locselect box_middle"))
hidden = ""
no_latlon = ""
if not latlon_selector:
hidden = "hide"
no_latlon = '''S3.gis.no_latlon=true\n'''
elif value and lat is None:
hidden = "hide"
latlon_help = locations.lat.comment
        # The lat/lon converter button (locations.lon.comment) is currently disabled
        converter_button = ""
latlon_rows = DIV(TR(LABEL("%s:" % LAT_LABEL), TD(),
_id="gis_location_lat_label__row",
_class="%s locselect box_middle" % hidden),
TR(TD(lat_widget), TD(latlon_help),
_id="gis_location_lat__row",
_class="%s locselect box_middle" % hidden),
TR(INPUT(_id="gis_location_lat_search",
_disabled="disabled"), TD(),
_id="gis_location_lat_search__row",
_class="hide locselect box_middle"),
TR(LABEL("%s:" % LON_LABEL), TD(),
_id="gis_location_lon_label__row",
_class="%s locselect box_middle" % hidden),
TR(TD(lon_widget), TD(converter_button),
_id="gis_location_lon__row",
_class="%s locselect box_middle" % hidden),
TR(INPUT(_id="gis_location_lon_search",
_disabled="disabled"), TD(),
_id="gis_location_lon_search__row",
_class="hide locselect box_middle"))
# Map Selector
PLACE_ON_MAP = T("Place on Map")
VIEW_ON_MAP = T("View on Map")
if map_selector:
if value:
map_button = A(VIEW_ON_MAP,
_style="cursor:pointer; cursor:hand",
_id="gis_location_map-btn",
_class="action-btn")
else:
map_button = A(PLACE_ON_MAP,
_style="cursor:pointer; cursor:hand",
_id="gis_location_map-btn",
_class="action-btn")
map_button_row = TR(map_button, TD(),
_id="gis_location_map_button_row",
_class="locselect box_middle")
else:
map_button_row = ""
# Search
widget = DIV(INPUT(_id="gis_location_search_ac"),
IMG(_src=throbber,
_height=32, _width=32,
_id="gis_location_search_throbber",
_class="throbber hide"),
_id="gis_location_search_div")
label = LABEL("%s:" % AUTOCOMPLETE_HELP)
select_button = A(T("Select This Location"),
_style="cursor:pointer; cursor:hand",
_id="gis_location_search_select-btn",
_class="hide action-btn")
search_rows = DIV(TR(label, TD(),
_id="gis_location_search_label__row",
_class="hide locselect box_middle"),
TR(TD(widget),
TD(select_button),
_id="gis_location_search__row",
_class="hide locselect box_middle"))
# @ToDo: Hierarchical Filter
Lx_search_rows = ""
# Error Messages
NAME_REQUIRED = T("Name field is required!")
COUNTRY_REQUIRED = T("Country is required!")
# Settings to be read by static/scripts/S3/s3.locationselector.widget.js
# Note: Currently we're limited to a single location selector per page
js_location_selector = '''
%s%s%s%s%s%s
S3.gis.location_id='%s'
S3.gis.site='%s'
i18n.gis_place_on_map='%s'
i18n.gis_view_on_map='%s'
i18n.gis_name_required='%s'
i18n.gis_country_required="%s"''' % (country_snippet,
geocoder,
navigate_away_confirm,
no_latlon,
no_map,
tab,
attr["_id"], # Name of the real location or site field
site,
PLACE_ON_MAP,
VIEW_ON_MAP,
NAME_REQUIRED,
COUNTRY_REQUIRED
)
s3.js_global.append(js_location_selector)
if s3.debug:
script = "s3.locationselector.widget.js"
else:
script = "s3.locationselector.widget.min.js"
s3.scripts.append("/%s/static/scripts/S3/%s" % (appname, script))
if self.polygon:
hidden = ""
if value:
# Display read-only view
wkt_widget = TEXTAREA(value = wkt,
_class="wkt-input",
_id="gis_location_wkt",
_name="gis_location_wkt",
_disabled="disabled")
if wkt:
hidden = "hide"
else:
wkt_widget = TEXTAREA(_class="wkt-input",
_id="gis_location_wkt",
_name="gis_location_wkt")
wkt_input_row = TAG[""](
TR(TD(LABEL("%s (WGS84)" % T("Polygon"))),
TD(),
_id="gis_location_wkt_label__row",
_class="box_middle %s" % hidden),
TR(
TD(wkt_widget),
TD(),
_id="gis_location_wkt__row",
_class="box_middle %s" % hidden)
)
else:
wkt_input_row = ""
# The overall layout of the components
return TAG[""](
TR(INPUT(**attr)), # Real input, which is hidden
label_row,
tab_rows,
Lx_search_rows,
search_rows,
L0_rows,
name_rows,
street_rows,
postcode_rows,
Lx_rows,
wkt_input_row,
map_button_row,
latlon_rows,
divider,
TR(map_popup, TD(), _class="box_middle"),
requires=requires
)
# =============================================================================
class S3LatLonWidget(DoubleWidget):
"""
Widget for latitude or longitude input, gives option to input in terms
of degrees, minutes and seconds
"""
def __init__(self, type, switch_button=False, disabled=False):
self._id = "gis_location_%s" % type
self._name = self._id
self.disabled = disabled
self.switch_button = switch_button
def widget(self, field=None, value=None):
T = current.T
s3 = current.response.s3
attr = dict(value=value,
_class="decimal %s" % self._class,
_id=self._id,
_name=self._name)
attr_dms = dict()
if self.disabled:
attr["_disabled"] = "disabled"
attr_dms["_disabled"] = "disabled"
dms_boxes = SPAN(
INPUT(_class="degrees", **attr_dms), "° ",
INPUT(_class="minutes", **attr_dms), "' ",
INPUT(_class="seconds", **attr_dms), "\" ",
["",
DIV(A(T("Use decimal"),
_class="action-btn gis_coord_switch_decimal"))
][self.switch_button],
_style="display: none;",
_class="gis_coord_dms"
)
decimal = SPAN(
INPUT(**attr),
["",
DIV(A(T("Use deg, min, sec"),
_class="action-btn gis_coord_switch_dms"))
][self.switch_button],
_class="gis_coord_decimal"
)
if not s3.lat_lon_i18n_appended:
s3.js_global.append('''
i18n.gis_only_numbers={degrees:'%s',minutes:'%s',seconds:'%s',decimal:'%s'}
i18n.gis_range_error={degrees:{lat:'%s',lon:'%s'},minutes:'%s',seconds:'%s',decimal:{lat:'%s',lon:'%s'}}
''' % (T("Degrees must be a number."),
T("Minutes must be a number."),
T("Seconds must be a number."),
T("Degrees must be a number."),
T("Degrees in a latitude must be between -90 to 90."),
T("Degrees in a longitude must be between -180 to 180."),
T("Minutes must be less than 60."),
T("Seconds must be less than 60."),
T("Latitude must be between -90 and 90."),
T("Longitude must be between -180 and 180.")))
s3.lat_lon_i18n_appended = True
if s3.debug and \
(not "S3/locationselector.widget.css" in s3.stylesheets):
s3.stylesheets.append("S3/locationselector.widget.css")
        if field is None:
return SPAN(decimal,
dms_boxes,
_class="gis_coord_wrap")
else:
return SPAN(
decimal,
dms_boxes,
requires = field.requires,
_class="gis_coord_wrap"
)
# =============================================================================
class S3CheckboxesWidget(OptionsWidget):
"""
Generates a TABLE tag with <num_column> columns of INPUT
checkboxes (multiple allowed)
help_lookup_table_name_field will display tooltip help
        :param lookup_table_name: string - name of the lookup table
        :param lookup_field_name: string - name of the lookup field
        :param multiple: bool - allow multiple selections
        :param options: list - optional -
                        value,text pairs for the Checkboxes -
If options = None, use options from requires.options().
This argument is useful for displaying a sub-set of the requires.options()
:param num_column: int -
:param help_lookup_field_name: string - optional -
:param help_footer: string -
Currently unused
"""
def __init__(self,
lookup_table_name = None,
lookup_field_name = None,
multiple = False,
options = None,
num_column = 1,
help_lookup_field_name = None,
help_footer = None
):
self.lookup_table_name = lookup_table_name
self.lookup_field_name = lookup_field_name
self.multiple = multiple
self.options = options
self.num_column = num_column
self.help_lookup_field_name = help_lookup_field_name
self.help_footer = help_footer
# -------------------------------------------------------------------------
def widget(self,
field,
value = None
):
if current.db:
db = current.db
else:
db = field._db
lookup_table_name = self.lookup_table_name
lookup_field_name = self.lookup_field_name
if lookup_table_name and lookup_field_name:
            requires = IS_NULL_OR(IS_IN_DB(db,
                                           db[lookup_table_name].id,
                                           "%(" + lookup_field_name + ")s",
                                           multiple = self.multiple))
else:
requires = self.requires
options = self.options
if not options:
if hasattr(requires, "options"):
options = requires.options()
else:
raise SyntaxError, "widget cannot determine options of %s" % field
values = s3_split_multi_value(value)
attr = OptionsWidget._attributes(field, {})
num_column = self.num_column
num_row = len(options) / num_column
# Ensure division rounds up
if len(options) % num_column > 0:
num_row = num_row +1
table = TABLE(_id = str(field).replace(".", "_"))
append = table.append
for i in range(0, num_row):
table_row = TR()
for j in range(0, num_column):
# Check that the index is still within options
index = num_row * j + i
if index < len(options):
input_options = {}
input_options = dict(requires = attr.get("requires", None),
_value = str(options[index][0]),
value = values,
_type = "checkbox",
_name = field.name,
hideerror = True
)
tip_attr = {}
help_text = ""
if self.help_lookup_field_name:
help_text = str(P(s3_get_db_field_value(tablename = lookup_table_name,
fieldname = self.help_lookup_field_name,
look_up_value = options[index][0],
look_up_field = "id")))
if self.help_footer:
help_text = help_text + str(self.help_footer)
if help_text:
tip_attr = dict(_class = "s3_checkbox_label",
#_title = options[index][1] + "|" + help_text
_rel = help_text
)
#table_row.append(TD(A(options[index][1],**option_attr )))
table_row.append(TD(INPUT(**input_options),
SPAN(options[index][1], **tip_attr)
)
)
append(table_row)
if self.multiple:
append(TR(I("(Multiple selections allowed)")))
return table
# -------------------------------------------------------------------------
def represent(self,
value):
        labels = [s3_get_db_field_value(tablename = self.lookup_table_name,
                                        fieldname = self.lookup_field_name,
                                        look_up_value = id,
                                        look_up_field = "id")
                  for id in s3_split_multi_value(value) if id]
        if labels and not None in labels:
            return ", ".join(labels)
else:
return None
# =============================================================================
class S3MultiSelectWidget(MultipleOptionsWidget):
"""
Standard MultipleOptionsWidget, but using the jQuery UI:
http://www.quasipartikel.at/multiselect/
static/scripts/ui.multiselect.js
"""
def __init__(self):
pass
def __call__(self, field, value, **attributes):
T = current.T
s3 = current.response.s3
selector = str(field).replace(".", "_")
s3.js_global.append('''
i18n.addAll='%s'
i18n.removeAll='%s'
i18n.itemsCount='%s'
i18n.search='%s'
''' % (T("Add all"),
T("Remove all"),
T("items selected"),
T("search")))
s3.jquery_ready.append('''
$('#%s').removeClass('list')
$('#%s').addClass('multiselect')
$('#%s').multiselect({
dividerLocation:0.5,
sortable:false
})
''' % (selector,
selector,
selector))
return TAG[""](
MultipleOptionsWidget.widget(field, value, **attributes),
requires = field.requires
)
# =============================================================================
class S3ACLWidget(CheckboxesWidget):
"""
Widget class for ACLs
@todo: add option dependency logic (JS)
@todo: configurable vertical/horizontal alignment
"""
@staticmethod
def widget(field, value, **attributes):
requires = field.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
if requires:
if hasattr(requires[0], "options"):
options = requires[0].options()
values = []
for k in options:
if isinstance(k, (list, tuple)):
k = k[0]
try:
flag = int(k)
if flag == 0:
if value == 0:
values.append(k)
break
else:
continue
elif value and value & flag == flag:
values.append(k)
except ValueError:
pass
value = values
#return CheckboxesWidget.widget(field, value, **attributes)
attr = OptionsWidget._attributes(field, {}, **attributes)
options = [(k, v) for k, v in options if k != ""]
opts = []
cols = attributes.get("cols", 1)
totals = len(options)
mods = totals%cols
rows = totals/cols
if mods:
rows += 1
for r_index in range(rows):
tds = []
for k, v in options[r_index*cols:(r_index+1)*cols]:
tds.append(TD(INPUT(_type="checkbox",
_name=attr.get("_name", field.name),
requires=attr.get("requires", None),
hideerror=True, _value=k,
value=(k in value)), v))
opts.append(TR(tds))
if opts:
opts[-1][0][0]["hideerror"] = False
return TABLE(*opts, **attr)
# was values = re.compile("[\w\-:]+").findall(str(value))
#values = not isinstance(value,(list,tuple)) and [value] or value
#requires = field.requires
#if not isinstance(requires, (list, tuple)):
#requires = [requires]
#if requires:
#if hasattr(requires[0], "options"):
#options = requires[0].options()
#else:
#raise SyntaxError, "widget cannot determine options of %s" \
#% field
# =============================================================================
class CheckboxesWidgetS3(OptionsWidget):
"""
S3 version of gluon.sqlhtml.CheckboxesWidget:
- supports also integer-type keys in option sets
- has an identifiable class
Used in Sync, Projects
"""
@staticmethod
def widget(field, value, **attributes):
"""
generates a TABLE tag, including INPUT checkboxes (multiple allowed)
see also: :meth:`FormWidget.widget`
"""
#values = re.compile("[\w\-:]+").findall(str(value))
values = not isinstance(value, (list, tuple)) and [value] or value
values = [str(v) for v in values]
attr = OptionsWidget._attributes(field, {}, **attributes)
attr["_class"] = "checkboxes-widget-s3"
requires = field.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
if hasattr(requires[0], "options"):
options = requires[0].options()
else:
raise SyntaxError, "widget cannot determine options of %s" \
% field
options = [(k, v) for k, v in options if k != ""]
options_help = attributes.get("options_help", {})
input_index = attributes.get("start_at", 0)
opts = []
cols = attributes.get("cols", 1)
totals = len(options)
mods = totals % cols
rows = totals / cols
if mods:
rows += 1
if totals == 0:
T = current.T
opts.append(TR(TD(SPAN(T("no options available"),
_class="no-options-available"),
INPUT(_type="hide",
_name=field.name,
_value=None))))
for r_index in range(rows):
tds = []
for k, v in options[r_index * cols:(r_index + 1) * cols]:
input_id = "id-%s-%s" % (field.name, input_index)
option_help = options_help.get(str(k), "")
if option_help:
label = LABEL(v, _for=input_id, _title=option_help)
else:
# Don't provide empty client-side popups
label = LABEL(v, _for=input_id)
tds.append(TD(INPUT(_type="checkbox",
_name=field.name,
_id=input_id,
requires=attr.get("requires", None),
hideerror=True,
_value=k,
value=(str(k) in values)),
label))
input_index += 1
opts.append(TR(tds))
if opts:
opts[-1][0][0]["hideerror"] = False
return TABLE(*opts, **attr)
# =============================================================================
class S3AddPersonWidget(FormWidget):
"""
Renders a person_id field as a Create Person form,
with an embedded Autocomplete to select existing people.
It relies on JS code in static/S3/s3.select_person.js
"""
def __init__(self,
controller = None,
select_existing = True):
# Controller to retrieve the person record
self.controller = controller
self.select_existing = select_existing
def __call__(self, field, value, **attributes):
T = current.T
request = current.request
appname = request.application
s3 = current.response.s3
formstyle = s3.crud.formstyle
# Main Input
real_input = str(field).replace(".", "_")
default = dict(_type = "text",
value = (value != None and str(value)) or "")
attr = StringWidget._attributes(field, default, **attributes)
attr["_class"] = "hide"
if self.select_existing:
_class ="box_top"
else:
_class = "hide"
if self.controller is None:
controller = request.controller
else:
controller = self.controller
# Select from registry buttons
select_row = TR(TD(A(T("Select from registry"),
_href="#",
_id="select_from_registry",
_class="action-btn"),
A(T("Remove selection"),
_href="#",
_onclick="clear_person_form();",
_id="clear_form_link",
_class="action-btn hide",
_style="padding-left:15px;"),
A(T("Edit Details"),
_href="#",
_onclick="edit_selected_person_form();",
_id="edit_selected_person_link",
_class="action-btn hide",
_style="padding-left:15px;"),
IMG(_src="/%s/static/img/ajax-loader.gif" % appname,
_height=32,
_width=32,
_id="person_load_throbber",
_class="throbber hide",
_style="padding-left:85px;"),
_class="w2p_fw"),
TD(),
_id="select_from_registry_row",
_class=_class,
_controller=controller,
_field=real_input,
_value=str(value))
# Autocomplete
select = '''select_person($('#%s').val())''' % real_input
widget = S3PersonAutocompleteWidget(post_process=select)
ac_row = TR(TD(LABEL("%s: " % T("Name"),
_class="hide",
_id="person_autocomplete_label"),
widget(field,
None,
_class="hide")),
TD(),
_id="person_autocomplete_row",
_class="box_top")
# Embedded Form
s3db = current.s3db
ptable = s3db.pr_person
ctable = s3db.pr_contact
fields = [ptable.first_name,
ptable.middle_name,
ptable.last_name,
ptable.date_of_birth,
ptable.gender]
if controller == "hrm":
emailRequired = current.deployment_settings.get_hrm_email_required()
elif controller == "vol":
fields.append(s3db.pr_person_details.occupation)
emailRequired = current.deployment_settings.get_hrm_email_required()
else:
emailRequired = False
if emailRequired:
validator = IS_EMAIL()
else:
validator = IS_NULL_OR(IS_EMAIL())
fields.extend([Field("email",
notnull=emailRequired,
requires=validator,
label=T("Email Address")),
Field("mobile_phone",
label=T("Mobile Phone Number"))])
labels, required = s3_mark_required(fields)
if required:
s3.has_required = True
form = SQLFORM.factory(table_name="pr_person",
labels=labels,
formstyle=formstyle,
upload="default/download",
separator = "",
*fields)
trs = []
for tr in form[0]:
if not tr.attributes["_id"].startswith("submit_record"):
if "_class" in tr.attributes:
tr.attributes["_class"] = "%s box_middle" % \
tr.attributes["_class"]
else:
tr.attributes["_class"] = "box_middle"
trs.append(tr)
table = DIV(*trs)
# Divider
divider = TR(TD(_class="subheading"),
TD(),
_class="box_bottom")
# JavaScript
if s3.debug:
script = "s3.select_person.js"
else:
script = "s3.select_person.min.js"
s3.scripts.append("/%s/static/scripts/S3/%s" % (appname, script))
# Overall layout of components
return TAG[""](select_row,
ac_row,
table,
divider)
# =============================================================================
class S3AutocompleteOrAddWidget(FormWidget):
"""
This widget searches for or adds an object. It contains:
- an autocomplete field which can be used to search for an existing object.
- an add widget which is used to add an object.
It fills the field with that object after successful addition
"""
def __init__(self,
autocomplete_widget,
add_widget
):
self.autocomplete_widget = autocomplete_widget
self.add_widget = add_widget
def __call__(self, field, value, **attributes):
return TAG[""](
# this does the input field
self.autocomplete_widget(field, value, **attributes),
# this can fill it if it isn't autocompleted
self.add_widget(field, value, **attributes)
)
# =============================================================================
class S3AddObjectWidget(FormWidget):
"""
This widget displays an inline form loaded via AJAX on demand.
In the browser:
        A load request must be made to this widget to enable it.
The load request must include:
- a URL for the form
after a successful submission, the response callback is handed the
response.
"""
def __init__(self,
form_url,
table_name,
dummy_field_selector,
on_show,
on_hide
):
self.form_url = form_url
self.table_name = table_name
self.dummy_field_selector = dummy_field_selector
self.on_show = on_show
self.on_hide = on_hide
def __call__(self, field, value, **attributes):
T = current.T
s3 = current.response.s3
if s3.debug:
script_name = "/%s/static/scripts/jquery.ba-resize.js"
else:
script_name = "/%s/static/scripts/jquery.ba-resize.min.js"
if script_name not in s3.scripts:
s3.scripts.append(script_name)
return TAG[""](
# @ToDo: this might be better moved to its own script.
SCRIPT('''
$(function () {
var form_field = $('#%(form_field_name)s')
var throbber = $('<div id="%(form_field_name)s_ajax_throbber" class="ajax_throbber"/>')
throbber.hide()
throbber.insertAfter(form_field)
function request_add_form() {
throbber.show()
var dummy_field = $('%(dummy_field_selector)s')
// create an element for the form
var form_iframe = document.createElement('iframe')
var $form_iframe = $(form_iframe)
$form_iframe.attr('id', '%(form_field_name)s_form_iframe')
$form_iframe.attr('frameborder', '0')
$form_iframe.attr('scrolling', 'no')
$form_iframe.attr('src', '%(form_url)s')
var initial_iframe_style = {
width: add_object_link.width(),
height: add_object_link.height()
}
$form_iframe.css(initial_iframe_style)
function close_iframe() {
$form_iframe.unload()
form_iframe.contentWindow.close()
//iframe_controls.remove()
$form_iframe.animate(
initial_iframe_style,
{
complete: function () {
$form_iframe.remove()
add_object_link.show()
%(on_hide)s
dummy_field.show()
}
}
)
}
function reload_iframe() {
form_iframe.contentWindow.location.reload(true)
}
function resize_iframe_to_fit_content() {
var form_iframe_content = $form_iframe.contents().find('body');
// do first animation smoothly
$form_iframe.animate(
{
height: form_iframe_content.outerHeight(true),
width: 500
},
{
duration: jQuery.resize.delay,
complete: function () {
// iframe's own animations should be instant, as they
// have their own smoothing (e.g. expanding error labels)
function resize_iframe_to_fit_content_immediately() {
$form_iframe.css({
height: form_iframe_content.outerHeight(true),
width:500
})
}
// if the iframe content resizes, resize the iframe
// this depends on Ben Alman's resize plugin
form_iframe_content.bind(
'resize',
resize_iframe_to_fit_content_immediately
)
// when unloading, unbind the resizer (remove poller)
$form_iframe.bind(
'unload',
function () {
form_iframe_content.unbind(
'resize',
resize_iframe_to_fit_content_immediately
)
//iframe_controls.hide()
}
)
// there may have been content changes during animation
// so resize to make sure they are shown.
form_iframe_content.resize()
//iframe_controls.show()
%(on_show)s
}
}
)
}
function iframe_loaded() {
dummy_field.hide()
resize_iframe_to_fit_content()
form_iframe.contentWindow.close_iframe = close_iframe
throbber.hide()
}
$form_iframe.bind('load', iframe_loaded)
function set_object_id() {
// the server must give the iframe the object
// id of the created object for the field
// the iframe must also close itself.
var created_object_representation = form_iframe.contentWindow.created_object_representation
if (created_object_representation) {
dummy_field.val(created_object_representation)
}
var created_object_id = form_iframe.contentWindow.created_object_id
if (created_object_id) {
form_field.val(created_object_id)
close_iframe()
}
}
$form_iframe.bind('load', set_object_id)
add_object_link.hide()
/*
var iframe_controls = $('<span class="iframe_controls" style="float:right; text-align:right;"></span>')
iframe_controls.hide()
var close_button = $('<a>%(Close)s </a>')
close_button.click(close_iframe)
var reload_button = $('<a>%(Reload)s </a>')
reload_button.click(reload_iframe)
iframe_controls.append(close_button)
iframe_controls.append(reload_button)
iframe_controls.insertBefore(add_object_link)
*/
$form_iframe.insertAfter(add_object_link)
}
var add_object_link = $('<a>%(Add)s</a>')
add_object_link.click(request_add_form)
add_object_link.insertAfter(form_field)
})''' % dict(
field_name = field.name,
form_field_name = "_".join((self.table_name, field.name)),
form_url = self.form_url,
dummy_field_selector = self.dummy_field_selector(self.table_name, field.name),
on_show = self.on_show,
on_hide = self.on_hide,
Add = T("Add..."),
Reload = T("Reload"),
Close = T("Close"),
)
)
)
# =============================================================================
class S3SearchAutocompleteWidget(FormWidget):
"""
Uses the s3Search Module
"""
def __init__(self,
tablename,
represent,
get_fieldname = "id",
):
self.get_fieldname = get_fieldname
self.tablename = tablename
self.represent = represent
def __call__(self, field, value, **attributes):
request = current.request
response = current.response
session = current.session
tablename = self.tablename
modulename, resourcename = tablename.split("_", 1)
attributes["is_autocomplete"] = True
attributes["fieldname"] = field.name
attributes["get_fieldname"] = self.get_fieldname
# Display in the simple search widget
if value:
attributes["value"] = self.represent(value)
else:
attributes["value"] = ""
r = s3_request(modulename, resourcename, args=[])
search_div = r.resource.search( r, **attributes)["form"]
hidden_input = INPUT(value = value or "",
requires = field.requires,
_id = "%s_%s" % (tablename, field.name),
_class = "hide hide_input",
_name = field.name,
)
return TAG[""](
search_div,
hidden_input
)
# =============================================================================
class S3TimeIntervalWidget(FormWidget):
"""
Simple time interval widget for the scheduler task table
"""
multipliers = (("weeks", 604800),
("days", 86400),
("hours", 3600),
("minutes", 60),
("seconds", 1))
@staticmethod
def widget(field, value, **attributes):
multipliers = S3TimeIntervalWidget.multipliers
if value is None:
value = 0
if value == 0:
multiplier = 1
else:
for m in multipliers:
multiplier = m[1]
if int(value) % multiplier == 0:
break
options = []
for i in xrange(1, len(multipliers) + 1):
title, opt = multipliers[-i]
if opt == multiplier:
option = OPTION(title, _value=opt, _selected="selected")
else:
option = OPTION(title, _value=opt)
options.append(option)
val = value / multiplier
inp = DIV(INPUT(value = val,
requires = field.requires,
_id = ("%s" % field).replace(".", "_"),
_name = field.name),
SELECT(options,
_name=("%s_multiplier" % field).replace(".", "_")))
return inp
@staticmethod
def represent(value):
multipliers = S3TimeIntervalWidget.multipliers
try:
val = int(value)
except:
val = 0
if val == 0:
multiplier = multipliers[-1]
else:
for m in multipliers:
if val % m[1] == 0:
multiplier = m
break
val = val / multiplier[1]
return "%s %s" % (val, current.T(multiplier[0]))
# =============================================================================
class S3InvBinWidget(FormWidget):
"""
Widget used by S3CRUD to offer the user matching bins where
stock items can be placed
"""
def __init__(self,
tablename,):
self.tablename = tablename
def __call__(self, field, value, **attributes):
T = current.T
request = current.request
s3db = current.s3db
tracktable = s3db.inv_track_item
stocktable = s3db.inv_inv_item
new_div = INPUT(value = value or "",
requires = field.requires,
_id = "i_%s_%s" % (self.tablename, field.name),
_name = field.name,
)
id = None
function = self.tablename[4:]
if len(request.args) > 2:
if request.args[1] == function:
id = request.args[2]
        if id is None or tracktable[id] is None:
return TAG[""](
new_div
)
record = tracktable[id]
site_id = s3db.inv_recv[record.recv_id].site_id
query = (stocktable.site_id == site_id) & \
(stocktable.item_id == record.item_id) & \
(stocktable.item_source_no == record.item_source_no) & \
(stocktable.item_pack_id == record.item_pack_id) & \
(stocktable.currency == record.currency) & \
(stocktable.pack_value == record.pack_value) & \
(stocktable.expiry_date == record.expiry_date) & \
(stocktable.supply_org_id == record.supply_org_id)
rows = current.db(query).select(stocktable.bin,
stocktable.id)
if len(rows) == 0:
return TAG[""](
new_div
)
bins = []
for row in rows:
bins.append(OPTION(row.bin))
match_lbl = LABEL(T("Select an existing bin"))
match_div = SELECT(bins,
_id = "%s_%s" % (self.tablename, field.name),
_name = field.name,
)
new_lbl = LABEL(T("...or add a new bin"))
return TAG[""](
match_lbl,
match_div,
new_lbl,
new_div
)
# =============================================================================
class S3EmbedComponentWidget(FormWidget):
"""
Widget used by S3CRUD for link-table components with actuate="embed".
Uses s3.embed_component.js for client-side processing, and
S3CRUD._postprocess_embedded to receive the data.
"""
def __init__(self,
link=None,
component=None,
widget=None,
autocomplete=None,
link_filter=None,
select_existing=True):
self.link = link
self.component = component
self.widget = widget
self.autocomplete = autocomplete
self.select_existing = select_existing
self.link_filter = link_filter
self.post_process = current.s3db.get_config(link,
"post_process",
None)
def __call__(self, field, value, **attributes):
T = current.T
db = current.db
s3db = current.s3db
request = current.request
appname = request.application
s3 = current.response.s3
appname = current.request.application
formstyle = s3.crud.formstyle
ltable = s3db[self.link]
ctable = s3db[self.component]
prefix, resourcename = self.component.split("_", 1)
if field.name in request.post_vars:
selected = request.post_vars[field.name]
else:
selected = None
# Main Input
real_input = str(field).replace(".", "_")
dummy = "dummy_%s" % real_input
default = dict(_type = "text",
value = (value != None and str(value)) or "")
attr = StringWidget._attributes(field, default, **attributes)
attr["_class"] = "hide"
if self.select_existing:
_class ="box_top"
else:
_class = "hide"
# Post-process selection/deselection
if self.post_process is not None:
try:
if self.autocomplete:
pp = self.post_process % real_input
else:
pp = self.post_process % dummy
except:
pp = self.post_process
else:
pp = None
clear = "clear_component_form();"
if pp is not None:
clear = "%s%s" % (clear, pp)
# Select from registry buttons
url = "/%s/%s/%s/" % (appname, prefix, resourcename)
select_row = TR(TD(A(T("Select from registry"),
_href="#",
_id="select_from_registry",
_class="action-btn"),
A(T("Remove selection"),
_href="#",
_onclick=clear,
_id="clear_form_link",
_class="action-btn hide",
_style="padding-left:15px;"),
A(T("Edit Details"),
_href="#",
_onclick="edit_selected_form();",
_id="edit_selected_link",
_class="action-btn hide",
_style="padding-left:15px;"),
IMG(_src="/%s/static/img/ajax-loader.gif" % \
appname,
_height=32,
_width=32,
_id="load_throbber",
_class="throbber hide",
_style="padding-left:85px;"),
_class="w2p_fw"),
TD(),
_id="select_from_registry_row",
_class=_class,
_controller=prefix,
_component=self.component,
_url=url,
_field=real_input,
_value=str(value))
# Autocomplete/Selector
if self.autocomplete:
ac_field = ctable[self.autocomplete]
select = "select_component($('#%s').val());" % real_input
if pp is not None:
select = "%s%s" % (pp, select)
widget = S3AutocompleteWidget(prefix,
resourcename=resourcename,
fieldname=self.autocomplete,
link_filter=self.link_filter,
post_process=select)
ac_row = TR(TD(LABEL("%s: " % ac_field.label,
_class="hide",
_id="component_autocomplete_label"),
widget(field, None, _class="hide")),
TD(),
_id="component_autocomplete_row",
_class="box_top")
else:
select = "select_component($('#%s').val());" % dummy
if pp is not None:
select = "%s%s" % (pp, select)
# @todo: add link_filter here as well
widget = OptionsWidget.widget
ac_row = TR(TD(LABEL("%s: " % field.label,
_class="hide",
_id="component_autocomplete_label"),
widget(field, None, _class="hide",
_id=dummy, _onchange=select)),
TD(INPUT(_id=real_input, _class="hide")),
_id="component_autocomplete_row",
_class="box_top")
# Embedded Form
fields = [f for f in ctable
if (f.writable or f.readable) and not f.compute]
if selected:
# Initialize validators with the correct record ID
for f in fields:
requires = f.requires or []
if not isinstance(requires, (list, tuple)):
requires = [requires]
[r.set_self_id(selected) for r in requires
if hasattr(r, "set_self_id")]
labels, required = s3_mark_required(fields)
if required:
s3.has_required = True
form = SQLFORM.factory(table_name=self.component,
labels=labels,
formstyle=formstyle,
upload="default/download",
separator = "",
*fields)
trs = []
att = "box_middle embedded"
for tr in form[0]:
if not tr.attributes["_id"].startswith("submit_record"):
if "_class" in tr.attributes:
tr.attributes["_class"] = "%s %s" % (tr.attributes["_class"], att)
else:
tr.attributes["_class"] = att
trs.append(tr)
table = DIV(*trs)
# Divider
divider = TR(TD(_class="subheading"), TD(), _class="box_bottom embedded")
# JavaScript
if s3.debug:
script = "s3.embed_component.js"
else:
script = "s3.embed_component.min.js"
s3.scripts.append("/%s/static/scripts/S3/%s" % (appname, script))
# Overall layout of components
return TAG[""](select_row,
ac_row,
table,
divider)
# =============================================================================
def s3_comments_widget(field, value):
"""
A smaller-than-normal textarea
to be used by the s3.comments() Reusable field
"""
return TEXTAREA(_name=field.name,
_id="%s_%s" % (field._tablename, field.name),
_class="comments %s" % (field.type),
value=value,
requires=field.requires)
# =============================================================================
def s3_richtext_widget(field, value):
"""
A larger-than-normal textarea to be used by the CMS Post Body field
"""
s3 = current.response.s3
id = "%s_%s" % (field._tablename, field.name)
# Load the scripts
ckeditor = URL(c="static", f="ckeditor", args="ckeditor.js")
s3.scripts.append(ckeditor)
adapter = URL(c="static", f="ckeditor", args=["adapters",
"jquery.js"])
s3.scripts.append(adapter)
# Toolbar options: http://docs.cksource.com/CKEditor_3.x/Developers_Guide/Toolbar
js = '''var ck_config={toolbar:[['Format','Bold','Italic','-','NumberedList','BulletedList','-','Link','Unlink','-','Image','Table','-','PasteFromWord','-','Source','Maximize']],toolbarCanCollapse:false,removePlugins:'elementspath'}'''
s3.js_global.append(js)
js = '''$('#%s').ckeditor(ck_config)''' % id
s3.jquery_ready.append(js)
return TEXTAREA(_name=field.name,
_id=id,
_class="richtext %s" % (field.type),
value=value,
requires=field.requires)
# =============================================================================
def s3_grouped_checkboxes_widget(field,
value,
size = 20,
**attributes):
"""
Displays checkboxes for each value in the table column "field".
If there are more than "size" options, they are grouped by the
first letter of their label.
@type field: Field
@param field: Field (or Storage) object
@type value: dict
@param value: current value from the form field
@type size: int
@param size: number of input elements for each group
Used by S3SearchOptionsWidget
"""
requires = field.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
if hasattr(requires[0], "options"):
options = requires[0].options()
else:
raise SyntaxError, "widget cannot determine options of %s" \
% field
options = [(k, v) for k, v in options if k != ""]
total = len(options)
if total == 0:
T = current.T
options.append(TR(TD(SPAN(T("no options available"),
_class="no-options-available"),
INPUT(_type="hide",
_name=field.name,
_value=None))))
if total > size:
# Options are put into groups of "size"
import locale
letters = []
letters_options = {}
append = letters.append
for val, label in options:
letter = label
if letter:
letter = s3_unicode(letter).upper()[0]
if letter not in letters_options:
append(letter)
letters_options[letter] = [(val, label)]
else:
letters_options[letter].append((val, label))
widget = DIV(_class=attributes.pop("_class",
"s3-grouped-checkboxes-widget"),
_name = "%s_widget" % field.name)
input_index = 0
group_index = 0
group_options = []
from_letter = u"A"
to_letter = letters[0]
letters.sort(locale.strcoll)
lget = letters_options.get
for letter in letters:
if from_letter is None:
from_letter = letter
group_options += lget(letter, [])
count = len(group_options)
if count >= size or letter == letters[-1]:
if letter == letters[-1]:
to_letter = u"Z"
else:
to_letter = letter
# Are these options for a single letter or a range?
if to_letter != from_letter:
group_label = "%s - %s" % (from_letter, to_letter)
else:
group_label = from_letter
widget.append(DIV(group_label,
_id="%s-group-label-%s" % (field.name,
group_index),
_class="s3-grouped-checkboxes-widget-label expanded"))
group_field = field
# Can give Unicode issues:
#group_field.requires = IS_IN_SET(group_options,
# multiple=True)
letter_widget = s3_checkboxes_widget(group_field,
value,
options = group_options,
start_at_id=input_index,
**attributes)
widget.append(letter_widget)
input_index += count
group_index += 1
group_options = []
from_letter = None
else:
# not enough options to form groups
try:
widget = s3_checkboxes_widget(field, value, **attributes)
except:
# some versions of gluon/sqlhtml.py don't support non-integer keys
if s3_debug:
raise
else:
return None
return widget
# =============================================================================
def s3_checkboxes_widget(field,
value,
options = None,
cols = 1,
start_at_id = 0,
help_field = None,
**attributes):
"""
Display checkboxes for each value in the table column "field".
@type cols: int
@param cols: spread the input elements into "cols" columns
@type start_at_id: int
@param start_at_id: start input element ids at this number
        @type help_field: string
        @param help_field: field name string pointing to the field
                           containing help text for each option
"""
values = not isinstance(value, (list, tuple)) and [value] or value
values = [str(v) for v in values]
attributes["_name"] = "%s_widget" % field.name
if "_class" not in attributes:
attributes["_class"] = "s3-checkboxes-widget"
if options is None:
requires = field.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
if hasattr(requires[0], "options"):
options = requires[0].options()
else:
raise SyntaxError, "widget cannot determine options of %s" % field
help_text = Storage()
if help_field:
ftype = str(field.type)
if ftype[:9] == "reference":
ktablename = ftype[10:]
elif ftype[:14] == "list:reference":
ktablename = ftype[15:]
else:
# not a reference - no expand
# option text = field representation
ktablename = None
if ktablename is not None:
if "." in ktablename:
ktablename, pkey = ktablename.split(".", 1)
else:
pkey = None
ktable = current.s3db[ktablename]
if pkey is None:
pkey = ktable._id.name
lookup_field = help_field
if lookup_field in ktable.fields:
query = ktable[pkey].belongs([k for k, v in options])
rows = current.db(query).select(ktable[pkey],
ktable[lookup_field]
)
for row in rows:
help_text[str(row[ktable[pkey]])] = row[ktable[lookup_field]]
else:
# Error => no comments available
pass
else:
# No lookup table => no comments available
pass
options = [(k, v) for k, v in options if k != ""]
options = sorted(options, key=lambda option: option[1])
input_index = start_at_id
rows = []
count = len(options)
mods = count % cols
num_of_rows = count / cols
if mods:
num_of_rows += 1
for r in range(num_of_rows):
cells = []
for k, v in options[r * cols:(r + 1) * cols]:
input_id = "id-%s-%s" % (field.name, str(input_index))
title = help_text.get(str(k), None)
if title:
label_attr = dict(_title=title)
else:
label_attr = {}
cells.append(TD(INPUT(_type="checkbox",
_name=field.name,
_id=input_id,
hideerror=True,
_value=k,
value=(k in values)),
LABEL(v,
_for=input_id,
**label_attr)))
input_index += 1
rows.append(TR(cells))
if rows:
rows[-1][0][0]["hideerror"] = False
return TABLE(*rows, **attributes)
# =============================================================================
class S3SliderWidget(FormWidget):
"""
Standard Slider Widget
@author: Daniel Klischies ([email protected])
@ToDo: The range of the slider should ideally be picked up from the Validator
@ToDo: Show the value of the slider numerically as well as simply a position
"""
def __init__(self,
minval,
maxval,
steprange,
value):
self.minval = minval
self.maxval = maxval
self.steprange = steprange
self.value = value
def __call__(self, field, value, **attributes):
response = current.response
fieldname = str(field).replace(".", "_")
sliderdiv = DIV(_id=fieldname, **attributes)
inputid = "%s_input" % fieldname
localfield = str(field).split(".")
sliderinput = INPUT(_name=localfield[1],
_id=inputid,
_class="hide",
_value=self.value)
response.s3.jquery_ready.append('''S3.slider('%s','%f','%f','%f','%f')''' % \
(fieldname,
self.minval,
self.maxval,
self.steprange,
self.value))
return TAG[""](sliderdiv, sliderinput)
# =============================================================================
class S3OptionsMatrixWidget(FormWidget):
"""
Constructs a two dimensional array/grid of checkboxes
with row and column headers.
"""
def __init__(self, rows, cols):
"""
@type rows: tuple
@param rows:
A tuple of tuples.
The nested tuples will have the row label followed by a value
for each checkbox in that row.
@type cols: tuple
@param cols:
A tuple containing the labels to use in the column headers
"""
self.rows = rows
self.cols = cols
def __call__(self, field, value, **attributes):
"""
Returns the grid/matrix of checkboxes as a web2py TABLE object and
adds references to required Javascript files.
@type field: Field
@param field:
This gets passed in when the widget is rendered or used.
@type value: list
@param value:
A list of the values matching those of the checkboxes.
@param attributes:
HTML attributes to assign to the table.
"""
if isinstance(value, (list, tuple)):
values = [str(v) for v in value]
else:
values = [str(value)]
# Create the table header
header_cells = []
for col in self.cols:
header_cells.append(TH(col, _scope="col"))
header = THEAD(TR(header_cells))
# Create the table body cells
grid_rows = []
for row in self.rows:
# Create a list to hold our table cells
# the first cell will hold the row label
row_cells = [TH(row[0], _scope="row")]
for option in row[1:]:
# This determines if the checkbox should be checked
if option in values:
checked = True
else:
checked = False
row_cells.append(TD(
INPUT(_type="checkbox",
_name=field.name,
_value=option,
value=checked
)
))
grid_rows.append(TR(row_cells))
s3 = current.response.s3
s3.scripts.append("/%s/static/scripts/S3/s3.optionsmatrix.js" % current.request.application)
# If the table has an id attribute, activate the jQuery plugin for it.
if "_id" in attributes:
s3.jquery_ready.append('''$('#{0}').s3optionsmatrix()'''.format(attributes.get("_id")))
return TABLE(header, TBODY(grid_rows), **attributes)
# =============================================================================
class S3KeyValueWidget(ListWidget):
"""
Allows for input of key-value pairs and stores them as list:string
"""
def __init__(self, key_label=None, value_label=None):
"""
Returns a widget with key-value fields
"""
self._class = "key-value-pairs"
T = current.T
self.key_label = key_label or T("Key")
self.value_label = value_label or T("Value")
def __call__(self, field, value, **attributes):
T = current.T
s3 = current.response.s3
_id = "%s_%s" % (field._tablename, field.name)
_name = field.name
_class = "text hide"
attributes["_id"] = _id
attributes["_name"] = _name
attributes["_class"] = _class
script = SCRIPT(
'''jQuery(document).ready(function(){jQuery('#%s').kv_pairs('%s','%s')})''' % \
(_id, self.key_label, self.value_label))
if not value: value = "[]"
if not isinstance(value, str):
try:
value = json.dumps(value)
except:
raise("Bad value for key-value pair field")
appname = current.request.application
jsfile = "/%s/static/scripts/S3/%s" % (appname, "s3.keyvalue.widget.js")
if jsfile not in s3.scripts:
s3.scripts.append(jsfile)
return TAG[""](
TEXTAREA(value, **attributes),
script
)
@staticmethod
def represent(value):
if isinstance(value, str):
try:
value = json.loads(value)
if isinstance(value, str):
raise ValueError("key-value JSON is wrong.")
except:
# XXX: log this!
#raise ValueError("Bad json was found as value for a key-value field: %s" % value)
return ""
rep = []
if isinstance(value, (tuple, list)):
for kv in value:
rep += ["%s: %s" % (kv["key"], kv["value"])]
return ", ".join(rep)
# END =========================================================================
| mit | -6,676,936,894,634,872,000 | 35.903405 | 239 | 0.454941 | false | 4.625897 | false | false | false |
sightmachine/simplecv2-examples | detection/template_matching.py | 1 | 1190 | """
This example uses the built-in template matching. The easiest way
to think of this is if you played the card matching game, the cards would
pretty much have to be identical. The template method doesn't allow much
for the scale to change, nor for rotation. This is the most basic pattern
matching SimpleCV offers. If you are looking for something more complex
you will probably want to look into img.find()
"""
print __doc__
import time
from simplecv.api import Image, Color, TemplateMatch
source = Image("templatetest.png", sample=True) # the image to search
template = Image("template.png", sample=True) # the template to search the image for
t = 5
methods = ["SQR_DIFF", "SQR_DIFF_NORM", "CCOEFF",
"CCOEFF_NORM", "CCORR", "CCORR_NORM"] # the various types of template matching available
for m in methods:
img = Image("templatetest.png", sample=True)
img.dl().text("current method: {}".format(m), (10, 20), color=Color.RED)
fs = source.find(TemplateMatch, template, threshold=t, method=m)
for match in fs:
img.dl().rectangle((match.x, match.y), (match.width, match.height), color=Color.RED)
img.apply_layers().show()
time.sleep(3)
| bsd-2-clause | -6,215,875,184,857,550,000 | 38.666667 | 100 | 0.709244 | false | 3.489736 | false | false | false |
rsignell-usgs/python-training | web-services/02-create_map.py | 1 | 5754 |
# coding: utf-8
# <img style='float: left' width="150px" src="http://bostonlightswim.org/wp/wp-content/uploads/2011/08/BLS-front_4-color.jpg">
# <br><br>
#
# ## [The Boston Light Swim](http://bostonlightswim.org/)
#
# ### Sea Surface Temperature time-series maps
# ### Load configuration
# In[8]:
import os
try:
import cPickle as pickle
except ImportError:
import pickle
run_name = '2015-08-17'
fname = os.path.join(run_name, 'config.pkl')
with open(fname, 'rb') as f:
config = pickle.load(f)
# ### Load skill_score
# In[9]:
try:
import cPickle as pickle
except ImportError:
import pickle
fname = os.path.join(run_name, 'skill_score.pkl')
with open(fname, 'rb') as f:
skill_score = pickle.load(f)
# In[10]:
import numpy as np
from glob import glob
from pandas import Panel
from utilities import nc2df
def load_ncs(run_name):
fname = '{}-{}.nc'.format
ALL_OBS_DATA = nc2df(os.path.join(run_name,
fname(run_name, 'OBS_DATA')))
index = ALL_OBS_DATA.index
dfs = dict(OBS_DATA=ALL_OBS_DATA)
for fname in glob(os.path.join(run_name, "*.nc")):
if 'OBS_DATA' in fname:
continue
else:
model = fname.split('.')[0].split('-')[-1]
df = nc2df(fname)
# FIXME: Horrible work around duplicate times.
if len(df.index.values) != len(np.unique(df.index.values)):
kw = dict(subset='index', take_last=True)
df = df.reset_index().drop_duplicates(**kw).set_index('index')
kw = dict(method='time', limit=30)
df = df.reindex(index).interpolate(**kw).ix[index]
dfs.update({model: df})
return Panel.fromDict(dfs).swapaxes(0, 2)
# In[11]:
from mpld3 import save_html
import matplotlib.pyplot as plt
from mpld3.plugins import LineLabelTooltip, connect
from utilities import make_map
bbox = config['bbox']
units = config['units']
run_name = config['run_name']
kw = dict(zoom_start=12, line=True, states=False, secoora_stations=False)
mapa = make_map(bbox, **kw)
# ### Clusters
# In[12]:
from glob import glob
from operator import itemgetter
import iris
from pandas import DataFrame, read_csv
fname = '{}-all_obs.csv'.format(run_name)
all_obs = read_csv(os.path.join(run_name, fname), index_col='name')
big_list = []
for fname in glob(os.path.join(run_name, "*.nc")):
if 'OBS_DATA' in fname:
continue
cube = iris.load_cube(fname)
model = fname.split('-')[-1].split('.')[0]
lons = cube.coord(axis='X').points
lats = cube.coord(axis='Y').points
stations = cube.coord('station name').points
models = [model]*lons.size
lista = zip(models, lons.tolist(), lats.tolist(), stations.tolist())
big_list.extend(lista)
big_list.sort(key=itemgetter(3))
df = DataFrame(big_list, columns=['name', 'lon', 'lat', 'station'])
df.set_index('station', drop=True, inplace=True)
groups = df.groupby(df.index)
for station, info in groups:
sta_name = all_obs['station'][all_obs['station'].astype(str) == station].index[0]
for lat, lon, name in zip(info.lat, info.lon, info.name):
location = lat, lon
popup = '<b>{}</b>\n{}'.format(sta_name, name)
mapa.simple_marker(location=location, popup=popup,
clustered_marker=True)
# ### Model and observations plots
# In[13]:
mean_bias = skill_score['mean_bias'].applymap('{:.2f}'.format).replace('nan', '--')
skill = skill_score['rmse'].applymap('{:.2f}'.format).replace('nan', '--')
resolution, width, height = 75, 7, 3
def make_plot():
fig, ax = plt.subplots(figsize=(width, height))
ax.set_ylabel('Sea surface Temperature ({})'.format(units))
ax.grid(True)
return fig, ax
dfs = load_ncs(run_name)
dfs = dfs.swapaxes('items', 'major').resample('30min').swapaxes('items', 'major')
for station in dfs:
sta_name = all_obs['station'][all_obs['station'].astype(str) == station].index[0]
df = dfs[station].dropna(axis=1, how='all')
if df.empty:
continue
labels = []
fig, ax = make_plot()
for col in df.columns:
serie = df[col].dropna()
lines = ax.plot(serie.index, serie, label=col,
linewidth=2.5, alpha=0.5)
if 'OBS_DATA' not in col:
text0 = col
text1 = mean_bias[sta_name][col]
text2 = skill[sta_name][col]
tooltip = '{}:\nbias {}\nskill: {}'.format
labels.append(tooltip(text0, text1, text2))
else:
labels.append('OBS_DATA')
kw = dict(loc='upper center', bbox_to_anchor=(0.5, 1.05), numpoints=1,
ncol=2, framealpha=0)
l = ax.legend(**kw)
l.set_title("") # Workaround str(None).
[connect(fig, LineLabelTooltip(line, name))
for line, name in zip(ax.lines, labels)]
html = 'station_{}.html'.format(station)
save_html(fig, '{}/{}'.format(run_name, html))
plt.close(fig)
popup = "<div align='center'> {} <br><iframe src='{}' alt='image'"
popup += "width='{}px' height='{}px' frameBorder='0'></div>"
popup = popup.format('{}'.format(sta_name), html,
(width*resolution)+75, (height*resolution)+50)
kw = dict(popup=popup, width=(width*resolution)+75)
if (df.columns == 'OBS_DATA').all():
kw.update(dict(marker_color="blue", marker_icon="ok"))
else:
kw.update(dict(marker_color="green", marker_icon="ok"))
obs = all_obs[all_obs['station'].astype(str) == station].squeeze()
mapa.simple_marker(location=[obs['lat'], obs['lon']], **kw)
# ### Map
# In[14]:
from utilities import inline_map
mapa.create_map(path=os.path.join(run_name, 'mapa.html'))
inline_map(os.path.join(run_name, 'mapa.html'))
| cc0-1.0 | -4,326,278,565,068,629,000 | 27.344828 | 126 | 0.60723 | false | 3.0803 | true | false | false |
harayz/raspberry_pwn | src/pentest/voiper/sulley/pydbg/defines.py | 8 | 5923 | #
# PyDBG
# Copyright (C) 2006 Pedram Amini <[email protected]>
#
# $Id: defines.py 193 2007-04-05 13:30:01Z cameron $
#
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# windows_h.py was generated with:
#
# c:\Python\Lib\site-packages\ctypes\wrap
# c:\python\python h2xml.py windows.h -o windows.xml -q -c
# c:\python\python xml2py.py windows.xml -s DEBUG_EVENT -s CONTEXT -s MEMORY_BASIC_INFORMATION -s LDT_ENTRY \
# -s PROCESS_INFORMATION -s STARTUPINFO -s SYSTEM_INFO -o windows_h.py
#
# Then the import of ctypes was changed at the top of the file to utilize my_ctypes, which adds the necessary changes
# to support the pickle-ing of our defined data structures and ctype primitives.
#
'''
@author: Pedram Amini
@license: GNU General Public License 2.0 or later
@contact: [email protected]
@organization: www.openrce.org
'''
from my_ctypes import *
from windows_h import *
###
### manually declare entities from Tlhelp32.h since i was unable to import using h2xml.py.
###
TH32CS_SNAPHEAPLIST = 0x00000001
TH32CS_SNAPPROCESS = 0x00000002
TH32CS_SNAPTHREAD = 0x00000004
TH32CS_SNAPMODULE = 0x00000008
TH32CS_INHERIT = 0x80000000
TH32CS_SNAPALL = (TH32CS_SNAPHEAPLIST | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD | TH32CS_SNAPMODULE)
class THREADENTRY32(Structure):
_fields_ = [
('dwSize', DWORD),
('cntUsage', DWORD),
('th32ThreadID', DWORD),
('th32OwnerProcessID', DWORD),
('tpBasePri', DWORD),
('tpDeltaPri', DWORD),
('dwFlags', DWORD),
]
class PROCESSENTRY32(Structure):
_fields_ = [
('dwSize', DWORD),
('cntUsage', DWORD),
('th32ProcessID', DWORD),
('th32DefaultHeapID', DWORD),
('th32ModuleID', DWORD),
('cntThreads', DWORD),
('th32ParentProcessID', DWORD),
('pcPriClassBase', DWORD),
('dwFlags', DWORD),
('szExeFile', CHAR * 260),
]
class MODULEENTRY32(Structure):
_fields_ = [
("dwSize", DWORD),
("th32ModuleID", DWORD),
("th32ProcessID", DWORD),
("GlblcntUsage", DWORD),
("ProccntUsage", DWORD),
("modBaseAddr", DWORD),
("modBaseSize", DWORD),
("hModule", DWORD),
("szModule", CHAR * 256),
("szExePath", CHAR * 260),
]
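# Usage sketch for the Toolhelp32 structures above (assumes kernel32 has been
# loaded via ctypes.windll, as pydbg itself does elsewhere; illustration only,
# not executed by this module):
#
#     kernel32 = windll.kernel32
#     snapshot = kernel32.CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
#     pe        = PROCESSENTRY32()
#     pe.dwSize = sizeof(PROCESSENTRY32)
#     found     = kernel32.Process32First(snapshot, byref(pe))
#
#     while found:
#         print pe.th32ProcessID, pe.szExeFile
#         found = kernel32.Process32Next(snapshot, byref(pe))
#
#     kernel32.CloseHandle(snapshot)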
###
### manually declare various structures as needed.
###
class SYSDBG_MSR(Structure):
_fields_ = [
("Address", c_ulong),
("Data", c_ulonglong),
]
###
### manually declare various #define's as needed.
###
# debug event codes.
EXCEPTION_DEBUG_EVENT = 0x00000001
CREATE_THREAD_DEBUG_EVENT = 0x00000002
CREATE_PROCESS_DEBUG_EVENT = 0x00000003
EXIT_THREAD_DEBUG_EVENT = 0x00000004
EXIT_PROCESS_DEBUG_EVENT = 0x00000005
LOAD_DLL_DEBUG_EVENT = 0x00000006
UNLOAD_DLL_DEBUG_EVENT = 0x00000007
OUTPUT_DEBUG_STRING_EVENT = 0x00000008
RIP_EVENT = 0x00000009
USER_CALLBACK_DEBUG_EVENT = 0xDEADBEEF # added for callback support in debug event loop.
# debug exception codes.
EXCEPTION_ACCESS_VIOLATION = 0xC0000005
EXCEPTION_BREAKPOINT = 0x80000003
EXCEPTION_GUARD_PAGE = 0x80000001
EXCEPTION_SINGLE_STEP = 0x80000004
# hw breakpoint conditions
HW_ACCESS = 0x00000003
HW_EXECUTE = 0x00000000
HW_WRITE = 0x00000001
CONTEXT_CONTROL = 0x00010001
CONTEXT_FULL = 0x00010007
CONTEXT_DEBUG_REGISTERS = 0x00010010
CREATE_NEW_CONSOLE = 0x00000010
DBG_CONTINUE = 0x00010002
DBG_EXCEPTION_NOT_HANDLED = 0x80010001
DBG_EXCEPTION_HANDLED = 0x00010001
DEBUG_PROCESS = 0x00000001
DEBUG_ONLY_THIS_PROCESS = 0x00000002
EFLAGS_RF = 0x00010000
EFLAGS_TRAP = 0x00000100
ERROR_NO_MORE_FILES = 0x00000012
FILE_MAP_READ = 0x00000004
FORMAT_MESSAGE_ALLOCATE_BUFFER = 0x00000100
FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000
INVALID_HANDLE_VALUE = 0xFFFFFFFF
MEM_COMMIT = 0x00001000
MEM_DECOMMIT = 0x00004000
MEM_IMAGE = 0x01000000
MEM_RELEASE = 0x00008000
PAGE_NOACCESS = 0x00000001
PAGE_READONLY = 0x00000002
PAGE_READWRITE = 0x00000004
PAGE_WRITECOPY = 0x00000008
PAGE_EXECUTE = 0x00000010
PAGE_EXECUTE_READ = 0x00000020
PAGE_EXECUTE_READWRITE = 0x00000040
PAGE_EXECUTE_WRITECOPY = 0x00000080
PAGE_GUARD = 0x00000100
PAGE_NOCACHE = 0x00000200
PAGE_WRITECOMBINE = 0x00000400
PROCESS_ALL_ACCESS = 0x001F0FFF
SE_PRIVILEGE_ENABLED = 0x00000002
SW_SHOW = 0x00000005
THREAD_ALL_ACCESS = 0x001F03FF
TOKEN_ADJUST_PRIVILEGES = 0x00000020
# for NtSystemDebugControl()
SysDbgReadMsr = 16
SysDbgWriteMsr = 17 | gpl-3.0 | 7,708,887,906,329,216,000 | 34.90303 | 119 | 0.6078 | false | 3.536119 | false | false | false |
savi-dev/heat | contrib/rackspace/heat/engine/plugins/cloud_server.py | 1 | 18524 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
import json
import paramiko
from Crypto.PublicKey import RSA
import novaclient.exceptions as novaexception
from heat.common import exception
from heat.openstack.common import log as logging
from heat.engine import scheduler
from heat.engine.resources import instance
from heat.engine.resources import nova_utils
from heat.db.sqlalchemy import api as db_api
from . import rackspace_resource # noqa
logger = logging.getLogger(__name__)
class CloudServer(instance.Instance):
"""Resource for Rackspace Cloud Servers."""
properties_schema = {'flavor': {'Type': 'String', 'Required': True,
'UpdateAllowed': True},
'image': {'Type': 'String', 'Required': True},
'user_data': {'Type': 'String'},
'key_name': {'Type': 'String'},
'Volumes': {'Type': 'List'},
'name': {'Type': 'String'}}
attributes_schema = {'PrivateDnsName': ('Private DNS name of the specified'
' instance.'),
'PublicDnsName': ('Public DNS name of the specified '
'instance.'),
'PrivateIp': ('Private IP address of the specified '
'instance.'),
'PublicIp': ('Public IP address of the specified '
'instance.')}
base_script = """#!/bin/bash
# Install cloud-init and heat-cfntools
%s
# Create data source for cloud-init
mkdir -p /var/lib/cloud/seed/nocloud-net
mv /tmp/userdata /var/lib/cloud/seed/nocloud-net/user-data
touch /var/lib/cloud/seed/nocloud-net/meta-data
chmod 600 /var/lib/cloud/seed/nocloud-net/*
# Run cloud-init & cfn-init
cloud-init start || cloud-init init
bash -x /var/lib/cloud/data/cfn-userdata > /root/cfn-userdata.log 2>&1 ||
exit 42
"""
# - Ubuntu 12.04: Verified working
ubuntu_script = base_script % """\
apt-get update
export DEBIAN_FRONTEND=noninteractive
apt-get install -y -o Dpkg::Options::="--force-confdef" -o \
Dpkg::Options::="--force-confold" cloud-init python-boto python-pip gcc \
python-dev
pip install heat-cfntools
cfn-create-aws-symlinks --source /usr/local/bin
"""
# - Fedora 17: Verified working
# - Fedora 18: Not working. selinux needs to be in "Permissive"
# mode for cloud-init to work. It's disabled by default in the
# Rackspace Cloud Servers image. To enable selinux, a reboot is
# required.
# - Fedora 19: Verified working
fedora_script = base_script % """\
yum install -y cloud-init python-boto python-pip gcc python-devel
pip-python install heat-cfntools
cfn-create-aws-symlinks
"""
# - Centos 6.4: Verified working
centos_script = base_script % """\
rpm -ivh http://mirror.rackspace.com/epel/6/i386/epel-release-6-8.noarch.rpm
yum install -y cloud-init python-boto python-pip gcc python-devel \
python-argparse
pip-python install heat-cfntools
"""
# - RHEL 6.4: Verified working
rhel_script = base_script % """\
rpm -ivh http://mirror.rackspace.com/epel/6/i386/epel-release-6-8.noarch.rpm
# The RPM DB stays locked for a few secs
while fuser /var/lib/rpm/*; do sleep 1; done
yum install -y cloud-init python-boto python-pip gcc python-devel \
python-argparse
pip-python install heat-cfntools
cfn-create-aws-symlinks
"""
debian_script = base_script % """\
echo "deb http://mirror.rackspace.com/debian wheezy-backports main" >> \
/etc/apt/sources.list
apt-get update
apt-get -t wheezy-backports install -y cloud-init
export DEBIAN_FRONTEND=noninteractive
apt-get install -y -o Dpkg::Options::="--force-confdef" -o \
Dpkg::Options::="--force-confold" python-pip gcc python-dev
pip install heat-cfntools
"""
# - Arch 2013.6: Not working (deps not in default package repos)
# TODO(jason): Install cloud-init & other deps from third-party repos
arch_script = base_script % """\
pacman -S --noconfirm python-pip gcc
"""
# - Gentoo 13.2: Not working (deps not in default package repos)
# TODO(jason): Install cloud-init & other deps from third-party repos
gentoo_script = base_script % """\
emerge cloud-init python-boto python-pip gcc python-devel
"""
# - OpenSUSE 12.3: Not working (deps not in default package repos)
# TODO(jason): Install cloud-init & other deps from third-party repos
opensuse_script = base_script % """\
zypper --non-interactive rm patterns-openSUSE-minimal_base-conflicts
zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
"""
# List of supported Linux distros and their corresponding config scripts
image_scripts = {'arch': None,
'centos': centos_script,
'debian': None,
'fedora': fedora_script,
'gentoo': None,
'opensuse': None,
'rhel': rhel_script,
'ubuntu': ubuntu_script}
script_error_msg = ("The %(path)s script exited with a non-zero exit "
"status. To see the error message, log into the "
"server and view %(log)s")
# Template keys supported for handle_update. Properties not
# listed here trigger an UpdateReplace
update_allowed_keys = ('Metadata', 'Properties')
def __init__(self, name, json_snippet, stack):
super(CloudServer, self).__init__(name, json_snippet, stack)
self._private_key = None
self._server = None
self._distro = None
self._public_ip = None
self._private_ip = None
self._flavor = None
self._image = None
self.rs = rackspace_resource.RackspaceResource(name,
json_snippet,
stack)
def physical_resource_name(self):
name = self.properties.get('name')
if name:
return name
return super(CloudServer, self).physical_resource_name()
def nova(self):
return self.rs.nova() # Override the Instance method
def cinder(self):
return self.rs.cinder()
@property
def server(self):
"""Get the Cloud Server object."""
if not self._server:
logger.debug("Calling nova().servers.get()")
self._server = self.nova().servers.get(self.resource_id)
return self._server
@property
def distro(self):
"""Get the Linux distribution for this server."""
if not self._distro:
logger.debug("Calling nova().images.get()")
image_data = self.nova().images.get(self.image)
self._distro = image_data.metadata['os_distro']
return self._distro
@property
def script(self):
"""Get the config script for the Cloud Server image."""
return self.image_scripts[self.distro]
@property
def flavor(self):
"""Get the flavors from the API."""
if not self._flavor:
self._flavor = nova_utils.get_flavor_id(self.nova(),
self.properties['flavor'])
return self._flavor
@property
def image(self):
if not self._image:
self._image = nova_utils.get_image_id(self.nova(),
self.properties['image'])
return self._image
@property
def private_key(self):
"""Return the private SSH key for the resource."""
if self._private_key:
return self._private_key
if self.id is not None:
private_key = db_api.resource_data_get(self, 'private_key')
if not private_key:
return None
self._private_key = private_key
return private_key
@private_key.setter
def private_key(self, private_key):
"""Save the resource's private SSH key to the database."""
self._private_key = private_key
if self.id is not None:
db_api.resource_data_set(self, 'private_key', private_key, True)
def _get_ip(self, ip_type):
"""Return the IP of the Cloud Server."""
if ip_type in self.server.addresses:
for ip in self.server.addresses[ip_type]:
if ip['version'] == 4:
return ip['addr']
raise exception.Error("Could not determine the %s IP of %s." %
(ip_type, self.properties['image']))
@property
def public_ip(self):
"""Return the public IP of the Cloud Server."""
if not self._public_ip:
self._public_ip = self._get_ip('public')
return self._public_ip
@property
def private_ip(self):
"""Return the private IP of the Cloud Server."""
if not self._private_ip:
self._private_ip = self._get_ip('private')
return self._private_ip
@property
def has_userdata(self):
if self.properties['user_data'] or self.metadata != {}:
return True
else:
return False
def validate(self):
"""Validate user parameters."""
self.flavor
self.image
# It's okay if there's no script, as long as user_data and
# metadata are empty
if not self.script and self.has_userdata:
return {'Error': "user_data/metadata are not supported for image"
" %s." % self.properties['image']}
def _run_ssh_command(self, command):
"""Run a shell command on the Cloud Server via SSH."""
with tempfile.NamedTemporaryFile() as private_key_file:
private_key_file.write(self.private_key)
private_key_file.seek(0)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
ssh.connect(self.public_ip,
username="root",
key_filename=private_key_file.name)
chan = ssh.get_transport().open_session()
chan.exec_command(command)
return chan.recv_exit_status()
def _sftp_files(self, files):
"""Transfer files to the Cloud Server via SFTP."""
with tempfile.NamedTemporaryFile() as private_key_file:
private_key_file.write(self.private_key)
private_key_file.seek(0)
pkey = paramiko.RSAKey.from_private_key_file(private_key_file.name)
transport = paramiko.Transport((self.public_ip, 22))
transport.connect(hostkey=None, username="root", pkey=pkey)
sftp = paramiko.SFTPClient.from_transport(transport)
for remote_file in files:
sftp_file = sftp.open(remote_file['path'], 'w')
sftp_file.write(remote_file['data'])
sftp_file.close()
def handle_create(self):
"""Create a Rackspace Cloud Servers container.
Rackspace Cloud Servers does not have the metadata service
running, so we have to transfer the user-data file to the
server and then trigger cloud-init.
"""
# Generate SSH public/private keypair
if self._private_key is not None:
rsa = RSA.importKey(self._private_key)
else:
rsa = RSA.generate(1024)
self.private_key = rsa.exportKey()
public_keys = [rsa.publickey().exportKey('OpenSSH')]
if self.properties.get('key_name'):
key_name = self.properties['key_name']
public_keys.append(nova_utils.get_keypair(self.nova(),
key_name).public_key)
personality_files = {
"/root/.ssh/authorized_keys": '\n'.join(public_keys)}
# Create server
client = self.nova().servers
logger.debug("Calling nova().servers.create()")
server = client.create(self.physical_resource_name(),
self.image,
self.flavor,
files=personality_files)
# Save resource ID to db
self.resource_id_set(server.id)
return server, scheduler.TaskRunner(self._attach_volumes_task())
def _attach_volumes_task(self):
tasks = (scheduler.TaskRunner(self._attach_volume, volume_id, device)
for volume_id, device in self.volumes())
return scheduler.PollingTaskGroup(tasks)
def _attach_volume(self, volume_id, device):
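        # Runs as a Heat scheduler task: each "yield" hands control back to the
        # TaskRunner until the volume has left the 'available'/'attaching'
        # states, after which the final status is checked.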
logger.debug("Calling nova().volumes.create_server_volume()")
self.nova().volumes.create_server_volume(self.server.id,
volume_id,
device or None)
yield
volume = self.cinder().get(volume_id)
while volume.status in ('available', 'attaching'):
yield
volume.get()
if volume.status != 'in-use':
raise exception.Error(volume.status)
def _detach_volumes_task(self):
tasks = (scheduler.TaskRunner(self._detach_volume, volume_id)
for volume_id, device in self.volumes())
return scheduler.PollingTaskGroup(tasks)
def _detach_volume(self, volume_id):
volume = self.cinder().get(volume_id)
volume.detach()
yield
while volume.status in ('in-use', 'detaching'):
yield
volume.get()
if volume.status != 'available':
raise exception.Error(volume.status)
def check_create_complete(self, cookie):
"""Check if server creation is complete and handle server configs."""
if not self._check_active(cookie):
return False
if self.has_userdata:
# Create heat-script and userdata files on server
raw_userdata = self.properties['user_data'] or ''
userdata = nova_utils.build_userdata(self, raw_userdata)
files = [{'path': "/tmp/userdata", 'data': userdata},
{'path': "/root/heat-script.sh", 'data': self.script}]
self._sftp_files(files)
# Connect via SSH and run script
cmd = "bash -ex /root/heat-script.sh > /root/heat-script.log 2>&1"
exit_code = self._run_ssh_command(cmd)
if exit_code == 42:
raise exception.Error(self.script_error_msg %
{'path': "cfn-userdata",
'log': "/root/cfn-userdata.log"})
elif exit_code != 0:
raise exception.Error(self.script_error_msg %
{'path': "heat-script.sh",
'log': "/root/heat-script.log"})
return True
# TODO(jason): Make this consistent with Instance and inherit
def _delete_server(self, server):
"""Return a coroutine that deletes the Cloud Server."""
server.delete()
while True:
yield
try:
server.get()
if server.status == "DELETED":
break
elif server.status == "ERROR":
raise exception.Error("Deletion of server %s failed." %
server.name)
except novaexception.NotFound:
break
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
"""Try to update a Cloud Server's parameters.
If the Cloud Server's Metadata or flavor changed, update the
Cloud Server. If any other parameters changed, re-create the
Cloud Server with the new parameters.
"""
if 'Metadata' in tmpl_diff:
self.metadata = json_snippet['Metadata']
metadata_string = json.dumps(self.metadata)
files = [{'path': "/var/cache/heat-cfntools/last_metadata",
'data': metadata_string}]
self._sftp_files(files)
command = "bash -x /var/lib/cloud/data/cfn-userdata > " + \
"/root/cfn-userdata.log 2>&1"
exit_code = self._run_ssh_command(command)
if exit_code != 0:
raise exception.Error(self.script_error_msg %
{'path': "cfn-userdata",
'log': "/root/cfn-userdata.log"})
if 'flavor' in prop_diff:
flav = json_snippet['Properties']['flavor']
new_flavor = nova_utils.get_flavor_id(self.nova(), flav)
self.server.resize(new_flavor)
resize = scheduler.TaskRunner(nova_utils.check_resize,
self.server,
flav)
resize.start()
return resize
def _resolve_attribute(self, key):
"""Return the method that provides a given template attribute."""
attribute_function = {'PublicIp': self.public_ip,
'PrivateIp': self.private_ip,
'PublicDnsName': self.public_ip,
                              'PrivateDnsName': self.private_ip}
if key not in attribute_function:
raise exception.InvalidTemplateAttribute(resource=self.name,
key=key)
function = attribute_function[key]
logger.info('%s._resolve_attribute(%s) == %s'
% (self.name, key, function))
return unicode(function)
# pyrax module is required to work with Rackspace cloud server provider.
# If it is not installed, don't register cloud server provider
def resource_mapping():
if rackspace_resource.PYRAX_INSTALLED:
return {'Rackspace::Cloud::Server': CloudServer}
else:
return {}
| apache-2.0 | 1,099,201,296,044,303,500 | 37.672234 | 79 | 0.572932 | false | 4.190002 | false | false | false |
xpansa/pmis | purchase_request_to_rfq/models/purchase_request.py | 1 | 6455 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Eficent (<http://www.eficent.com/>)
# <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api, fields, models, _, exceptions
from datetime import datetime
from dateutil.relativedelta import relativedelta
_PURCHASE_ORDER_LINE_STATE = [
('none', 'No Purchase'),
('draft', 'RFQ'),
('confirmed', 'Confirmed'),
('done', 'Done'),
('cancel', 'Cancelled')
]
class PurchaseRequestLine(models.Model):
_inherit = "purchase.request.line"
@api.one
@api.depends('purchase_lines')
def _get_is_editable(self):
super(PurchaseRequestLine, self)._get_is_editable()
if self.purchase_lines:
self.is_editable = False
@api.one
def _purchased_qty(self):
purchased_qty = 0.0
for purchase_line in self.purchase_lines:
if purchase_line.state != 'cancel':
purchased_qty += purchase_line.product_qty
self.purchased_qty = purchased_qty
@api.one
@api.depends('purchase_lines.state')
def _get_purchase_state(self):
self.purchase_state = 'none'
if self.purchase_lines:
if any([po_line.state == 'done' for po_line in
self.purchase_lines]):
self.purchase_state = 'done'
elif all([po_line.state == 'cancel' for po_line in
self.purchase_lines]):
self.purchase_state = 'cancel'
elif any([po_line.state == 'confirmed' for po_line in
self.purchase_lines]):
self.purchase_state = 'confirmed'
elif all([po_line.state in ('draft', 'cancel') for po_line in
self.purchase_lines]):
self.purchase_state = 'draft'
purchased_qty = fields.Float(string='Quantity in RFQ or PO',
compute="_purchased_qty")
purchase_lines = fields.Many2many(
'purchase.order.line', 'purchase_request_purchase_order_line_rel',
'purchase_request_line_id',
'purchase_order_line_id', 'Purchase Order Lines', readonly=True)
purchase_state = fields.Selection(compute="_get_purchase_state",
string="Purchase Status",
selection=_PURCHASE_ORDER_LINE_STATE,
store=True,
default='none')
@api.one
def copy(self, default=None):
if default is None:
default = {}
default.update({
'purchase_lines': [],
})
return super(PurchaseRequestLine, self).copy(default)
@api.model
def _planned_date(self, request_line, delay=0.0):
company = request_line.company_id
date_planned = datetime.strptime(
request_line.date_required, '%Y-%m-%d') - \
relativedelta(days=company.po_lead)
if delay:
date_planned -= relativedelta(days=delay)
return date_planned and date_planned.strftime('%Y-%m-%d') \
or False
@api.model
def _calc_new_qty_price(self, request_line, po_line=None, cancel=False):
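        # Convert the requested quantity to the product's purchase UoM, add it
        # to the existing PO line, honour the supplier's minimum order quantity
        # (except for dropshipping) and reprice from the supplier pricelist
        # when the resulting quantity differs from the PO line quantity.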
uom_obj = self.env['product.uom']
qty = uom_obj._compute_qty(request_line.product_uom_id.id,
request_line.product_qty,
request_line.product_id.uom_po_id.id)
# Make sure we use the minimum quantity of the partner corresponding
# to the PO. This does not apply in case of dropshipping
supplierinfo_min_qty = 0.0
if po_line.order_id.location_id.usage != 'customer':
if po_line.product_id.seller_id.id == \
po_line.order_id.partner_id.id:
supplierinfo_min_qty = po_line.product_id.seller_qty
else:
supplierinfo_obj = self.env['product.supplierinfo']
supplierinfos = supplierinfo_obj.search(
[('name', '=', po_line.order_id.partner_id.id),
('product_tmpl_id', '=',
po_line.product_id.product_tmpl_id.id)])
if supplierinfos:
supplierinfo_min_qty = supplierinfos[0].min_qty
if supplierinfo_min_qty == 0.0:
qty += po_line.product_qty
else:
# Recompute quantity by adding existing running procurements.
for rl in po_line.purchase_request_lines:
qty += uom_obj._compute_qty(rl.product_uom_id.id,
rl.product_qty,
rl.product_id.uom_po_id.id)
qty = max(qty, supplierinfo_min_qty) if qty > 0.0 else 0.0
price = po_line.price_unit
if qty != po_line.product_qty:
pricelist_obj = self.pool['product.pricelist']
pricelist_id = po_line.order_id.partner_id.\
property_product_pricelist_purchase.id
price = pricelist_obj.price_get(
self.env.cr, self.env.uid, [pricelist_id],
request_line.product_id.id, qty,
po_line.order_id.partner_id.id,
{'uom': request_line.product_id.uom_po_id.id})[pricelist_id]
return qty, price
@api.multi
def unlink(self):
for line in self:
if line.purchase_lines:
raise exceptions.Warning(
_('You cannot delete a record that refers to purchase '
'lines!'))
return super(PurchaseRequestLine, self).unlink()
| agpl-3.0 | 1,495,572,701,019,373,000 | 40.645161 | 78 | 0.550736 | false | 4.129878 | false | false | false |
PeterDaveHello/eden | modules/templates/RMSAmericas/menus.py | 4 | 31132 | # -*- coding: utf-8 -*-
from gluon import current
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
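# URL filter vars used to restrict organisation lists to RC/RC National Societies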
red_cross_filter = {"organisation_type.name" : "Red Cross / Red Crescent"}
# =============================================================================
class S3MainMenu(default.S3MainMenu):
""" Custom Application Main Menu """
# -------------------------------------------------------------------------
@classmethod
def menu(cls):
""" Compose Menu """
# Modules menus
main_menu = MM()(
cls.menu_modules(),
)
# Additional menus
current.menu.personal = cls.menu_personal()
current.menu.lang = cls.menu_lang()
current.menu.about = cls.menu_about()
current.menu.org = cls.menu_org()
# @todo: restore?
#current.menu.dashboard = cls.menu_dashboard()
return main_menu
# -------------------------------------------------------------------------
@classmethod
def menu_modules(cls):
""" Custom Modules Menu """
T = current.T
auth = current.auth
has_role = auth.s3_has_role
root_org = auth.root_org_name()
system_roles = current.session.s3.system_roles
ADMIN = system_roles.ADMIN
ORG_ADMIN = system_roles.ORG_ADMIN
s3db = current.s3db
s3db.inv_recv_crud_strings()
inv_recv_list = current.response.s3.crud_strings.inv_recv.title_list
use_certs = lambda i: current.deployment_settings.get_hrm_use_certificates()
def hrm(item):
return root_org != "Honduran Red Cross" or \
has_role(ORG_ADMIN)
def inv(item):
return root_org != "Honduran Red Cross" or \
has_role("hn_wh_manager") or \
has_role("hn_national_wh_manager") or \
has_role(ORG_ADMIN)
def basic_warehouse(i):
if root_org == "Honduran Red Cross" and \
not (has_role("hn_national_wh_manager") or \
has_role(ORG_ADMIN)):
# Hide menu entries which user shouldn't need access to
return False
else:
return True
def multi_warehouse(i):
if root_org == "Honduran Red Cross" and \
not (has_role("hn_national_wh_manager") or \
has_role(ORG_ADMIN)):
# Only responsible for 1 warehouse so hide menu entries which should be accessed via Tabs on their warehouse
return False
else:
return True
def vol(item):
return root_org != "Honduran Red Cross" or \
has_role(ORG_ADMIN)
return [
homepage("gis")(
),
homepage("hrm", "org", name=T("Staff"),
vars=dict(group="staff"), check=hrm)(
MM("Staff", c="hrm", f="staff", m="summary"),
MM("Teams", c="hrm", f="group"),
MM("National Societies", c="org", f="organisation",
vars = red_cross_filter),
MM("Offices", c="org", f="office"),
MM("Job Titles", c="hrm", f="job_title"),
#MM("Skill List", c="hrm", f="skill"),
MM("Training Events", c="hrm", f="training_event"),
MM("Training Courses", c="hrm", f="course"),
MM("Certificate List", c="hrm", f="certificate", check=use_certs),
),
homepage("vol", name=T("Volunteers"), check=vol)(
MM("Volunteers", c="vol", f="volunteer", m="summary"),
MM("Teams", c="vol", f="group"),
MM("Volunteer Roles", c="vol", f="job_title"),
MM("Programs", c="vol", f="programme"),
#MM("Skill List", c="vol", f="skill"),
MM("Training Events", c="vol", f="training_event"),
MM("Training Courses", c="vol", f="course"),
MM("Certificate List", c="vol", f="certificate", check=use_certs),
),
#homepage("member")(
# MM("Members", c="member", f="membership", m="summary"),
#),
homepage("inv", "supply", "req", check=inv)(
MM("Warehouses", c="inv", f="warehouse", m="summary", check=multi_warehouse),
MM(inv_recv_list, c="inv", f="recv", check=multi_warehouse),
MM("Sent Shipments", c="inv", f="send", check=multi_warehouse),
MM("Items", c="supply", f="item", check=basic_warehouse),
MM("Catalogs", c="supply", f="catalog", check=basic_warehouse),
#MM("Item Categories", c="supply", f="item_category"),
M("Suppliers", c="inv", f="supplier", check=basic_warehouse)(),
M("Facilities", c="inv", f="facility", check=basic_warehouse)(),
M("Requests", c="req", f="req")(),
#M("Commitments", f="commit")(),
),
#homepage("asset")(
# MM("Assets", c="asset", f="asset", m="summary"),
# MM("Items", c="asset", f="item", m="summary"),
#),
#homepage("survey")(
# MM("Assessment Templates", c="survey", f="template"),
# MM("Disaster Assessments", c="survey", f="series"),
#),
homepage("project")(
MM("Projects", c="project", f="project", m="summary"),
MM("Locations", c="project", f="location"),
#MM("Outreach", c="po", f="index", check=outreach),
),
#homepage("vulnerability")(
# MM("Map", c="vulnerability", f="index"),
#),
#homepage("event")(
# MM("Events", c="event", f="event"),
# MM("Incident Reports", c="event", f="incident_report"),
#),
#homepage("deploy", name="RDRT", f="mission", m="summary",
# vars={"~.status__belongs": "2"})(
# MM("Missions", c="deploy", f="mission", m="summary"),
# MM("Members", c="deploy", f="human_resource", m="summary"),
#),
]
# -------------------------------------------------------------------------
@classmethod
def menu_org(cls):
""" Custom Organisation Menu """
OM = S3OrgMenuLayout
return OM()
# -------------------------------------------------------------------------
@classmethod
def menu_lang(cls):
s3 = current.response.s3
# Language selector
menu_lang = ML("Language", right=True)
for language in s3.l10n_languages.items():
code, name = language
menu_lang(
ML(name, translate=False, lang_code=code, lang_name=name)
)
return menu_lang
# -------------------------------------------------------------------------
@classmethod
def menu_personal(cls):
""" Custom Personal Menu """
auth = current.auth
s3 = current.response.s3
settings = current.deployment_settings
if not auth.is_logged_in():
request = current.request
login_next = URL(args=request.args, vars=request.vars)
if request.controller == "default" and \
request.function == "user" and \
"_next" in request.get_vars:
login_next = request.get_vars["_next"]
self_registration = settings.get_security_self_registration()
menu_personal = MP()(
MP("Register", c="default", f="user",
m="register", check=self_registration),
MP("Login", c="default", f="user",
m="login", vars=dict(_next=login_next)),
MP("Lost Password", c="default", f="user",
m="retrieve_password"),
)
else:
s3_has_role = auth.s3_has_role
is_org_admin = lambda i: s3_has_role("ORG_ADMIN") and \
not s3_has_role("ADMIN")
menu_personal = MP()(
MP("Administration", c="admin", f="index",
check=s3_has_role("ADMIN")),
MP("Administration", c="admin", f="user",
check=is_org_admin),
MP("Profile", c="default", f="person"),
MP("Change Password", c="default", f="user",
m="change_password"),
MP("Logout", c="default", f="user",
m="logout"),
)
return menu_personal
# -------------------------------------------------------------------------
@classmethod
def menu_about(cls):
menu_about = MA(c="default")(
MA("About Us", f="about"),
MA("Contact", f="contact"),
MA("Help", f="help"),
MA("Privacy", f="privacy"),
)
return menu_about
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
""" Custom Controller Menus """
# -------------------------------------------------------------------------
def admin(self):
""" ADMIN menu """
# Standard Admin Menu
menu = super(S3OptionsMenu, self).admin()
# Additional Items
menu(M("Map Settings", c="gis", f="config"),
M("Content Management", c="cms", f="index"),
)
return menu
# -------------------------------------------------------------------------
def gis(self):
""" GIS / GIS Controllers """
if current.request.function == "index":
# Empty so as to leave maximum space for the Map
# - functionality accessible via the Admin menu instead
return None
else:
return super(S3OptionsMenu, self).gis()
# -------------------------------------------------------------------------
@staticmethod
def hrm():
""" HRM Human Resource Management """
has_role = current.auth.s3_has_role
s3 = current.session.s3
ADMIN = s3.system_roles.ADMIN
settings = current.deployment_settings
if "hrm" not in s3:
current.s3db.hrm_vars()
hrm_vars = s3.hrm
SECTORS = "Clusters" if settings.get_ui_label_cluster() \
else "Sectors"
manager_mode = lambda i: hrm_vars.mode is None
personal_mode = lambda i: hrm_vars.mode is not None
is_org_admin = lambda i: hrm_vars.orgs and True or \
has_role(ADMIN)
is_super_editor = lambda i: has_role("staff_super") or \
has_role("vol_super")
staff = {"group": "staff"}
use_certs = lambda i: settings.get_hrm_use_certificates()
return M()(
M("Staff", c="hrm", f=("staff", "person"), m="summary",
check=manager_mode)(
M("Create", m="create"),
M("Import", f="person", m="import",
vars=staff, p="create"),
),
M("Staff & Volunteers (Combined)",
c="hrm", f="human_resource", m="summary",
check=[manager_mode, is_super_editor]),
M("Teams", c="hrm", f="group",
check=manager_mode)(
M("Create", m="create"),
M("Search Members", f="group_membership"),
M("Import", f="group_membership", m="import"),
),
M("National Societies", c="org",
f="organisation",
vars=red_cross_filter,
check=manager_mode)(
M("Create", m="create",
vars=red_cross_filter
),
M("Import", m="import", p="create", check=is_org_admin)
),
M("Offices", c="org", f="office",
check=manager_mode)(
M("Create", m="create"),
M("Import", m="import", p="create"),
),
M("Department Catalog", c="hrm", f="department",
check=manager_mode)(
M("Create", m="create"),
),
M("Job Title Catalog", c="hrm", f="job_title",
check=manager_mode)(
M("Create", m="create"),
M("Import", m="import", p="create", check=is_org_admin),
),
#M("Skill Catalog", f="skill",
# check=manager_mode)(
# M("Create", m="create"),
# #M("Skill Provisions", f="skill_provision"),
#),
M("Training Events", c="hrm", f="training_event",
check=manager_mode)(
M("Create", m="create"),
M("Search Training Participants", f="training"),
M("Import Participant List", f="training", m="import"),
),
M("Reports", c="hrm", f="staff", m="report",
check=manager_mode)(
M("Staff Report", m="report"),
M("Expiring Staff Contracts Report",
vars=dict(expiring="1")),
M("Training Report", f="training", m="report"),
),
M("Training Course Catalog", c="hrm", f="course",
check=manager_mode)(
M("Create", m="create"),
M("Import", m="import", p="create", check=is_org_admin),
M("Course Certificates", f="course_certificate"),
),
M("Certificate Catalog", c="hrm", f="certificate",
check=[manager_mode, use_certs])(
M("Create", m="create"),
M("Import", m="import", p="create", check=is_org_admin),
#M("Skill Equivalence", f="certificate_skill"),
),
M("Organization Types", c="org", f="organisation_type",
restrict=[ADMIN],
check=manager_mode)(
M("Create", m="create"),
),
M("Office Types", c="org", f="office_type",
restrict=[ADMIN],
check=manager_mode)(
M("Create", m="create"),
),
#M("Facility Types", c="org", f="facility_type",
# restrict=[ADMIN],
# check=manager_mode)(
# M("Create", m="create"),
#),
#M("My Profile", c="hrm", f="person",
# check=personal_mode, vars=dict(access="personal")),
# This provides the link to switch to the manager mode:
M("Human Resources", c="hrm", f="index",
check=[personal_mode, is_org_admin]),
# This provides the link to switch to the personal mode:
#M("Personal Profile", c="hrm", f="person",
# check=manager_mode, vars=dict(access="personal"))
)
# -------------------------------------------------------------------------
def org(self):
""" Organisation Management """
# Same as HRM
return self.hrm()
# -------------------------------------------------------------------------
@staticmethod
def vol():
""" Volunteer Management """
auth = current.auth
has_role = auth.s3_has_role
s3 = current.session.s3
ADMIN = s3.system_roles.ADMIN
root_org = auth.root_org_name()
# Custom conditions for the check-hook, as lambdas in order
# to have them checked only immediately before rendering:
manager_mode = lambda i: s3.hrm.mode is None
personal_mode = lambda i: s3.hrm.mode is not None
is_org_admin = lambda i: s3.hrm.orgs and True or \
has_role(ADMIN)
is_super_editor = lambda i: has_role("vol_super") or \
has_role("staff_super")
settings = current.deployment_settings
use_certs = lambda i: settings.get_hrm_use_certificates()
show_programmes = lambda i: settings.get_hrm_vol_experience() == "programme"
show_tasks = lambda i: settings.has_module("project") and \
settings.get_project_mode_task()
teams = settings.get_hrm_teams()
use_teams = lambda i: teams
return M(c="vol")(
M("Volunteers", f="volunteer", m="summary",
check=[manager_mode])(
M("Create", m="create"),
M("Import", f="person", m="import",
vars={"group":"volunteer"}, p="create"),
),
M("Staff & Volunteers (Combined)",
c="vol", f="human_resource", m="summary",
check=[manager_mode, is_super_editor]),
M(teams, f="group",
check=[manager_mode, use_teams])(
M("Create", m="create"),
M("Search Members", f="group_membership"),
M("Import", f="group_membership", m="import"),
),
#M("Department Catalog", f="department",
# check=manager_mode)(
# M("Create", m="create"),
#),
M("Volunteer Role Catalog", f="job_title",
check=[manager_mode])(
M("Create", m="create"),
M("Import", m="import", p="create", check=is_org_admin),
),
#M("Skill Catalog", f="skill",
# check=[manager_mode])(
# M("Create", m="create"),
# #M("Skill Provisions", f="skill_provision"),
#),
M("Training Events", f="training_event",
check=manager_mode)(
M("Create", m="create"),
M("Search Training Participants", f="training"),
M("Import Participant List", f="training", m="import"),
),
M("Training Course Catalog", f="course",
check=manager_mode)(
M("Create", m="create"),
#M("Course Certificates", f="course_certificate"),
),
M("Certificate Catalog", f="certificate",
check=[manager_mode, use_certs])(
M("Create", m="create"),
#M("Skill Equivalence", f="certificate_skill"),
),
M("Programs", f="programme",
check=[manager_mode, show_programmes])(
M("Create", m="create"),
M("Import Hours", f="programme_hours", m="import"),
),
M("Awards", f="award",
check=[manager_mode, is_org_admin])(
M("Create", m="create"),
),
M("Reports", f="volunteer", m="report",
check=manager_mode)(
M("Volunteer Report", m="report"),
M("Hours by Role Report", f="programme_hours", m="report",
vars=Storage(rows="job_title_id",
cols="month",
fact="sum(hours)"),
check=show_programmes),
M("Hours by Program Report", f="programme_hours", m="report",
vars=Storage(rows="programme_id",
cols="month",
fact="sum(hours)"),
check=show_programmes),
M("Training Report", f="training", m="report"),
),
#M("My Profile", f="person",
# check=personal_mode, vars=dict(access="personal")),
M("My Tasks", f="task",
check=[personal_mode, show_tasks],
vars=dict(access="personal",
mine=1)),
# This provides the link to switch to the manager mode:
M("Volunteer Management", f="index",
check=[personal_mode, is_org_admin]),
# This provides the link to switch to the personal mode:
#M("Personal Profile", f="person",
# check=manager_mode, vars=dict(access="personal"))
)
# -------------------------------------------------------------------------
@staticmethod
def inv():
""" INV / Inventory """
auth = current.auth
has_role = auth.s3_has_role
system_roles = current.session.s3.system_roles
ADMIN = system_roles.ADMIN
ORG_ADMIN = system_roles.ORG_ADMIN
s3db = current.s3db
s3db.inv_recv_crud_strings()
inv_recv_list = current.response.s3.crud_strings.inv_recv.title_list
settings = current.deployment_settings
#use_adjust = lambda i: not settings.get_inv_direct_stock_edits()
root_org = auth.root_org_name()
def use_adjust(i):
if root_org in ("Australian Red Cross", "Honduran Red Cross"):
# Australian & Honduran RC use proper Logistics workflow
return True
else:
# Others use simplified version
return False
#def use_facilities(i):
# if root_org == "Honduran Red Cross":
# # Honduran RC don't use Facilities
# return False
# else:
# return True
def basic_warehouse(i):
if root_org == "Honduran Red Cross" and \
not (has_role("hn_national_wh_manager") or \
has_role(ORG_ADMIN)):
# Hide menu entries which user shouldn't need access to
return False
else:
return True
def multi_warehouse(i):
if root_org == "Honduran Red Cross" and \
not (has_role("hn_national_wh_manager") or \
has_role(ORG_ADMIN)):
# Only responsible for 1 warehouse so hide menu entries which should be accessed via Tabs on their warehouse
                # & other things that HNRC don't use
return False
else:
return True
def use_kits(i):
if root_org == "Honduran Red Cross":
# Honduran RC use Kits
return True
else:
return False
def use_types(i):
if root_org == "Nepal Red Cross Society":
# Nepal RC use Warehouse Types
return True
else:
return False
use_commit = lambda i: settings.get_req_use_commit()
return M()(
#M("Home", f="index"),
M("Warehouses", c="inv", f="warehouse", m="summary", check=multi_warehouse)(
M("Create", m="create"),
M("Import", m="import", p="create"),
),
M("Warehouse Stock", c="inv", f="inv_item", args="summary")(
M("Search Shipped Items", f="track_item"),
M("Adjust Stock Levels", f="adj", check=use_adjust),
M("Kitting", f="kitting", check=use_kits),
M("Import", f="inv_item", m="import", p="create"),
),
M("Reports", c="inv", f="inv_item")(
M("Warehouse Stock", f="inv_item",m="report"),
M("Expiration Report", c="inv", f="track_item",
vars=dict(report="exp")),
#M("Monetization Report", c="inv", f="inv_item",
# vars=dict(report="mon")),
#M("Utilization Report", c="inv", f="track_item",
# vars=dict(report="util")),
#M("Summary of Incoming Supplies", c="inv", f="track_item",
# vars=dict(report="inc")),
# M("Summary of Releases", c="inv", f="track_item",
# vars=dict(report="rel")),
),
M(inv_recv_list, c="inv", f="recv", check=multi_warehouse)(
M("Create", m="create"),
),
M("Sent Shipments", c="inv", f="send", check=multi_warehouse)(
M("Create", m="create"),
M("Search Shipped Items", f="track_item"),
),
M("Items", c="supply", f="item", m="summary", check=basic_warehouse)(
M("Create", m="create"),
M("Import", f="catalog_item", m="import", p="create"),
),
# Catalog Items moved to be next to the Item Categories
#M("Catalog Items", c="supply", f="catalog_item")(
# M("Create", m="create"),
#),
#M("Brands", c="supply", f="brand",
# restrict=[ADMIN])(
# M("Create", m="create"),
#),
M("Catalogs", c="supply", f="catalog", check=basic_warehouse)(
M("Create", m="create"),
),
M("Item Categories", c="supply", f="item_category",
restrict=[ADMIN])(
M("Create", m="create"),
),
M("Suppliers", c="inv", f="supplier", check=basic_warehouse)(
M("Create", m="create"),
M("Import", m="import", p="create"),
),
M("Facilities", c="inv", f="facility", check=basic_warehouse)(
M("Create", m="create", t="org_facility"),
),
M("Facility Types", c="inv", f="facility_type",
restrict=[ADMIN])(
M("Create", m="create"),
),
M("Warehouse Types", c="inv", f="warehouse_type", check=use_types,
restrict=[ADMIN])(
M("Create", m="create"),
),
M("Requests", c="req", f="req")(
M("Create", m="create"),
M("Requested Items", f="req_item"),
),
M("Commitments", c="req", f="commit", check=use_commit)(
),
)
# -------------------------------------------------------------------------
def req(self):
""" Requests Management """
# Same as Inventory
return self.inv()
# -------------------------------------------------------------------------
@staticmethod
def project():
""" PROJECT / Project Tracking & Management """
root_org = current.auth.root_org_name()
def community_volunteers(i):
if root_org == "Honduran Red Cross":
return True
else:
return False
menu = M(c="project")(
M("Programs", f="programme")(
M("Create", m="create"),
),
M("Projects", f="project", m="summary")(
M("Create", m="create"),
),
M("Locations", f="location")(
# Better created from tab (otherwise Activity Type filter won't work)
#M("Create", m="create"),
M("Map", m="map"),
M("Community Contacts", f="location_contact"),
M("Community Volunteers", f="volunteer",
check=community_volunteers),
),
M("Reports", f="location", m="report")(
M("3W", f="location", m="report"),
M("Beneficiaries", f="beneficiary", m="report"),
#M("Indicators", f="indicator", m="report",
# check=indicators,
# ),
#M("Indicators over Time", f="indicator", m="timeplot",
# check=indicators,
# ),
M("Funding", f="organisation", m="report"),
),
M("Import", f="project", m="import", p="create")(
M("Import Projects", m="import", p="create"),
M("Import Project Organizations", f="organisation",
m="import", p="create"),
M("Import Project Communities", f="location",
m="import", p="create"),
),
M("Partner Organizations", f="partners")(
M("Create", m="create"),
M("Import", m="import", p="create"),
),
M("Activity Types", f="activity_type")(
M("Create", m="create"),
),
M("Beneficiary Types", f="beneficiary_type")(
M("Create", m="create"),
),
M("Demographics", f="demographic")(
M("Create", m="create"),
),
M("Hazards", f="hazard")(
M("Create", m="create"),
),
#M("Indicators", f="indicator",
# check=indicators)(
# M("Create", m="create"),
#),
M("Sectors", f="sector")(
M("Create", m="create"),
),
M("Themes", f="theme")(
M("Create", m="create"),
),
)
return menu
# END =========================================================================
| mit | 115,121,731,783,432,460 | 41.94069 | 124 | 0.416645 | false | 4.618991 | false | false | false |
qxf2/qxf2-page-object-model | page_objects/hamburger_menu_object.py | 1 | 1429 | """
This class models the hamburger menu object as a Page Object
The hamburger menu has a bunch of options that can be:
a) Clicked
b) Hovered over
"""
import conf.locators_conf as locators
from utils.Wrapit import Wrapit
class Hamburger_Menu_Object:
"Page Object for the hamburger menu"
#locators
menu_icon = locators.menu_icon
menu_link = locators.menu_link
menu_item = locators.menu_item
@Wrapit._exceptionHandler
    def goto_menu_link(self, my_link, expected_url_string=None):
"Navigate to a link: Hover + Click or just Click"
#Format for link: string separated by '>'
#E.g.: 'Approach > Where we start'
split_link = my_link.split('>')
hover_list = split_link[:-1]
self.click_hamburger_menu()
for element in hover_list:
self.hover(self.menu_item%element.strip())
result_flag = self.click_element(self.menu_link%split_link[-1].strip())
#Additional check to see if we went to the right page
if expected_url_string is not None:
result_flag &= True if expected_url_string in self.get_current_url() else False
#If the action failed, close the Hamburger menu
if result_flag is False:
self.click_hamburger_menu()
return result_flag
def click_hamburger_menu(self):
"Click on the hamburger menu icon"
return self.click_element(self.menu_icon)
| mit | -714,251,458,347,728,100 | 31.477273 | 91 | 0.658502 | false | 3.563591 | false | false | false |
jgarzik/smartcoin | p2p.py | 1 | 1652 | #
# p2p.py - Distributed bond P2P network node
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import struct
import hashlib
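# Wire protocol commands this node understands (note: a set literal, not a dict)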
messagemap = {
"version",
"verack",
"ping",
"pong",
"addr",
"getaddr",
}
class MsgNull(object):
def __init__(self):
pass
def SerializeToString(self):
return ''
def ParseFromString(self, data):
pass
def __str__(self):
return "MsgNull()"
def message_read(msg_start, f):
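	# Message framing (bitcoin-style): 4-byte network magic (msg_start),
	# 12-byte NUL-padded command, 4-byte little-endian payload length and a
	# 4-byte checksum (first bytes of double SHA-256 of the payload).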
try:
recvbuf = f.read(4 + 12 + 4 + 4)
except IOError:
return None
# check magic
if len(recvbuf) < 4:
return None
if recvbuf[:4] != msg_start:
raise ValueError("got garbage %s" % repr(recvbuf))
# check checksum
if len(recvbuf) < 4 + 12 + 4 + 4:
return None
# remaining header fields: command, msg length, checksum
command = recvbuf[4:4+12].split("\x00", 1)[0]
msglen = struct.unpack("<i", recvbuf[4+12:4+12+4])[0]
checksum = recvbuf[4+12+4:4+12+4+4]
# read message body
try:
recvbuf += f.read(msglen)
except IOError:
return None
msg = recvbuf[4+12+4+4:4+12+4+4+msglen]
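	# verify checksum: first four bytes of double SHA-256 over the payload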
th = hashlib.sha256(msg).digest()
h = hashlib.sha256(th).digest()
if checksum != h[:4]:
raise ValueError("got bad checksum %s" % repr(recvbuf))
recvbuf = recvbuf[4+12+4+4+msglen:]
return (command, msg)
def message_to_str(msg_start, command, message):
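	# Serialize "message" and wrap it in the same framed header that
	# message_read() parses: magic, padded command, length, checksum, payload.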
data = message.SerializeToString()
tmsg = msg_start
tmsg += command
tmsg += "\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
# add checksum
th = hashlib.sha256(data).digest()
h = hashlib.sha256(th).digest()
tmsg += h[:4]
tmsg += data
return tmsg
| mit | -4,048,811,819,991,636,000 | 19.395062 | 70 | 0.659806 | false | 2.655949 | false | false | false |
hasgeek/boxoffice | migrations/versions/45de268cd444_add_discount_policy_id_to_price.py | 1 | 1452 | """add discount_policy_id to price.
Revision ID: 45de268cd444
Revises: 4d7f840202d2
Create Date: 2016-03-31 15:29:51.897720
"""
# revision identifiers, used by Alembic.
revision = '45de268cd444'
down_revision = '4d7f840202d2'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
op.add_column(
'price',
sa.Column(
'discount_policy_id',
sqlalchemy_utils.types.uuid.UUIDType(binary=False),
nullable=True,
),
)
op.create_unique_constraint(
'price_item_id_discount_policy_id_key',
'price',
['item_id', 'discount_policy_id'],
)
op.create_foreign_key(
'price_discount_policy_id_fkey',
'price',
'discount_policy',
['discount_policy_id'],
['id'],
)
op.alter_column(
'discount_policy', 'percentage', existing_type=sa.INTEGER, nullable=True
)
op.add_column(
'discount_policy', sa.Column('is_price_based', sa.Boolean(), nullable=True)
)
def downgrade():
op.drop_constraint('price_discount_policy_id_fkey', 'price', type_='foreignkey')
op.drop_constraint('price_item_id_discount_policy_id_key', 'price', type_='unique')
op.drop_column('price', 'discount_policy_id')
op.drop_column('discount_policy', 'is_price_based')
op.alter_column(
'discount_policy', 'percentage', existing_type=sa.INTEGER, nullable=False
)
| agpl-3.0 | -152,441,631,084,219,000 | 25.888889 | 87 | 0.626722 | false | 3.322654 | false | false | false |
SublimeText/Pywin32 | lib/x32/win32com/client/tlbrowse.py | 29 | 7827 | import win32ui
import win32con
import win32api
import commctrl
import pythoncom
from pywin.mfc import dialog
class TLBrowserException(Exception):
"TypeLib browser internal error"
error = TLBrowserException
FRAMEDLG_STD = win32con.WS_CAPTION | win32con.WS_SYSMENU
SS_STD = win32con.WS_CHILD | win32con.WS_VISIBLE
BS_STD = SS_STD | win32con.WS_TABSTOP
ES_STD = BS_STD | win32con.WS_BORDER
LBS_STD = ES_STD | win32con.LBS_NOTIFY | win32con.LBS_NOINTEGRALHEIGHT | win32con.WS_VSCROLL
CBS_STD = ES_STD | win32con.CBS_NOINTEGRALHEIGHT | win32con.WS_VSCROLL
typekindmap = {
pythoncom.TKIND_ENUM : 'Enumeration',
pythoncom.TKIND_RECORD : 'Record',
pythoncom.TKIND_MODULE : 'Module',
pythoncom.TKIND_INTERFACE : 'Interface',
pythoncom.TKIND_DISPATCH : 'Dispatch',
pythoncom.TKIND_COCLASS : 'CoClass',
pythoncom.TKIND_ALIAS : 'Alias',
pythoncom.TKIND_UNION : 'Union'
}
TypeBrowseDialog_Parent=dialog.Dialog
class TypeBrowseDialog(TypeBrowseDialog_Parent):
"Browse a type library"
IDC_TYPELIST = 1000
IDC_MEMBERLIST = 1001
IDC_PARAMLIST = 1002
IDC_LISTVIEW = 1003
def __init__(self, typefile = None):
TypeBrowseDialog_Parent.__init__(self, self.GetTemplate())
try:
if typefile:
self.tlb = pythoncom.LoadTypeLib(typefile)
else:
self.tlb = None
except pythoncom.ole_error:
self.MessageBox("The file does not contain type information")
self.tlb = None
self.HookCommand(self.CmdTypeListbox, self.IDC_TYPELIST)
self.HookCommand(self.CmdMemberListbox, self.IDC_MEMBERLIST)
def OnAttachedObjectDeath(self):
self.tlb = None
self.typeinfo = None
self.attr = None
return TypeBrowseDialog_Parent.OnAttachedObjectDeath(self)
def _SetupMenu(self):
menu = win32ui.CreateMenu()
flags=win32con.MF_STRING|win32con.MF_ENABLED
menu.AppendMenu(flags, win32ui.ID_FILE_OPEN, "&Open...")
menu.AppendMenu(flags, win32con.IDCANCEL, "&Close")
mainMenu = win32ui.CreateMenu()
mainMenu.AppendMenu(flags|win32con.MF_POPUP, menu.GetHandle(), "&File")
self.SetMenu(mainMenu)
self.HookCommand(self.OnFileOpen,win32ui.ID_FILE_OPEN)
def OnFileOpen(self, id, code):
openFlags = win32con.OFN_OVERWRITEPROMPT | win32con.OFN_FILEMUSTEXIST
fspec = "Type Libraries (*.tlb, *.olb)|*.tlb;*.olb|OCX Files (*.ocx)|*.ocx|DLL's (*.dll)|*.dll|All Files (*.*)|*.*||"
dlg = win32ui.CreateFileDialog(1, None, None, openFlags, fspec)
if dlg.DoModal() == win32con.IDOK:
try:
self.tlb = pythoncom.LoadTypeLib(dlg.GetPathName())
except pythoncom.ole_error:
self.MessageBox("The file does not contain type information")
self.tlb = None
self._SetupTLB()
def OnInitDialog(self):
self._SetupMenu()
self.typelb = self.GetDlgItem(self.IDC_TYPELIST)
self.memberlb = self.GetDlgItem(self.IDC_MEMBERLIST)
self.paramlb = self.GetDlgItem(self.IDC_PARAMLIST)
self.listview = self.GetDlgItem(self.IDC_LISTVIEW)
# Setup the listview columns
itemDetails = (commctrl.LVCFMT_LEFT, 100, "Item", 0)
self.listview.InsertColumn(0, itemDetails)
itemDetails = (commctrl.LVCFMT_LEFT, 1024, "Details", 0)
self.listview.InsertColumn(1, itemDetails)
if self.tlb is None:
self.OnFileOpen(None,None)
else:
self._SetupTLB()
return TypeBrowseDialog_Parent.OnInitDialog(self)
def _SetupTLB(self):
self.typelb.ResetContent()
self.memberlb.ResetContent()
self.paramlb.ResetContent()
self.typeinfo = None
self.attr = None
if self.tlb is None: return
n = self.tlb.GetTypeInfoCount()
for i in range(n):
self.typelb.AddString(self.tlb.GetDocumentation(i)[0])
def _SetListviewTextItems(self, items):
self.listview.DeleteAllItems()
index = -1
for item in items:
index = self.listview.InsertItem(index+1,item[0])
data = item[1]
if data is None: data = ""
self.listview.SetItemText(index, 1, data)
def SetupAllInfoTypes(self):
infos = self._GetMainInfoTypes() + self._GetMethodInfoTypes()
self._SetListviewTextItems(infos)
def _GetMainInfoTypes(self):
pos = self.typelb.GetCurSel()
if pos<0: return []
docinfo = self.tlb.GetDocumentation(pos)
infos = [('GUID', str(self.attr[0]))]
infos.append(('Help File', docinfo[3]))
infos.append(('Help Context', str(docinfo[2])))
try:
infos.append(('Type Kind', typekindmap[self.tlb.GetTypeInfoType(pos)]))
except:
pass
info = self.tlb.GetTypeInfo(pos)
attr = info.GetTypeAttr()
infos.append(('Attributes', str(attr)))
for j in range(attr[8]):
flags = info.GetImplTypeFlags(j)
refInfo = info.GetRefTypeInfo(info.GetRefTypeOfImplType(j))
doc = refInfo.GetDocumentation(-1)
attr = refInfo.GetTypeAttr()
typeKind = attr[5]
typeFlags = attr[11]
desc = doc[0]
desc = desc + ", Flags=0x%x, typeKind=0x%x, typeFlags=0x%x" % (flags, typeKind, typeFlags)
if flags & pythoncom.IMPLTYPEFLAG_FSOURCE:
desc = desc + "(Source)"
infos.append( ('Implements', desc))
return infos
def _GetMethodInfoTypes(self):
pos = self.memberlb.GetCurSel()
if pos<0: return []
realPos, isMethod = self._GetRealMemberPos(pos)
ret = []
if isMethod:
funcDesc = self.typeinfo.GetFuncDesc(realPos)
id = funcDesc[0]
ret.append(("Func Desc", str(funcDesc)))
else:
id = self.typeinfo.GetVarDesc(realPos)[0]
docinfo = self.typeinfo.GetDocumentation(id)
ret.append(('Help String', docinfo[1]))
ret.append(('Help Context', str(docinfo[2])))
return ret
def CmdTypeListbox(self, id, code):
if code == win32con.LBN_SELCHANGE:
pos = self.typelb.GetCurSel()
if pos >= 0:
self.memberlb.ResetContent()
self.typeinfo = self.tlb.GetTypeInfo(pos)
self.attr = self.typeinfo.GetTypeAttr()
for i in range(self.attr[7]):
id = self.typeinfo.GetVarDesc(i)[0]
self.memberlb.AddString(self.typeinfo.GetNames(id)[0])
for i in range(self.attr[6]):
id = self.typeinfo.GetFuncDesc(i)[0]
self.memberlb.AddString(self.typeinfo.GetNames(id)[0])
self.SetupAllInfoTypes()
return 1
def _GetRealMemberPos(self, pos):
pos = self.memberlb.GetCurSel()
if pos >= self.attr[7]:
return pos - self.attr[7], 1
elif pos >= 0:
return pos, 0
else:
raise error("The position is not valid")
def CmdMemberListbox(self, id, code):
if code == win32con.LBN_SELCHANGE:
self.paramlb.ResetContent()
pos = self.memberlb.GetCurSel()
realPos, isMethod = self._GetRealMemberPos(pos)
if isMethod:
id = self.typeinfo.GetFuncDesc(realPos)[0]
names = self.typeinfo.GetNames(id)
for i in range(len(names)):
if i > 0:
self.paramlb.AddString(names[i])
self.SetupAllInfoTypes()
return 1
def GetTemplate(self):
"Return the template used to create this dialog"
w = 272 # Dialog width
h = 192 # Dialog height
style = FRAMEDLG_STD | win32con.WS_VISIBLE | win32con.DS_SETFONT | win32con.WS_MINIMIZEBOX
template = [['Type Library Browser', (0, 0, w, h), style, None, (8, 'Helv')], ]
template.append([130, "&Type", -1, (10, 10, 62, 9), SS_STD | win32con.SS_LEFT])
template.append([131, None, self.IDC_TYPELIST, (10, 20, 80, 80), LBS_STD])
template.append([130, "&Members", -1, (100, 10, 62, 9), SS_STD | win32con.SS_LEFT])
template.append([131, None, self.IDC_MEMBERLIST, (100, 20, 80, 80), LBS_STD])
template.append([130, "&Parameters", -1, (190, 10, 62, 9), SS_STD | win32con.SS_LEFT])
template.append([131, None, self.IDC_PARAMLIST, (190, 20, 75, 80), LBS_STD])
lvStyle = SS_STD | commctrl.LVS_REPORT | commctrl.LVS_AUTOARRANGE | commctrl.LVS_ALIGNLEFT | win32con.WS_BORDER | win32con.WS_TABSTOP
template.append(["SysListView32", "", self.IDC_LISTVIEW, (10, 110, 255, 65), lvStyle])
return template
if __name__=='__main__':
import sys
fname = None
try:
fname = sys.argv[1]
except:
pass
dlg = TypeBrowseDialog(fname)
try:
win32api.GetConsoleTitle()
dlg.DoModal()
except:
dlg.CreateWindow(win32ui.GetMainFrame())
| bsd-3-clause | 6,818,348,952,618,188,000 | 31.209877 | 135 | 0.700013 | false | 2.741506 | false | false | false |
our-city-app/oca-backend | src/shop/handlers.py | 1 | 26782 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import binascii
import datetime
import json
import logging
import os
import time
import urllib
import webapp2
from babel import Locale
from google.appengine.api import search, users as gusers
from google.appengine.ext import db
from google.appengine.ext.deferred import deferred
from google.appengine.ext.webapp import template
from markdown import Markdown
from mcfw.cache import cached
from mcfw.consts import MISSING
from mcfw.exceptions import HttpNotFoundException
from mcfw.restapi import rest, GenericRESTRequestHandler
from mcfw.rpc import serialize_complex_value, arguments, returns
from rogerthat.bizz.communities.communities import get_communities_by_country, get_community, get_community_countries
from rogerthat.bizz.friends import user_code_by_hash, makeFriends, ORIGIN_USER_INVITE
from rogerthat.bizz.registration import get_headers_for_consent
from rogerthat.bizz.service import SERVICE_LOCATION_INDEX, re_index_map_only
from rogerthat.bizz.session import create_session
from rogerthat.dal.app import get_app_by_id
from rogerthat.exceptions.login import AlreadyUsedUrlException, InvalidUrlException, ExpiredUrlException
from rogerthat.models import ProfilePointer, ServiceProfile
from rogerthat.pages.legal import DOC_TERMS_SERVICE, get_current_document_version, get_version_content, \
get_legal_language, LANGUAGES as LEGAL_LANGUAGES
from rogerthat.pages.login import SetPasswordHandler
from rogerthat.rpc import users
from rogerthat.rpc.service import BusinessException
from rogerthat.settings import get_server_settings
from rogerthat.templates import get_languages_from_request
from rogerthat.to import ReturnStatusTO, RETURNSTATUS_TO_SUCCESS, WarningReturnStatusTO
from rogerthat.translations import DEFAULT_LANGUAGE
from rogerthat.utils import bizz_check, try_or_defer, get_country_code_by_ipaddress
from rogerthat.utils.app import get_app_id_from_app_user
from rogerthat.utils.cookie import set_cookie
from rogerthat.utils.service import create_service_identity_user
from shop import SHOP_JINJA_ENVIRONMENT
from shop.bizz import create_customer_signup, complete_customer_signup, get_organization_types, \
update_customer_consents, get_customer_signup, validate_customer_url_data, \
get_customer_consents
from shop.business.permissions import is_admin
from shop.constants import OFFICIALLY_SUPPORTED_LANGUAGES
from shop.models import Customer
from shop.to import CompanyTO, CustomerTO, CustomerLocationTO
from shop.view import get_shop_context, get_current_http_host
from solution_server_settings import get_solution_server_settings
from solutions import translate
from solutions.common.bizz.grecaptcha import recaptcha_verify
from solutions.common.bizz.settings import get_consents_for_community
from solutions.common.integrations.cirklo.cirklo import get_whitelisted_merchant
from solutions.common.integrations.cirklo.models import CirkloMerchant, CirkloCity
from solutions.common.markdown_newtab import NewTabExtension
from solutions.common.models import SolutionServiceConsent
from solutions.common.restapi.services import do_create_service
from solutions.common.to.settings import PrivacySettingsGroupTO
class StaticFileHandler(webapp2.RequestHandler):
def get(self, filename):
cur_path = os.path.dirname(__file__)
path = os.path.join(cur_path, u'html', filename)
with open(path, 'r') as f:
self.response.write(f.read())
class GenerateQRCodesHandler(webapp2.RequestHandler):
def get(self):
current_user = gusers.get_current_user()
if not is_admin(current_user):
self.abort(403)
path = os.path.join(os.path.dirname(__file__), 'html', 'generate_qr_codes.html')
context = get_shop_context()
self.response.out.write(template.render(path, context))
class CustomerMapHandler(webapp2.RequestHandler):
def get(self, app_id):
path = os.path.join(os.path.dirname(__file__), 'html', 'customer_map.html')
settings = get_server_settings()
lang = get_languages_from_request(self.request)[0]
translations = {
'merchants': translate(lang, 'merchants'),
'merchants_with_terminal': translate(lang, 'merchants_with_terminal'),
'community_services': translate(lang, 'community_services'),
'care': translate(lang, 'care'),
'associations': translate(lang, 'associations'),
}
params = {
'maps_key': settings.googleMapsKey,
'app_id': app_id,
'translations': json.dumps(translations)
}
self.response.out.write(template.render(path, params))
@cached(2, 21600)
@returns(unicode)
@arguments(app_id=unicode)
def get_customer_locations_for_app(app_id):
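    # Queries the service location search index for every service in the app
    # and returns the results serialized as a JSON list of CustomerLocationTO.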
query_string = (u'app_ids:"%s"' % app_id)
query = search.Query(query_string=query_string,
options=search.QueryOptions(returned_fields=['service', 'name', 'location', 'description'],
limit=1000))
search_result = search.Index(name=SERVICE_LOCATION_INDEX).search(query)
customers = {customer.service_email: customer for customer in Customer.list_by_app_id(app_id)}
def map_result(service_search_result):
customer_location = CustomerLocationTO()
for field in service_search_result.fields:
if field.name == 'service':
customer = customers.get(field.value.split('/')[0])
if customer:
customer_location.address = customer.address1
customer_location.type = customer.organization_type
if customer.address2:
customer_location.address += '\n%s' % customer.address2
if customer.zip_code or customer.city:
customer_location.address += '\n'
if customer.zip_code:
customer_location.address += customer.zip_code
if customer.zip_code and customer.city:
customer_location.address += ' '
if customer.city:
customer_location.address += customer.city
else:
customer_location.type = ServiceProfile.ORGANIZATION_TYPE_PROFIT
continue
if field.name == 'name':
customer_location.name = field.value
continue
if field.name == 'location':
customer_location.lat = field.value.latitude
customer_location.lon = field.value.longitude
continue
if field.name == 'description':
customer_location.description = field.value
continue
return customer_location
return json.dumps(serialize_complex_value([map_result(r) for r in search_result.results], CustomerLocationTO, True))
class CustomerMapServicesHandler(webapp2.RequestHandler):
def get(self, app_id):
customer_locations = get_customer_locations_for_app(app_id)
self.response.write(customer_locations)
@rest('/unauthenticated/loyalty/scanned', 'get', read_only_access=True, authenticated=False)
@returns(ReturnStatusTO)
@arguments(user_email_hash=unicode, merchant_email=unicode, app_id=unicode)
def rest_loyalty_scanned(user_email_hash, merchant_email, app_id):
try:
bizz_check(user_email_hash is not MISSING, 'user_email_hash is required')
bizz_check(merchant_email is not MISSING, 'merchant_email is required')
bizz_check(app_id is not MISSING, 'app_id is required')
user_code = user_code_by_hash(binascii.unhexlify(user_email_hash))
profile_pointer = ProfilePointer.get(user_code)
if not profile_pointer:
logging.debug('No ProfilePointer found with user_code %s', user_code)
raise BusinessException('User not found')
app_user = profile_pointer.user
bizz_check(get_app_by_id(app_id), 'App not found')
bizz_check(app_id == get_app_id_from_app_user(profile_pointer.user), 'Invalid user email hash')
merchant_found = False
for customer in Customer.list_by_user_email(merchant_email):
merchant_found = True
service_user = users.User(customer.service_email)
logging.info('Received loyalty scan of %s by %s (%s)', app_user, service_user, customer.user_email)
makeFriends(service_user, app_user, None, None, ORIGIN_USER_INVITE,
notify_invitee=False,
notify_invitor=False,
allow_unsupported_apps=True)
bizz_check(merchant_found, 'Merchant not found')
except BusinessException as e:
return ReturnStatusTO.create(False, e.message)
else:
return RETURNSTATUS_TO_SUCCESS
class PublicPageHandler(webapp2.RequestHandler):
@property
def language(self):
return get_languages_from_request(self.request)[0]
def translate(self, key, **kwargs):
return translate(self.language, key, **kwargs)
def render(self, template_name, **params):
if not params.get('language'):
params['language'] = self.language
routes = ['signin', 'signup', 'reset_password', 'set_password']
for route_name in routes:
url = self.url_for(route_name)
params[route_name + '_url'] = url
template_path = 'public/%s.html' % template_name
return SHOP_JINJA_ENVIRONMENT.get_template(template_path).render(params)
def return_error(self, message, **kwargs):
translated_message = self.translate(message, **kwargs)
self.response.out.write(self.render('error', message=translated_message))
def dispatch(self):
if users.get_current_user():
return self.redirect('/')
return super(PublicPageHandler, self).dispatch()
class CustomerSigninHandler(PublicPageHandler):
def get(self, app_id=None):
self.response.write(self.render('signin'))
class CustomerSignupHandler(PublicPageHandler):
def get(self):
language = (self.request.get('language') or self.language).split('_')[0]
if language not in LEGAL_LANGUAGES:
language = DEFAULT_LANGUAGE
solution_server_settings = get_solution_server_settings()
version = get_current_document_version(DOC_TERMS_SERVICE)
legal_language = get_legal_language(language)
countries = get_community_countries()
selected_country = get_country_code_by_ipaddress(os.environ.get('HTTP_X_FORWARDED_FOR', None))
if selected_country:
communities = get_communities_by_country(selected_country)
else:
communities = []
params = {
'recaptcha_site_key': solution_server_settings.recaptcha_site_key,
'email_verified': False,
'toc_content': get_version_content(legal_language, DOC_TERMS_SERVICE, version),
'language': language.lower(),
'languages': [(code, name) for code, name in OFFICIALLY_SUPPORTED_LANGUAGES.iteritems()
if code in LEGAL_LANGUAGES],
'countries': [(country, Locale(language, country).get_territory_name()) for country in countries],
'communities': communities,
'selected_country': selected_country,
'signup_success': json.dumps(self.render('signup_success', language=language))
}
self.response.write(self.render('signup', **params))
class CustomerSignupPasswordHandler(PublicPageHandler):
def get(self):
data = self.request.get('data')
email = self.request.get('email').rstrip('.')
params = {
'email': email,
'data': data,
'language': self.language,
'error': None,
}
self.response.write(self.render('signup_setpassword', **params))
def post(self):
json_data = json.loads(self.request.body)
email = json_data.get('email')
data = json_data.get('data')
password = json_data.get('password', '')
password_confirm = json_data.get('password_confirm')
error = None
try:
signup, _ = get_customer_signup(email, data) # type: CustomerSignup, dict
except ExpiredUrlException:
error = self.translate('link_expired', action='')
except AlreadyUsedUrlException:
error = self.translate('link_is_already_used', action='')
except InvalidUrlException:
error = self.translate('invalid_url')
if len(password) < 8:
error = self.translate('password_length_error', length=8)
elif password != password_confirm:
error = self.translate('password_match_error')
if not error:
tos_version = get_current_document_version(DOC_TERMS_SERVICE)
result = do_create_service(signup.city_customer, signup.language, True, signup, password, tos_version=tos_version)
if result.success:
service_email = result.data['service_email']
deferred.defer(complete_customer_signup, email, data, service_email)
try:
# Sleep to allow datastore indexes to update
time.sleep(2)
secret, _ = create_session(users.User(signup.company_email), ignore_expiration=True, cached=False)
server_settings = get_server_settings()
set_cookie(self.response, server_settings.cookieSessionName, secret)
except:
logging.error("Failed to create session", exc_info=True)
else:
result = WarningReturnStatusTO.create(False, error)
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(result.to_dict()))
class CustomerResetPasswordHandler(PublicPageHandler):
def get(self):
self.response.out.write(self.render('reset_password'))
class CustomerSetPasswordHandler(PublicPageHandler, SetPasswordHandler):
"""Inherit PublicPageHandler first to override SetPasswordHandler return_error()"""
def get(self):
email = self.request.get('email')
data = self.request.get('data')
try:
parsed_data = self.parse_and_validate_data(email, data)
except ExpiredUrlException as e:
return self.return_error("link_expired", action=e.action)
except AlreadyUsedUrlException as e:
return self.return_error("link_is_already_used", action=e.action)
except InvalidUrlException:
return self.return_error('invalid_url')
params = {
'name': parsed_data['n'],
'email': email,
'action': parsed_data['a'],
'data': data,
}
self.response.out.write(self.render('set_password', **params))
def post(self):
super(CustomerSetPasswordHandler, self).post()
@rest('/unauthenticated/osa/customer/signup', 'post', read_only_access=True, authenticated=False)
@returns(ReturnStatusTO)
@arguments(city_customer_id=(int, long), company=CompanyTO, customer=CustomerTO, recaptcha_token=unicode,
email_consents=dict)
def customer_signup(city_customer_id, company, customer, recaptcha_token, email_consents=None):
try:
headers = get_headers_for_consent(GenericRESTRequestHandler.getCurrentRequest())
create_customer_signup(city_customer_id, company, customer, recaptcha_token,
domain=get_current_http_host(with_protocol=True), headers=headers, accept_missing=True)
headers = get_headers_for_consent(GenericRESTRequestHandler.getCurrentRequest())
consents = email_consents or {}
context = u'User signup'
try_or_defer(update_customer_consents, customer.user_email, consents, headers, context)
return RETURNSTATUS_TO_SUCCESS
except BusinessException as e:
return ReturnStatusTO.create(False, e.message)
def parse_euvat_address_eu(address):
address = address.strip().splitlines()
zc_ci = address.pop()
zip_code, city = zc_ci.split(' ', 1)
address1 = address.pop(0) if len(address) > 0 else ''
address2 = address.pop(0) if len(address) > 0 else ''
return address1, address2, zip_code, city
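# Illustrative sketch (added; not part of the original handlers): the helper
# above expects a multi-line address whose last line is "<zip_code> <city>".
# The sample address below is made up purely for demonstration.
def _example_parse_euvat_address_eu():
    address1, address2, zip_code, city = parse_euvat_address_eu(
        'Main Street 1\nSuite 2\n9000 Ghent')
    assert (address1, address2, zip_code, city) == \
        ('Main Street 1', 'Suite 2', '9000', 'Ghent')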
@rest('/unauthenticated/osa/signup/community-info/<community_id:[^/]+>', 'get', read_only_access=True,
authenticated=False)
@returns(dict)
@arguments(community_id=(int, long), language=unicode)
def get_customer_info(community_id, language=None):
community = get_community(community_id)
if not community:
raise HttpNotFoundException('Community not found')
if not language:
request = GenericRESTRequestHandler.getCurrentRequest()
language = get_languages_from_request(request)[0]
customer = Customer.get_by_service_email(community.main_service) # type: Customer
organization_types = dict(get_organization_types(customer, community.default_app, language))
return {
'customer': {
'id': customer.id,
},
'organization_types': organization_types
}
@rest('/unauthenticated/osa/signup/communities/<country_code:[^/]+>', 'get', read_only_access=True, authenticated=False)
@returns([dict])
@arguments(country_code=unicode)
def api_get_communities(country_code):
return [{'name': community.name, 'id': community.id} for community in get_communities_by_country(country_code)]
@rest('/unauthenticated/osa/signup/privacy-settings/<community_id:[^/]+>', 'get', read_only_access=True,
authenticated=False)
@returns([PrivacySettingsGroupTO])
@arguments(community_id=(int, long), language=unicode)
def get_privacy_settings(community_id, language=None):
if not language:
request = GenericRESTRequestHandler.getCurrentRequest()
language = get_languages_from_request(request)[0]
return get_consents_for_community(community_id, language, [])
class CustomerCirkloAcceptHandler(PublicPageHandler):
def get_url(self, customer):
url_params = urllib.urlencode({'cid': customer.id})
return '/customers/consent/cirklo?{}'.format(url_params)
def dispatch(self):
# Don't redirect to dashboard when logged in
return super(PublicPageHandler, self).dispatch()
def get(self):
customer_id = self.request.get('cid')
if customer_id:
try:
customer = Customer.get_by_id(long(customer_id))
except:
return self.return_error('invalid_url')
else:
email = self.request.get('email')
data = self.request.get('data')
try:
data = validate_customer_url_data(email, data)
except InvalidUrlException:
return self.return_error('invalid_url')
customer = db.get(data['s']) # Customer
if not customer:
return self.abort(404)
consents = get_customer_consents(customer.user_email)
should_accept = False
if SolutionServiceConsent.TYPE_CITY_CONTACT not in consents.types:
consents.types.append(SolutionServiceConsent.TYPE_CITY_CONTACT)
should_accept = True
if SolutionServiceConsent.TYPE_CIRKLO_SHARE not in consents.types:
consents.types.append(SolutionServiceConsent.TYPE_CIRKLO_SHARE)
should_accept = True
params = {
'cirklo_accept_url': self.get_url(customer),
'should_accept': should_accept
}
self.response.out.write(self.render('cirklo_accept', **params))
def post(self):
try:
customer_id = self.request.get('cid')
customer = Customer.get_by_id(long(customer_id)) # type: Customer
if not customer:
raise Exception('Customer not found')
except:
self.redirect('/')
return
consents = get_customer_consents(customer.user_email)
should_put_consents = False
if SolutionServiceConsent.TYPE_CITY_CONTACT not in consents.types:
consents.types.append(SolutionServiceConsent.TYPE_CITY_CONTACT)
should_put_consents = True
if SolutionServiceConsent.TYPE_CIRKLO_SHARE not in consents.types:
consents.types.append(SolutionServiceConsent.TYPE_CIRKLO_SHARE)
should_put_consents = True
if should_put_consents:
consents.put()
community = get_community(customer.community_id)
city_id = CirkloCity.get_by_service_email(community.main_service).city_id
service_user_email = customer.service_user.email()
cirklo_merchant_key = CirkloMerchant.create_key(service_user_email)
cirklo_merchant = cirklo_merchant_key.get() # type: CirkloMerchant
if not cirklo_merchant:
cirklo_merchant = CirkloMerchant(key=cirklo_merchant_key) # type: CirkloMerchant
cirklo_merchant.denied = False
logging.debug('Creating new cirklo merchant')
cirklo_merchant.creation_date = datetime.datetime.utcfromtimestamp(customer.creation_time)
cirklo_merchant.service_user_email = service_user_email
cirklo_merchant.customer_id = customer.id
cirklo_merchant.city_id = city_id
cirklo_merchant.data = None
cirklo_merchant.populate_from_cirklo(get_whitelisted_merchant(city_id, customer.user_email))
cirklo_merchant.put()
logging.debug('Saving cirklo merchant: %s', cirklo_merchant)
service_identity_user = create_service_identity_user(customer.service_user)
try_or_defer(re_index_map_only, service_identity_user)
else:
            logging.debug('Not saving cirklo merchant, consents: %s', consents)
self.redirect(self.get_url(customer))
class VouchersCirkloSignupHandler(PublicPageHandler):
def get(self, city_id=''):
supported_languages = ["nl", "fr"]
language = (self.request.get('language') or self.language).split('_')[0].lower()
cities = []
if city_id and city_id != 'staging':
city = CirkloCity.create_key(city_id).get()
if city:
cities = [city]
if not cities:
if city_id and city_id == 'staging':
cities = [city for city in CirkloCity.list_signup_enabled() if city.city_id.startswith('staging-')]
else:
cities = [city for city in CirkloCity.list_signup_enabled() if not city.city_id.startswith('staging-')]
solution_server_settings = get_solution_server_settings()
if language not in supported_languages:
language = supported_languages[0]
if language == 'fr':
sorted_cities = sorted(cities, key=lambda x: x.signup_names.fr)
else:
sorted_cities = sorted(cities, key=lambda x: x.signup_names.nl)
params = {
'city_id': city_id or None,
'cities': sorted_cities,
'recaptcha_site_key': solution_server_settings.recaptcha_site_key,
'language': language,
'languages': [(code, name) for code, name in OFFICIALLY_SUPPORTED_LANGUAGES.iteritems()
if code in supported_languages]
}
md = Markdown(output='html', extensions=['nl2br', NewTabExtension()])
lines = [
'#### %s' % translate(language, 'cirklo_info_title'),
'<br />',
translate(language, 'cirklo_info_text_signup'),
'',
translate(language, 'cirklo_participation_text_signup'),
]
params['privacy_settings'] = {
'cirklo': {
'label': translate(language, 'consent_cirklo_share'),
'description': md.convert('\n\n'.join(lines))
},
'city': {
'label': translate(language, 'consent_city_contact'),
'description': '<h4>%s</h4>' % translate(language, 'consent_share_with_city')
}
}
params['signup_success'] = md.convert('\n\n'.join([translate(language, 'cirklo.signup.success')]))
self.response.write(self.render('cirklo_signup', **params))
def post(self):
json_data = json.loads(self.request.body)
logging.debug(json_data)
if not recaptcha_verify(json_data['recaptcha_token']):
logging.debug('Cannot verify recaptcha response')
self.abort(400)
if not CirkloCity.create_key(json_data['city_id']).get():
logging.debug('CirkloCity was invalid')
self.abort(400)
self.response.headers['Content-Type'] = 'text/json'
whitelisted_merchant = get_whitelisted_merchant(json_data['city_id'], json_data['company']['email'])
if whitelisted_merchant:
logging.debug('email found in cirklo db')
else:
cirklo_merchant = CirkloMerchant.get_by_city_id_and_email(json_data['city_id'], json_data['company']['email'])
if cirklo_merchant:
logging.debug('email found in osa db')
whitelisted_merchant = True
if whitelisted_merchant:
return self.response.out.write(json.dumps({
'success': False,
'errormsg': translate(json_data['language'], 'cirklo.email_already_used')
}))
merchant = CirkloMerchant()
merchant.service_user_email = None
merchant.customer_id = -1
merchant.city_id = json_data['city_id']
merchant.data = {
u'company': json_data['company'],
u'language': json_data['language']
}
merchant.emails = [json_data['company']['email']]
merchant.populate_from_cirklo(None)
merchant.denied = False
merchant.put()
self.response.headers['Content-Type'] = 'text/json'
return self.response.out.write(json.dumps({'success': True, 'errormsg': None}))
| apache-2.0 | -555,640,803,217,862,100 | 41.8512 | 126 | 0.647786 | false | 3.885391 | false | false | false |
foursquare/pants | src/python/pants/backend/python/tasks/wrapped_pex.py | 1 | 1760 | # coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from builtins import object
from copy import copy
logger = logging.getLogger(__name__)
class WrappedPEX(object):
"""Wrapper around a PEX that exposes only its run() method.
Allows us to set the PEX_PATH in the environment when running.
"""
_PEX_PATH_ENV_VAR_NAME = 'PEX_PATH'
def __init__(self, pex, extra_pex_paths=None):
"""
:param pex: The main pex we wrap.
:param extra_pex_paths: Other pexes, to "merge" in via the PEX_PATH mechanism.
"""
self._pex = pex
self._extra_pex_paths = extra_pex_paths
@property
def interpreter(self):
return self._pex._interpreter
def path(self):
return self._pex.path()
def cmdline(self, args=()):
cmdline = ' '.join(self._pex.cmdline(args))
pex_path = self._pex_path()
if pex_path:
return '{env_var_name}={pex_path} {cmdline}'.format(env_var_name=self._PEX_PATH_ENV_VAR_NAME,
pex_path=pex_path,
cmdline=cmdline)
else:
return cmdline
def run(self, *args, **kwargs):
env = copy(kwargs.pop('env', {}))
pex_path = self._pex_path()
if pex_path:
env[self._PEX_PATH_ENV_VAR_NAME] = pex_path
logger.debug('Executing WrappedPEX using: {}'.format(self.cmdline(args=tuple(*args))))
return self._pex.run(*args, env=env, **kwargs)
def _pex_path(self):
if self._extra_pex_paths:
return ':'.join(self._extra_pex_paths)
else:
return None
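# Hedged usage sketch (added; not in the original pants module): how WrappedPEX
# merges extra pexes into a run via PEX_PATH. `main_pex` is assumed to be a
# pex.pex.PEX instance and the extra pex paths are placeholders.
def _example_wrapped_pex_usage(main_pex, srcs_pex_path, reqs_pex_path):
  """Run `main_pex` with two extra pexes made importable through PEX_PATH."""
  wrapped = WrappedPEX(main_pex, extra_pex_paths=[srcs_pex_path, reqs_pex_path])
  # cmdline() prefixes the command with PEX_PATH=<colon-joined extra paths>.
  logger.debug(wrapped.cmdline(['--version']))
  return wrapped.run(['--version'])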
| apache-2.0 | 2,174,291,343,092,144,000 | 27.387097 | 99 | 0.615909 | false | 3.499006 | false | false | false |
tgquintela/pySpatialTools | pySpatialTools/Retrieve/tools_retriever.py | 1 | 4869 |
"""
Retriever tools
---------------
Tools to use retrievers
"""
import numpy as np
from pySpatialTools.Discretization import _discretization_parsing_creation
from retrievers import BaseRetriever
###############################################################################
############################# Create aggretriever #############################
###############################################################################
def create_aggretriever(aggregation_info):
"""This function works to aggregate a retriever following the instructions
input in the aggregation_info variable. It returns an instance of a
retriever object to be appended in the collection manager list of
retrievers.
Parameters
----------
aggregation_info: tuple
the information to create a retriever aggregation.
Returns
-------
ret_out: pst.BaseRetriever
the retriever instance.
"""
## 0. Preparing inputs
assert(type(aggregation_info) == tuple)
disc_info, _, retriever_out, agg_info = aggregation_info
assert(type(agg_info) == tuple)
assert(len(agg_info) == 2)
aggregating_ret, _ = agg_info
## 1. Computing retriever_out
locs, regs, disc = _discretization_parsing_creation(disc_info)
ret_out = aggregating_ret(retriever_out, locs, regs, disc)
assert(isinstance(ret_out, BaseRetriever))
return ret_out
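# Hedged sketch (added for illustration; not in the original module): the
# aggregation_info tuple consumed above has the shape
#     (disc_info, _, retriever_out, (aggregating_ret, _))
# where `aggregating_ret` is one of the builder functions defined below and
# `retriever_out` wraps the retriever class to instantiate. The concrete
# discretization info and retriever class are assumptions of the caller.
def _example_create_aggretriever(disc_info, retriever_cls):
    retriever_out = (retriever_cls, {})
    agg_info = (dummy_implicit_outretriver, None)
    return create_aggretriever((disc_info, None, retriever_out, agg_info))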
###############################################################################
################### Candidates to aggregating_out functions ###################
###############################################################################
def dummy_implicit_outretriver(retriever_out, locs, regs, disc):
"""Dummy implicit outretriever creation. It only maps the common output
to a regs discretized space.
Parameters
----------
retriever_out: class (pst.BaseRetriever)
the retriever object.
locs: list, np.ndarray or other
the spatial information of the retrievable elements.
regs: np.ndarray
the assigned region for each of the retrievable spatial elements.
disc: pst.BaseDiscretizor
a discretizor.
Returns
-------
ret_out: pst.BaseRetriever
the retriever instance.
"""
## Assert inputs
assert(type(retriever_out) == tuple)
assert(isinstance(retriever_out[0], object))
## Preparing function output and pars_ret
def m_out(self, i_locs, neighs_info):
neighs, dists = neighs_info
for i in range(len(neighs)):
for nei in range(len(neighs[i])):
neighs[i][nei] = regs[neighs[i][nei]]
return neighs, dists
pars_ret = {}
if len(retriever_out) == 2:
pars_ret = retriever_out[1]
pars_ret['output_map'] = m_out
## Instantiation
ret_out = retriever_out[0](locs, **pars_ret)
assert(isinstance(ret_out, BaseRetriever))
return ret_out
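# Illustrative sketch (added; not in the original module): the output_map
# installed above rewrites retrieved neighbour indices into their region
# labels, e.g. with regs = np.array([10, 10, 20]) the raw neighbours [[0, 2]]
# become [[10, 20]] while the distances are passed through unchanged.
def _example_output_map_effect(regs, neighs, dists):
    mapped = [[regs[nei] for nei in group] for group in neighs]
    return mapped, dists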
def dummy_explicit_outretriver(retriever_out, locs, regs, disc):
"""Dummy explicit outretriever creation. It computes a regiondistances
between each regions.
Parameters
----------
retriever_out: tuple (class (pst.BaseRetriever), dict, function)
the retriever information.
locs: list, np.ndarray or other
the spatial information of the retrievable elements.
regs: np.ndarray
the assigned region for each of the retrievable spatial elements.
disc: pst.BaseDiscretizor
a discretizor.
Returns
-------
ret_out: pst.BaseRetriever
the retriever instance.
"""
## Assert inputs
assert(type(retriever_out) == tuple)
assert(isinstance(retriever_out[0], object))
pars_ret = {}
if len(retriever_out) == 2:
pars_ret = retriever_out[1]
main_mapper = retriever_out[2](retriever_out[3])
ret_out = retriever_out[0](main_mapper, **pars_ret)
assert(isinstance(ret_out, BaseRetriever))
return ret_out
def avgregionlocs_outretriever(retriever_out, locs, regs, disc):
"""Retriever creation for avg region locations. It retrieves the
prototype of the region, the average location of the region each one
belong.
Parameters
----------
retriever_out: class (pst.BaseRetriever)
the retriever object.
locs: list, np.ndarray or other
the spatial information of the retrievable elements.
regs: np.ndarray
the assigned region for each of the retrievable spatial elements.
disc: pst.BaseDiscretizor
a discretizor.
Returns
-------
ret_out: pst.BaseRetriever
the retriever instance.
"""
u_regs = np.unique(regs)
avg_locs = np.zeros((len(u_regs), locs.shape[1]))
for i in xrange(len(u_regs)):
        avg_locs[i] = np.mean(locs[regs == u_regs[i]], axis=0)
ret_out = retriever_out[0](avg_locs, **retriever_out[1])
return ret_out
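# Small self-contained sketch (added for illustration): the per-region
# "prototype" used by avgregionlocs_outretriever is just the mean location of
# the points assigned to each unique region label.
def _example_region_prototypes(locs, regs):
    u_regs = np.unique(regs)
    return np.array([locs[regs == reg].mean(axis=0) for reg in u_regs])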
| mit | -7,693,449,400,527,166,000 | 30.211538 | 79 | 0.600945 | false | 3.870429 | false | false | false |
leanlyne/ShootColorX | cocos2d/tools/project-creator/create_project.py | 13 | 2021 | #!/usr/bin/python
#coding=utf-8
"""****************************************************************************
Copyright (c) 2013 cocos2d-x.org
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************"""
import sys
def commandCreate():
from module.core import CocosProject
project = CocosProject()
name, package, language, path = project.checkParams()
project.createPlatformProjects(name, package, language, path)
# ------------ main --------------
if __name__ == '__main__':
"""
    There are two ways to create a cocos project.
1.UI
2.console
#create_project.py --help
#create_project.py -n MyGame -k com.MyCompany.AwesomeGame -l javascript -p c:/mycompany
"""
if len(sys.argv)==1:
try:
from module.ui import createTkCocosDialog
createTkCocosDialog()
except ImportError:
commandCreate()
else:
commandCreate()
| mit | 5,378,159,879,600,896,000 | 37.132075 | 95 | 0.65809 | false | 4.541573 | false | false | false |
SUNET/eduid-webapp | src/eduid_webapp/signup/tests/test_app.py | 1 | 22920 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 NORDUnet A/S
# Copyright (c) 2018-2019 SUNET
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import json
from contextlib import contextmanager
from typing import Any, Dict, Mapping, Optional
from mock import patch
from eduid_common.api.testing import EduidAPITestCase
from eduid_userdb.exceptions import UserOutOfSync
from eduid_userdb.signup import SignupUser
from eduid_webapp.signup.app import SignupApp, signup_init_app
from eduid_webapp.signup.verifications import send_verification_mail
class SignupTests(EduidAPITestCase):
app: SignupApp
def setUp(self):
super(SignupTests, self).setUp(copy_user_to_private=True)
def load_app(self, config: Mapping[str, Any]) -> SignupApp:
"""
Called from the parent class, so we can provide the appropriate flask
app for this test case.
"""
return signup_init_app(name='signup', test_config=config)
def update_config(self, config: Dict[str, Any]) -> Dict[str, Any]:
config.update(
{
'available_languages': {'en': 'English', 'sv': 'Svenska'},
'signup_authn_url': '/services/authn/signup-authn',
'signup_url': 'https://localhost/',
'dashboard_url': 'https://localhost/',
'development': 'DEBUG',
'application_root': '/',
'log_level': 'DEBUG',
'password_length': 10,
'vccs_url': 'http://turq:13085/',
'tou_version': '2018-v1',
'tou_url': 'https://localhost/get-tous',
'default_finish_url': 'https://www.eduid.se/',
'recaptcha_public_key': 'XXXX',
'recaptcha_private_key': 'XXXX',
'students_link': 'https://www.eduid.se/index.html',
'technicians_link': 'https://www.eduid.se/tekniker.html',
'staff_link': 'https://www.eduid.se/personal.html',
'faq_link': 'https://www.eduid.se/faq.html',
'celery_config': {
'result_backend': 'amqp',
'task_serializer': 'json',
'mongo_uri': config['mongo_uri'],
},
'environment': 'dev',
}
)
return config
# parameterized test methods
@patch('eduid_webapp.signup.views.verify_recaptcha')
@patch('eduid_common.api.mail_relay.MailRelay.sendmail')
def _captcha_new(
self,
mock_sendmail: Any,
mock_recaptcha: Any,
data1: Optional[dict] = None,
email: str = '[email protected]',
recaptcha_return_value: bool = True,
add_magic_cookie: bool = False,
):
"""
:param data1: to control the data POSTed to the /trycaptcha endpoint
:param email: the email to use for registration
:param recaptcha_return_value: to mock captcha verification failure
:param add_magic_cookie: add magic cookie to the trycaptcha request
"""
mock_sendmail.return_value = True
mock_recaptcha.return_value = recaptcha_return_value
with self.session_cookie_anon(self.browser) as client:
with client.session_transaction() as sess:
with self.app.test_request_context():
data = {
'email': email,
'recaptcha_response': 'dummy',
'tou_accepted': True,
'csrf_token': sess.get_csrf_token(),
}
if data1 is not None:
data.update(data1)
if add_magic_cookie:
client.set_cookie(
'localhost', key=self.app.conf.magic_cookie_name, value=self.app.conf.magic_cookie
)
return client.post('/trycaptcha', data=json.dumps(data), content_type=self.content_type_json)
@patch('eduid_webapp.signup.views.verify_recaptcha')
@patch('eduid_common.api.mail_relay.MailRelay.sendmail')
def _resend_email(
self, mock_sendmail: Any, mock_recaptcha: Any, data1: Optional[dict] = None, email: str = '[email protected]'
):
"""
Trigger re-sending an email with a verification code.
:param data1: to control the data POSTed to the resend-verification endpoint
:param email: what email address to use
"""
mock_sendmail.return_value = True
mock_recaptcha.return_value = True
with self.session_cookie_anon(self.browser) as client:
with self.app.test_request_context():
with client.session_transaction() as sess:
data = {'email': email, 'csrf_token': sess.get_csrf_token()}
if data1 is not None:
data.update(data1)
return client.post('/resend-verification', data=json.dumps(data), content_type=self.content_type_json)
@patch('eduid_webapp.signup.views.verify_recaptcha')
@patch('eduid_common.api.mail_relay.MailRelay.sendmail')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('vccs_client.VCCSClient.add_credentials')
def _verify_code(
self,
mock_add_credentials: Any,
mock_request_user_sync: Any,
mock_sendmail: Any,
mock_recaptcha: Any,
code: str = '',
email: str = '[email protected]',
):
"""
Test the verification link sent by email
:param code: the code to use
:param email: the email address to use
"""
mock_add_credentials.return_value = True
mock_request_user_sync.return_value = True
mock_sendmail.return_value = True
mock_recaptcha.return_value = True
with self.session_cookie_anon(self.browser) as client:
with client.session_transaction():
with self.app.test_request_context():
# lower because we are purposefully calling it with a mixed case mail address in tests
send_verification_mail(email.lower())
signup_user = self.app.private_userdb.get_user_by_pending_mail_address(email)
code = code or signup_user.pending_mail_address.verification_code
return client.get('/verify-link/' + code)
@patch('eduid_webapp.signup.views.verify_recaptcha')
@patch('eduid_common.api.mail_relay.MailRelay.sendmail')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('vccs_client.VCCSClient.add_credentials')
def _verify_code_after_captcha(
self,
mock_add_credentials: Any,
mock_request_user_sync: Any,
mock_sendmail: Any,
mock_recaptcha: Any,
data1: Optional[dict] = None,
email: str = '[email protected]',
):
"""
Verify the pending account with an emailed verification code after creating the account by verifying the captcha.
:param data1: to control the data sent to the trycaptcha endpoint
:param email: what email address to use
"""
mock_add_credentials.return_value = True
mock_request_user_sync.return_value = True
mock_sendmail.return_value = True
mock_recaptcha.return_value = True
with self.session_cookie_anon(self.browser) as client:
with self.app.test_request_context():
with client.session_transaction() as sess:
data = {
'email': email,
'recaptcha_response': 'dummy',
'tou_accepted': True,
'csrf_token': sess.get_csrf_token(),
}
if data1 is not None:
data.update(data1)
client.post('/trycaptcha', data=json.dumps(data), content_type=self.content_type_json)
if data1 is None:
# lower because we are purposefully calling it with a mixed case mail address in tests
send_verification_mail(email.lower())
signup_user = self.app.private_userdb.get_user_by_pending_mail_address(email)
response = client.get('/verify-link/' + signup_user.pending_mail_address.verification_code)
return json.loads(response.data)
@patch('eduid_webapp.signup.views.verify_recaptcha')
@patch('eduid_common.api.mail_relay.MailRelay.sendmail')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('vccs_client.VCCSClient.add_credentials')
def _get_code_backdoor(
self,
mock_add_credentials: Any,
mock_request_user_sync: Any,
mock_sendmail: Any,
mock_recaptcha: Any,
email: str,
):
"""
Test getting the generated verification code through the backdoor
"""
mock_add_credentials.return_value = True
mock_request_user_sync.return_value = True
mock_sendmail.return_value = True
mock_recaptcha.return_value = True
with self.session_cookie_anon(self.browser) as client:
with client.session_transaction():
with self.app.test_request_context():
send_verification_mail(email)
client.set_cookie(
'localhost', key=self.app.conf.magic_cookie_name, value=self.app.conf.magic_cookie
)
return client.get(f'/get-code?email={email}')
def test_get_code_backdoor(self):
self.app.conf.magic_cookie = 'magic-cookie'
self.app.conf.magic_cookie_name = 'magic'
self.app.conf.environment = 'dev'
email = '[email protected]'
resp = self._get_code_backdoor(email=email)
signup_user = self.app.private_userdb.get_user_by_pending_mail_address(email)
self.assertEqual(signup_user.pending_mail_address.verification_code, resp.data.decode('ascii'))
def test_get_code_no_backdoor_in_pro(self):
self.app.conf.magic_cookie = 'magic-cookie'
self.app.conf.magic_cookie_name = 'magic'
self.app.conf.environment = 'pro'
email = '[email protected]'
resp = self._get_code_backdoor(email=email)
self.assertEqual(resp.status_code, 400)
def test_get_code_no_backdoor_misconfigured1(self):
self.app.conf.magic_cookie = 'magic-cookie'
self.app.conf.magic_cookie_name = ''
self.app.conf.environment = 'dev'
email = '[email protected]'
resp = self._get_code_backdoor(email=email)
self.assertEqual(resp.status_code, 400)
def test_get_code_no_backdoor_misconfigured2(self):
self.app.conf.magic_cookie = ''
self.app.conf.magic_cookie_name = 'magic'
self.app.conf.environment = 'dev'
email = '[email protected]'
resp = self._get_code_backdoor(email=email)
self.assertEqual(resp.status_code, 400)
# actual tests
def test_captcha_new_user(self):
response = self._captcha_new()
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_SUCCESS')
self.assertEqual(data['payload']['next'], 'new')
def test_captcha_new_user_mixed_case(self):
response = self._captcha_new(email='[email protected]')
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_SUCCESS')
self.assertEqual(data['payload']['next'], 'new')
mixed_user: SignupUser = self.app.private_userdb.get_user_by_pending_mail_address('[email protected]')
lower_user: SignupUser = self.app.private_userdb.get_user_by_pending_mail_address('[email protected]')
assert mixed_user.eppn == lower_user.eppn
assert mixed_user.pending_mail_address.email == lower_user.pending_mail_address.email
def test_captcha_new_no_key(self):
self.app.conf.recaptcha_public_key = None
response = self._captcha_new()
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_FAIL')
self.assertEqual(data['payload']['message'], 'signup.recaptcha-not-verified')
def test_captcha_new_wrong_csrf(self):
data1 = {'csrf_token': 'wrong-token'}
response = self._captcha_new(data1=data1)
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_FAIL')
self.assertEqual(data['payload']['error']['csrf_token'], ['CSRF failed to validate'])
def test_captcha_existing_user(self):
response = self._captcha_new(email='[email protected]')
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_FAIL')
self.assertEqual(data['payload']['message'], 'signup.registering-address-used')
def test_captcha_existing_user_mixed_case(self):
response = self._captcha_new(email='[email protected]')
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_FAIL')
self.assertEqual(data['payload']['message'], 'signup.registering-address-used')
def test_captcha_remove_existing_signup_user(self):
response = self._captcha_new(email='[email protected]')
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_SUCCESS')
self.assertEqual(data['payload']['next'], 'new')
def test_captcha_remove_existing_signup_user_mixed_case(self):
response = self._captcha_new(email='[email protected]')
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_SUCCESS')
self.assertEqual(data['payload']['next'], 'new')
mixed_user: SignupUser = self.app.private_userdb.get_user_by_pending_mail_address('[email protected]')
lower_user: SignupUser = self.app.private_userdb.get_user_by_pending_mail_address('[email protected]')
assert mixed_user.eppn == lower_user.eppn
assert mixed_user.pending_mail_address.email == lower_user.pending_mail_address.email
def test_captcha_fail(self):
response = self._captcha_new(recaptcha_return_value=False)
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_FAIL')
def test_captcha_backdoor(self):
self.app.conf.magic_cookie = 'magic-cookie'
self.app.conf.magic_cookie_name = 'magic'
self.app.conf.environment = 'dev'
response = self._captcha_new(recaptcha_return_value=False, add_magic_cookie=True)
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_SUCCESS')
def test_captcha_no_backdoor_in_pro(self):
self.app.conf.magic_cookie = 'magic-cookie'
self.app.conf.magic_cookie_name = 'magic'
self.app.conf.environment = 'pro'
response = self._captcha_new(recaptcha_return_value=False, add_magic_cookie=True)
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_FAIL')
def test_captcha_no_backdoor_misconfigured1(self):
self.app.conf.magic_cookie = 'magic-cookie'
self.app.conf.magic_cookie_name = ''
self.app.conf.environment = 'dev'
response = self._captcha_new(recaptcha_return_value=False, add_magic_cookie=True)
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_FAIL')
def test_captcha_no_backdoor_misconfigured2(self):
self.app.conf.magic_cookie = ''
self.app.conf.magic_cookie_name = 'magic'
self.app.conf.environment = 'dev'
response = self._captcha_new(recaptcha_return_value=False, add_magic_cookie=True)
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_FAIL')
def test_captcha_unsynced(self):
with patch('eduid_webapp.signup.helpers.save_and_sync_user') as mock_save:
mock_save.side_effect = UserOutOfSync('unsync')
response = self._captcha_new()
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_SUCCESS')
self.assertEqual(data['payload']['next'], 'new')
def test_captcha_no_data_fail(self):
with self.session_cookie_anon(self.browser) as client:
response = client.post('/trycaptcha')
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(data['error'], True)
self.assertEqual(data['type'], 'POST_SIGNUP_TRYCAPTCHA_FAIL')
self.assertIn('email', data['payload']['error'])
self.assertIn('csrf_token', data['payload']['error'])
self.assertIn('recaptcha_response', data['payload']['error'])
def test_resend_email(self):
response = self._resend_email()
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_RESEND_VERIFICATION_SUCCESS')
def test_resend_email_mixed_case(self):
response = self._resend_email(email='[email protected]')
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_RESEND_VERIFICATION_SUCCESS')
mixed_user: SignupUser = self.app.private_userdb.get_user_by_pending_mail_address('[email protected]')
lower_user: SignupUser = self.app.private_userdb.get_user_by_pending_mail_address('[email protected]')
assert mixed_user.eppn == lower_user.eppn
assert mixed_user.pending_mail_address.email == lower_user.pending_mail_address.email
def test_resend_email_wrong_csrf(self):
data1 = {'csrf_token': 'wrong-token'}
response = self._resend_email(data1=data1)
data = json.loads(response.data)
self.assertEqual(data['type'], 'POST_SIGNUP_RESEND_VERIFICATION_FAIL')
self.assertEqual(data['payload']['error']['csrf_token'], ['CSRF failed to validate'])
def test_verify_code(self):
response = self._verify_code()
data = json.loads(response.data)
self.assertEqual(data['type'], 'GET_SIGNUP_VERIFY_LINK_SUCCESS')
self.assertEqual(data['payload']['status'], 'verified')
def test_verify_code_mixed_case(self):
response = self._verify_code(email='[email protected]')
data = json.loads(response.data)
self.assertEqual(data['type'], 'GET_SIGNUP_VERIFY_LINK_SUCCESS')
self.assertEqual(data['payload']['status'], 'verified')
mixed_user: SignupUser = self.app.private_userdb.get_user_by_mail('[email protected]')
lower_user: SignupUser = self.app.private_userdb.get_user_by_mail('[email protected]')
assert mixed_user.eppn == lower_user.eppn
assert mixed_user.mail_addresses.primary.email == lower_user.mail_addresses.primary.email
def test_verify_code_unsynced(self):
with patch('eduid_webapp.signup.helpers.save_and_sync_user') as mock_save:
mock_save.side_effect = UserOutOfSync('unsync')
response = self._verify_code()
data = json.loads(response.data)
self.assertEqual(data['type'], 'GET_SIGNUP_VERIFY_LINK_FAIL')
self.assertEqual(data['payload']['message'], 'user-out-of-sync')
def test_verify_existing_email(self):
response = self._verify_code(email='[email protected]')
data = json.loads(response.data)
self.assertEqual(data['type'], 'GET_SIGNUP_VERIFY_LINK_FAIL')
self.assertEqual(data['payload']['status'], 'already-verified')
def test_verify_existing_email_mixed_case(self):
response = self._verify_code(email='[email protected]')
data = json.loads(response.data)
self.assertEqual(data['type'], 'GET_SIGNUP_VERIFY_LINK_FAIL')
self.assertEqual(data['payload']['status'], 'already-verified')
def test_verify_code_after_captcha(self):
data = self._verify_code_after_captcha()
self.assertEqual(data['type'], 'GET_SIGNUP_VERIFY_LINK_SUCCESS')
def test_verify_code_after_captcha_mixed_case(self):
data = self._verify_code_after_captcha(email='[email protected]')
self.assertEqual(data['type'], 'GET_SIGNUP_VERIFY_LINK_SUCCESS')
def test_verify_code_after_captcha_proofing_log_error(self):
from eduid_webapp.signup.verifications import ProofingLogFailure
with patch('eduid_webapp.signup.views.verify_email_code') as mock_verify:
mock_verify.side_effect = ProofingLogFailure('fail')
data = self._verify_code_after_captcha()
self.assertEqual(data['type'], 'GET_SIGNUP_VERIFY_LINK_FAIL')
self.assertEqual(data['payload']['message'], 'Temporary technical problems')
def test_verify_code_after_captcha_wrong_csrf(self):
with self.assertRaises(AttributeError):
data1 = {'csrf_token': 'wrong-token'}
self._verify_code_after_captcha(data1=data1)
def test_verify_code_after_captcha_dont_accept_tou(self):
with self.assertRaises(AttributeError):
data1 = {'tou_accepted': False}
self._verify_code_after_captcha(data1=data1)
| bsd-3-clause | -7,160,105,695,666,060,000 | 43.853229 | 121 | 0.633202 | false | 3.723802 | true | false | false |
itsbenweeks/orcoursetrion | orcoursetrion/lib/github.py | 2 | 17364 | # -*- coding: utf-8 -*-
"""
Github class for making needed API calls to github
"""
import base64
from itertools import chain
import shutil
import tempfile
import requests
import sh
CLONE_DIR = 'cloned_repo'
class GitHubException(Exception):
"""Base exception class others inherit."""
pass
class GitHubRepoExists(GitHubException):
"""Repo exists, and thus cannot be created."""
pass
class GitHubRepoDoesNotExist(GitHubException):
"""Repo does not exist, and therefore actions can't be taken on it."""
pass
class GitHubUnknownError(GitHubException):
"""Unexpected status code exception"""
pass
class GitHubNoTeamFound(GitHubException):
"""Name team not found in list"""
pass
class GitHub(object):
"""
API class for handling calls to github
"""
def __init__(self, api_url, oauth2_token):
"""Initialize a requests session for use with this class by
specifying the base API endpoint and key.
Args:
api_url (str): Github API URL such as https://api.github.com/
oauth2_token (str): Github OAUTH2 token for v3
"""
self.api_url = api_url
if not api_url.endswith('/'):
self.api_url += '/'
self.session = requests.Session()
# Add OAUTH2 token to session headers and set Agent
self.session.headers = {
'Authorization': 'token {0}'.format(oauth2_token),
'User-Agent': 'Orcoursetrion',
}
def _get_all(self, url):
"""Return all results from URL given (i.e. page through them)
Args:
url(str): Full github URL with results.
Returns:
list: List of items returned.
"""
results = None
response = self.session.get(url)
if response.status_code == 200:
results = response.json()
while (
response.links.get('next', False) and
response.status_code == 200
):
response = self.session.get(response.links['next']['url'])
results += response.json()
if response.status_code not in [200, 404]:
raise GitHubUnknownError(response.text)
return results
def _get_repo(self, org, repo):
"""Either return the repo dictionary, or None if it doesn't exists.
Args:
org (str): Organization the repo lives in.
repo (str): The name of the repo.
Raises:
requests.exceptions.RequestException
GitHubUnknownError
Returns:
dict or None: Repo dictionary from github
(https://developer.github.com/v3/repos/#get) or None if it
doesn't exist.
"""
repo_url = '{url}repos/{org}/{repo}'.format(
url=self.api_url,
org=org,
repo=repo
)
# Try and get the URL, if it 404's we are good, otherwise raise
repo_response = self.session.get(repo_url)
if repo_response.status_code == 200:
return repo_response.json()
if repo_response.status_code != 404:
raise GitHubUnknownError(repo_response.text)
def _find_team(self, org, team):
"""Find a team in an org by name, or raise.
Args:
org (str): Organization to create the repo in.
team (str): Team to find by name.
Raises:
GitHubUnknownError
GitHubNoTeamFound
Returns:
dict: Team dictionary
(https://developer.github.com/v3/orgs/teams/#response)
"""
list_teams_url = '{url}orgs/{org}/teams'.format(
url=self.api_url,
org=org
)
teams = self._get_all(list_teams_url)
if not teams:
raise GitHubUnknownError(
"No teams found in org. This shouldn't happen"
)
found_team = [
x for x in teams
if x['name'].strip().lower() == team.strip().lower()
]
if len(found_team) != 1:
raise GitHubNoTeamFound(
'{0} not in list of teams for {1}'.format(team, org)
)
found_team = found_team[0]
return found_team
def create_repo(self, org, repo, description):
"""Creates a new github repository or raises exceptions
Args:
org (str): Organization to create the repo in.
repo (str): Name of the repo to create.
description (str): Description of repo to use.
Raises:
GitHubRepoExists
GitHubUnknownError
requests.exceptions.RequestException
Returns:
dict: Github dictionary of a repo
(https://developer.github.com/v3/repos/#create)
"""
repo_dict = self._get_repo(org, repo)
if repo_dict is not None:
raise GitHubRepoExists('This repository already exists')
# Everything looks clean, create the repo.
create_url = '{url}orgs/{org}/repos'.format(
url=self.api_url,
org=org
)
payload = {
'name': repo,
'description': description,
'private': True,
}
repo_create_response = self.session.post(create_url, json=payload)
if repo_create_response.status_code != 201:
raise GitHubUnknownError(repo_create_response.text)
return repo_create_response.json()
def _create_team(self, org, team_name, read_only):
"""Internal function to create a team.
Args:
            org (str): Organization to create the team in.
team_name (str): Name of team to create.
read_only (bool): If false, read/write, if true read_only.
Raises:
GitHubUnknownError
requests.RequestException
Returns:
dict: Team dictionary
(https://developer.github.com/v3/orgs/teams/#response)
"""
if read_only:
permission = 'pull'
else:
permission = 'push'
create_url = '{url}orgs/{org}/teams'.format(
url=self.api_url,
org=org
)
response = self.session.post(create_url, json={
'name': team_name,
'permission': permission
})
if response.status_code != 201:
raise GitHubUnknownError(response.text)
return response.json()
def put_team(self, org, team_name, read_only, members):
"""Create a team in a github organization.
Utilize
https://developer.github.com/v3/orgs/teams/#list-teams,
https://developer.github.com/v3/orgs/teams/#create-team,
https://developer.github.com/v3/orgs/teams/#list-team-members,
https://developer.github.com/v3/orgs/teams/#add-team-membership,
and
https://developer.github.com/v3/orgs/teams/#remove-team-membership.
to create a team and/or replace an existing team's membership
with the ``members`` list.
Args:
            org (str): Organization to create the team in.
team_name (str): Name of team to create.
read_only (bool): If false, read/write, if true read_only.
members (list): List of github usernames to add to the
team. If none, membership changes won't occur
Raises:
GitHubUnknownError
requests.RequestException
Returns:
dict: The team dictionary
(https://developer.github.com/v3/orgs/teams/#response-1)
"""
# Disabling too-many-locals because I need them as a human to
# keep track of the sets going on here.
# pylint: disable=too-many-locals
try:
team_dict = self._find_team(org, team_name)
except GitHubNoTeamFound:
team_dict = self._create_team(org, team_name, read_only)
# Just get the team and exit if no members are given
if members is None:
return team_dict
# Have the team, now replace member list with the one we have
members_url = '{url}teams/{id}/members'.format(
url=self.api_url,
id=team_dict['id']
)
existing_members = self._get_all(members_url)
# Filter list of dicts down to just username list
existing_members = [x['login'] for x in existing_members]
# Grab everyone that should no longer be members
remove_members = dict(
[(x, False) for x in existing_members if x not in members]
)
# Grab everyone that should be added
add_members = dict(
[(x, True) for x in members if x not in existing_members]
)
# merge the dictionary of usernames dict with True to add,
# False to remove.
membership_dict = dict(
chain(remove_members.items(), add_members.items())
)
# Now do the adds and removes of membership to sync them
for member, add in membership_dict.items():
url = '{url}teams/{id}/memberships/{member}'.format(
url=self.api_url,
id=team_dict['id'],
member=member
)
if add:
response = self.session.put(url)
else:
response = self.session.delete(url)
if response.status_code not in [200, 204]:
raise GitHubUnknownError(
'Failed to add or remove {0}. Got: {1}'.format(
member, response.text
)
)
return team_dict
def add_team_repo(self, org, repo, team):
"""Add a repo to an existing team (by name) in the specified org.
We first look up the team to get its ID
(https://developer.github.com/v3/orgs/teams/#list-teams), and
then add the repo to that team
(https://developer.github.com/v3/orgs/teams/#add-team-repo).
Args:
            org (str): Organization the repo lives in.
            repo (str): Name of the repo to add to the team.
            team (str): Name of the team to add the repo to.
Raises:
GitHubNoTeamFound
GitHubUnknownError
requests.exceptions.RequestException
"""
found_team = self._find_team(org, team)
team_repo_url = '{url}teams/{id}/repos/{org}/{repo}'.format(
url=self.api_url,
id=found_team['id'],
org=org,
repo=repo
)
response = self.session.put(team_repo_url)
if response.status_code != 204:
raise GitHubUnknownError(response.text)
def add_web_hook(self, org, repo, url):
"""Adds an active hook to a github repository.
This utilizes
https://developer.github.com/v3/repos/hooks/#create-a-hook to
create a form type Web hook that responds to push events
(basically all the defaults).
Args:
            org (str): Organization the repo lives in.
repo (str): Name of the repo the hook will live in.
url (str): URL of the hook to add.
Raises:
GitHubUnknownError
requests.exceptions.RequestException
Returns:
dict: Github dictionary of a hook
(https://developer.github.com/v3/repos/hooks/#response-2)
"""
hook_url = '{url}repos/{org}/{repo}/hooks'.format(
url=self.api_url,
org=org,
repo=repo
)
payload = {
'name': 'web',
'active': True,
'config': {
'url': url,
}
}
response = self.session.post(hook_url, json=payload)
if response.status_code != 201:
raise GitHubUnknownError(response.text)
return response.json()
def delete_web_hooks(self, org, repo):
"""Delete all the Web hooks for a repository
Uses https://developer.github.com/v3/repos/hooks/#list-hooks
to get a list of all hooks, and then runs
https://developer.github.com/v3/repos/hooks/#delete-a-hook
to remove each of them.
Args:
            org (str): Organization the repo lives in.
repo (str): Name of the repo to remove hooks from.
Raises:
GitHubUnknownError
GitHubRepoDoesNotExist
requests.exceptions.RequestException
Returns:
int: Number of hooks removed
"""
# Verify the repo exists first
repo_dict = self._get_repo(org, repo)
if repo_dict is None:
raise GitHubRepoDoesNotExist(
'Repo does not exist. Cannot remove hooks'
)
url = '{url}repos/{org}/{repo}/hooks'.format(
url=self.api_url,
org=org,
repo=repo
)
hooks = self._get_all(url)
num_hooks_removed = 0
for hook in hooks or []:
response = self.session.delete(hook['url'])
if response.status_code != 204:
raise GitHubUnknownError(response.text)
num_hooks_removed += 1
return num_hooks_removed
@staticmethod
def shallow_copy_repo(src_repo, dst_repo, committer, branch=None):
"""Copies one branch repo's contents to a new repo in the same
organization without history.
.. DANGER::
This will overwrite the destination repo's default branch and
rewrite its history.
The basic workflow is:
- Clone source repo
- Remove source repo ``.git`` folder
- Initialize as new git repo
- Set identity
- Add everything and commit
- Force push to destination repo
Args:
src_repo (str): Full git url to source repo.
dst_repo (str): Full git url to destination repo.
committer (dict): {'name': ..., 'email': ...} for the name
and e-mail to use in the initial commit of the
destination repo.
            branch (str): Optional branch; if not specified, the default branch is used.
Raises:
sh.ErrorReturnCode
Returns:
None
"""
# Disable member use because pylint doesn't get dynamic members
# pylint: disable=no-member
# Grab current working directory so we return after we are done
cwd = unicode(sh.pwd().rstrip('\n'))
tmp_dir = tempfile.mkdtemp(prefix='orc_git')
try:
sh.cd(tmp_dir)
if branch is None:
sh.git.clone(src_repo, CLONE_DIR, depth=1)
else:
sh.git.clone(src_repo, CLONE_DIR, depth=1, branch=branch)
sh.cd(CLONE_DIR)
shutil.rmtree('.git')
sh.git.init()
sh.git.config('user.email', committer['email'])
sh.git.config('user.name', committer['name'])
sh.git.remote.add.origin(dst_repo)
sh.git.add('.')
sh.git.commit(
m='Initial rerun copy by Orcoursetrion from {0}'.format(
src_repo
)
)
sh.git.push.origin.master(f=True)
finally:
shutil.rmtree(tmp_dir, ignore_errors=True)
sh.cd(cwd)
def add_repo_file(self, org, repo, committer, message, path, contents):
"""Adds the ``contents`` provided to the ``path`` in the repo
specified and committed by the ``commiter`` parameters
provided.
https://developer.github.com/v3/repos/contents/#create-a-file
.. NOTE::
This commits directly to the default branch of the repo.
Args:
org (str): Organization the repo lives in.
repo (str): The name of the repo.
committer (dict): {'name': ..., 'email': ...} for the name
and e-mail to use in the initial commit of the
destination repo.
message (str): Commit message to use for the addition.
path (str): The content path, i.e. ``docs/.gitignore``
contents (str): The actual string Contents of the file.
Raises:
requests.exceptions.RequestException
GitHubRepoDoesNotExist
GitHubUnknownError
Returns:
None
"""
repo_dict = self._get_repo(org, repo)
if repo_dict is None:
raise GitHubRepoDoesNotExist(
'Repo does not exist. Cannot add file'
)
url = '{url}repos/{org}/{repo}/contents/{path}'.format(
url=self.api_url,
org=org,
repo=repo,
path=path
)
payload = {
'message': message,
'committer': committer,
'content': base64.b64encode(contents).decode('ascii'),
}
response = self.session.put(url, json=payload)
if response.status_code != 201:
raise GitHubUnknownError(
'Failed to add contents to {org}/{repo}/{path}. '
'Got: {response}'.format(
org=org, repo=repo, path=path, response=response.text
)
)
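# Hedged usage sketch (added; not part of the original module): a typical call
# sequence against the GitHub wrapper above. The organization, repo, team and
# member names as well as the OAUTH2 token are placeholders.
def _example_setup_course_repo(oauth2_token):
    hub = GitHub('https://api.github.com/', oauth2_token)
    repo_dict = hub.create_repo('example-org', 'example-course', 'Example course content')
    hub.put_team('example-org', 'Example Team', read_only=False,
                 members=['alice', 'bob'])
    hub.add_team_repo('example-org', 'example-course', 'Example Team')
    hub.add_web_hook('example-org', 'example-course', 'https://ci.example.com/hook')
    return repo_dict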
| bsd-2-clause | 323,984,314,876,844,800 | 32.782101 | 75 | 0.552292 | false | 4.356247 | false | false | false |
vortex-ape/scikit-learn | sklearn/impute.py | 7 | 24042 | """Transformers for missing value imputation"""
# Authors: Nicolas Tresegnie <[email protected]>
# Sergey Feldman <[email protected]>
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import numpy.ma as ma
from scipy import sparse
from scipy import stats
from .base import BaseEstimator, TransformerMixin
from .utils import check_array
from .utils.sparsefuncs import _get_median
from .utils.validation import check_is_fitted
from .utils.validation import FLOAT_DTYPES
from .utils.fixes import _object_dtype_isnan
from .utils import is_scalar_nan
from .externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'MissingIndicator',
'SimpleImputer',
]
def _check_inputs_dtype(X, missing_values):
if (X.dtype.kind in ("f", "i", "u") and
not isinstance(missing_values, numbers.Real)):
raise ValueError("'X' and 'missing_values' types are expected to be"
" both numerical. Got X.dtype={} and "
" type(missing_values)={}."
.format(X.dtype, type(missing_values)))
def _get_mask(X, value_to_mask):
"""Compute the boolean mask X == missing_values."""
if is_scalar_nan(value_to_mask):
if X.dtype.kind == "f":
return np.isnan(X)
elif X.dtype.kind in ("i", "u"):
# can't have NaNs in integer array.
return np.zeros(X.shape, dtype=bool)
else:
# np.isnan does not work on object dtypes.
return _object_dtype_isnan(X)
else:
        # X == value_to_mask with object dtypes does not always perform
# element-wise for old versions of numpy
return np.equal(X, value_to_mask)
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
# Compute the most frequent value in array only
if array.size > 0:
with warnings.catch_warnings():
# stats.mode raises a warning when input array contains objects due
# to incapacity to detect NaNs. Irrelevant here since input array
# has already been NaN-masked.
warnings.simplefilter("ignore", RuntimeWarning)
mode = stats.mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
elif most_frequent_count == n_repeat:
        # Tie breaking: copy the behaviour of scipy.stats.mode
if most_frequent_value < extra_value:
return most_frequent_value
else:
return extra_value
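# Worked example (illustrative, not part of the library): for array=[1, 1, 2],
# extra_value=0 and n_repeat=3, the extended array [1, 1, 2, 0, 0, 0] contains
# 0 three times versus two occurrences of 1, so extra_value (0) is returned.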
class SimpleImputer(BaseEstimator, TransformerMixin):
"""Imputation transformer for completing missing values.
Read more in the :ref:`User Guide <impute>`.
Parameters
----------
missing_values : number, string, np.nan (default) or None
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed.
strategy : string, optional (default="mean")
The imputation strategy.
- If "mean", then replace missing values using the mean along
each column. Can only be used with numeric data.
- If "median", then replace missing values using the median along
each column. Can only be used with numeric data.
- If "most_frequent", then replace missing using the most frequent
value along each column. Can be used with strings or numeric data.
- If "constant", then replace missing values with fill_value. Can be
used with strings or numeric data.
.. versionadded:: 0.20
strategy="constant" for fixed value imputation.
fill_value : string or numerical value, optional (default=None)
When strategy == "constant", fill_value is used to replace all
occurrences of missing_values.
If left to the default, fill_value will be 0 when imputing numerical
data and "missing_value" for strings or object data types.
verbose : integer, optional (default=0)
Controls the verbosity of the imputer.
copy : boolean, optional (default=True)
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible. Note that, in the following cases,
a new copy will always be made, even if `copy=False`:
- If X is not an array of floating values;
- If X is encoded as a CSR matrix.
Attributes
----------
statistics_ : array of shape (n_features,)
The imputation fill value for each feature.
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import SimpleImputer
>>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
>>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
... # doctest: +NORMALIZE_WHITESPACE
SimpleImputer(copy=True, fill_value=None, missing_values=nan,
strategy='mean', verbose=0)
>>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
>>> print(imp_mean.transform(X))
... # doctest: +NORMALIZE_WHITESPACE
[[ 7. 2. 3. ]
[ 4. 3.5 6. ]
[10. 3.5 9. ]]
Notes
-----
Columns which only contained missing values at `fit` are discarded upon
`transform` if strategy is not "constant".
"""
def __init__(self, missing_values=np.nan, strategy="mean",
fill_value=None, verbose=0, copy=True):
self.missing_values = missing_values
self.strategy = strategy
self.fill_value = fill_value
self.verbose = verbose
self.copy = copy
def _validate_input(self, X):
allowed_strategies = ["mean", "median", "most_frequent", "constant"]
if self.strategy not in allowed_strategies:
raise ValueError("Can only use these strategies: {0} "
" got strategy={1}".format(allowed_strategies,
self.strategy))
if self.strategy in ("most_frequent", "constant"):
dtype = None
else:
dtype = FLOAT_DTYPES
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
try:
X = check_array(X, accept_sparse='csc', dtype=dtype,
force_all_finite=force_all_finite, copy=self.copy)
except ValueError as ve:
if "could not convert" in str(ve):
raise ValueError("Cannot use {0} strategy with non-numeric "
"data. Received datatype :{1}."
"".format(self.strategy, X.dtype.kind))
else:
raise ve
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ("i", "u", "f", "O"):
raise ValueError("SimpleImputer does not support data with dtype "
"{0}. Please provide either a numeric array (with"
" a floating point or integer dtype) or "
"categorical data represented either as an array "
"with integer dtype or an array of string values "
"with an object dtype.".format(X.dtype))
return X
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : SimpleImputer
"""
X = self._validate_input(X)
# default fill_value is 0 for numerical input and "missing_value"
# otherwise
if self.fill_value is None:
if X.dtype.kind in ("i", "u", "f"):
fill_value = 0
else:
fill_value = "missing_value"
else:
fill_value = self.fill_value
# fill_value should be numerical in case of numerical input
if (self.strategy == "constant" and
X.dtype.kind in ("i", "u", "f") and
not isinstance(fill_value, numbers.Real)):
raise ValueError("'fill_value'={0} is invalid. Expected a "
"numerical value when imputing numerical "
"data".format(fill_value))
if sparse.issparse(X):
# missing_values = 0 not allowed with sparse data as it would
# force densification
if self.missing_values == 0:
raise ValueError("Imputation not possible when missing_values "
"== 0 and input is sparse. Provide a dense "
"array instead.")
else:
self.statistics_ = self._sparse_fit(X,
self.strategy,
self.missing_values,
fill_value)
else:
self.statistics_ = self._dense_fit(X,
self.strategy,
self.missing_values,
fill_value)
return self
def _sparse_fit(self, X, strategy, missing_values, fill_value):
"""Fit the transformer on sparse data."""
mask_data = _get_mask(X.data, missing_values)
n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
statistics = np.empty(X.shape[1])
if strategy == "constant":
            # for constant strategy, self.statistics_ is used to store
# fill_value in each column
statistics.fill(fill_value)
else:
for i in range(X.shape[1]):
column = X.data[X.indptr[i]:X.indptr[i + 1]]
mask_column = mask_data[X.indptr[i]:X.indptr[i + 1]]
column = column[~mask_column]
# combine explicit and implicit zeros
mask_zeros = _get_mask(column, 0)
column = column[~mask_zeros]
n_explicit_zeros = mask_zeros.sum()
n_zeros = n_implicit_zeros[i] + n_explicit_zeros
if strategy == "mean":
s = column.size + n_zeros
statistics[i] = np.nan if s == 0 else column.sum() / s
elif strategy == "median":
statistics[i] = _get_median(column,
n_zeros)
elif strategy == "most_frequent":
statistics[i] = _most_frequent(column,
0,
n_zeros)
return statistics
def _dense_fit(self, X, strategy, missing_values, fill_value):
"""Fit the transformer on dense data."""
mask = _get_mask(X, missing_values)
masked_X = ma.masked_array(X, mask=mask)
# Mean
if strategy == "mean":
mean_masked = np.ma.mean(masked_X, axis=0)
# Avoid the warning "Warning: converting a masked element to nan."
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = np.nan
return mean
# Median
elif strategy == "median":
median_masked = np.ma.median(masked_X, axis=0)
# Avoid the warning "Warning: converting a masked element to nan."
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = np.nan
return median
# Most frequent
elif strategy == "most_frequent":
            # scipy.stats.mstats.mode cannot be used because it will not work
            # properly if the first element is masked and if its frequency
            # is equal to the frequency of the most frequent valid element.
            # See https://github.com/scipy/scipy/issues/2636
            # To be able to access the elements by columns
X = X.transpose()
mask = mask.transpose()
if X.dtype.kind == "O":
most_frequent = np.empty(X.shape[0], dtype=object)
else:
most_frequent = np.empty(X.shape[0])
for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(np.bool)
row = row[row_mask]
most_frequent[i] = _most_frequent(row, np.nan, 0)
return most_frequent
# Constant
elif strategy == "constant":
            # for constant strategy, self.statistics_ is used to store
# fill_value in each column
return np.full(X.shape[1], fill_value, dtype=X.dtype)
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
"""
check_is_fitted(self, 'statistics_')
X = self._validate_input(X)
statistics = self.statistics_
if X.shape[1] != statistics.shape[0]:
raise ValueError("X has %d features per sample, expected %d"
% (X.shape[1], self.statistics_.shape[0]))
# Delete the invalid columns if strategy is not constant
if self.strategy == "constant":
valid_statistics = statistics
else:
# same as np.isnan but also works for object dtypes
invalid_mask = _get_mask(statistics, np.nan)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.flatnonzero(valid_mask)
if invalid_mask.any():
missing = np.arange(X.shape[1])[invalid_mask]
if self.verbose:
warnings.warn("Deleting features without "
"observed values: %s" % missing)
X = X[:, valid_statistics_indexes]
# Do actual imputation
if sparse.issparse(X):
if self.missing_values == 0:
raise ValueError("Imputation not possible when missing_values "
"== 0 and input is sparse. Provide a dense "
"array instead.")
else:
mask = _get_mask(X.data, self.missing_values)
indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=np.int),
np.diff(X.indptr))[mask]
X.data[mask] = valid_statistics[indexes].astype(X.dtype,
copy=False)
else:
mask = _get_mask(X, self.missing_values)
n_missing = np.sum(mask, axis=0)
values = np.repeat(valid_statistics, n_missing)
coordinates = np.where(mask.transpose())[::-1]
X[coordinates] = values
return X
class MissingIndicator(BaseEstimator, TransformerMixin):
"""Binary indicators for missing values.
Parameters
----------
missing_values : number, string, np.nan (default) or None
The placeholder for the missing values. All occurrences of
        `missing_values` will be indicated (True in the output array).
features : str, optional
Whether the imputer mask should represent all or a subset of
features.
- If "missing-only" (default), the imputer mask will only represent
features containing missing values during fit time.
- If "all", the imputer mask will represent all features.
sparse : boolean or "auto", optional
Whether the imputer mask format should be sparse or dense.
- If "auto" (default), the imputer mask will be of same type as
input.
- If True, the imputer mask will be a sparse matrix.
- If False, the imputer mask will be a numpy array.
error_on_new : boolean, optional
If True (default), transform will raise an error when there are
features with missing values in transform that have no missing values
        in fit. This is applicable only when ``features="missing-only"``.
Attributes
----------
features_ : ndarray, shape (n_missing_features,) or (n_features,)
        The feature indices which will be returned when calling ``transform``.
        They are computed during ``fit``. For ``features='all'``, it is equal
        to ``range(n_features)``.
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import MissingIndicator
>>> X1 = np.array([[np.nan, 1, 3],
... [4, 0, np.nan],
... [8, 1, 0]])
>>> X2 = np.array([[5, 1, np.nan],
... [np.nan, 2, 3],
... [2, 4, 0]])
>>> indicator = MissingIndicator()
>>> indicator.fit(X1)
MissingIndicator(error_on_new=True, features='missing-only',
missing_values=nan, sparse='auto')
>>> X2_tr = indicator.transform(X2)
>>> X2_tr
array([[False, True],
[ True, False],
[False, False]])
"""
def __init__(self, missing_values=np.nan, features="missing-only",
sparse="auto", error_on_new=True):
self.missing_values = missing_values
self.features = features
self.sparse = sparse
self.error_on_new = error_on_new
def _get_missing_features_info(self, X):
"""Compute the imputer mask and the indices of the features
containing missing values.
Parameters
----------
X : {ndarray or sparse matrix}, shape (n_samples, n_features)
The input data with missing values. Note that ``X`` has been
            checked in ``fit`` and ``transform`` before this function is called.
Returns
-------
imputer_mask : {ndarray or sparse matrix}, shape \
(n_samples, n_features) or (n_samples, n_features_with_missing)
The imputer mask of the original data.
features_with_missing : ndarray, shape (n_features_with_missing)
The features containing missing values.
"""
if sparse.issparse(X) and self.missing_values != 0:
mask = _get_mask(X.data, self.missing_values)
# The imputer mask will be constructed with the same sparse format
# as X.
sparse_constructor = (sparse.csr_matrix if X.format == 'csr'
else sparse.csc_matrix)
imputer_mask = sparse_constructor(
(mask, X.indices.copy(), X.indptr.copy()),
shape=X.shape, dtype=bool)
missing_values_mask = imputer_mask.copy()
missing_values_mask.eliminate_zeros()
features_with_missing = (
np.flatnonzero(np.diff(missing_values_mask.indptr))
if missing_values_mask.format == 'csc'
else np.unique(missing_values_mask.indices))
if self.sparse is False:
imputer_mask = imputer_mask.toarray()
elif imputer_mask.format == 'csr':
imputer_mask = imputer_mask.tocsc()
else:
if sparse.issparse(X):
# case of sparse matrix with 0 as missing values. Implicit and
# explicit zeros are considered as missing values.
X = X.toarray()
imputer_mask = _get_mask(X, self.missing_values)
features_with_missing = np.flatnonzero(imputer_mask.sum(axis=0))
if self.sparse is True:
imputer_mask = sparse.csc_matrix(imputer_mask)
return imputer_mask, features_with_missing
def fit(self, X, y=None):
"""Fit the transformer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : object
Returns self.
"""
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
X = check_array(X, accept_sparse=('csc', 'csr'),
force_all_finite=force_all_finite)
_check_inputs_dtype(X, self.missing_values)
self._n_features = X.shape[1]
if self.features not in ('missing-only', 'all'):
raise ValueError("'features' has to be either 'missing-only' or "
"'all'. Got {} instead.".format(self.features))
if not ((isinstance(self.sparse, six.string_types) and
self.sparse == "auto") or isinstance(self.sparse, bool)):
raise ValueError("'sparse' has to be a boolean or 'auto'. "
"Got {!r} instead.".format(self.sparse))
self.features_ = (self._get_missing_features_info(X)[1]
if self.features == 'missing-only'
else np.arange(self._n_features))
return self
def transform(self, X):
"""Generate missing values indicator for X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : {ndarray or sparse matrix}, shape (n_samples, n_features)
The missing indicator for input data. The data type of ``Xt``
will be boolean.
"""
check_is_fitted(self, "features_")
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
X = check_array(X, accept_sparse=('csc', 'csr'),
force_all_finite=force_all_finite)
_check_inputs_dtype(X, self.missing_values)
if X.shape[1] != self._n_features:
raise ValueError("X has a different number of features "
"than during fitting.")
imputer_mask, features = self._get_missing_features_info(X)
if self.features == "missing-only":
features_diff_fit_trans = np.setdiff1d(features, self.features_)
if (self.error_on_new and features_diff_fit_trans.size > 0):
raise ValueError("The features {} have missing values "
"in transform but have no missing values "
"in fit.".format(features_diff_fit_trans))
if (self.features_.size > 0 and
self.features_.size < self._n_features):
imputer_mask = imputer_mask[:, self.features_]
return imputer_mask
def fit_transform(self, X, y=None):
"""Generate missing values indicator for X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : {ndarray or sparse matrix}, shape (n_samples, n_features)
The missing indicator for input data. The data type of ``Xt``
will be boolean.
"""
return self.fit(X, y).transform(X)
| bsd-3-clause | -4,766,323,149,635,850,000 | 37.101426 | 79 | 0.550453 | false | 4.281745 | false | false | false |
asharahmed/artemis | scraper.py | 1 | 1205 | import requests
import os
shows=[]
page=str(requests.get("http://dramaonline.com").text.encode('ascii','ignore'))
for item in page.split("\n"):
if "title=" in item:
print ("Title tag in item {0}".format(item))
if "<a href=" in item:
print ("<a href= found")
episodepage=item.split('<a href="')[1].split('"')[0]
print "Episode page is {0}".format(episodepage)
episodename=item.split('title="')[1].split('"')[0]
print "Searching for vidrail link in {0}".format(episodename)
for line in str(requests.get(episodepage).text.encode('ascii','ignore')).split("\n"):
if "vidrail" in line:
print line
if 'src="http://www.vidrail' in line:
print "Vidrail link found in line {0}".format(line)
vidraillink=line.split('src="')[1].split('"')[0]
print "Vidrail link is {0}".format(vidraillink)
					for line in str(requests.get(vidraillink).text.encode('ascii','ignore')).split("\n"):
if ".mp4" in line:
print ".mp4 found in line {0}".format(line)
episodelink=line.split('src="')[1].split('"')[0]
print "Episode link is {0}".format(episodelink)
shows.append({episodename,episodelink})
f=open("shows.txt",'w')
f.write(str(shows))
f.close() | gpl-2.0 | -6,017,492,612,978,569,000 | 40.586207 | 88 | 0.638174 | false | 2.975309 | false | false | false |
twerp/django-admin-flexselect-py3 | test_app/models.py | 1 | 1141 | from django.db import models as m
from django.core.exceptions import ValidationError
"""
No changes to the models are needed to use flexselect.
"""
class Company(m.Model):
name = m.CharField(max_length=80)
def __str__(self):
return self.name
class CompanyContactPerson(m.Model):
company = m.ForeignKey(Company)
name = m.CharField(max_length=80)
email = m.EmailField()
def __str__(self):
return self.name
class Client(m.Model):
company = m.ForeignKey(Company)
name = m.CharField(max_length=80)
def __str__(self):
return self.name
class Case(m.Model):
client = m.ForeignKey(Client)
company_contact_person = m.ForeignKey(CompanyContactPerson)
def clean(self):
"""
Make sure that the company for client is the same as the company for
the company contact person.
"""
if not self.client.company == self.company_contact_person.company:
            raise ValidationError("The client's and the contact's companies do"
                                  " not match.")
def __str__(self):
return 'Case: %d' % self.id
| cc0-1.0 | -4,173,985,098,094,928,400 | 23.276596 | 77 | 0.628396 | false | 4.017606 | false | false | false |
PyBossa/app-ushahidi | createTasks.py | 1 | 9261 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
import json
from optparse import OptionParser
import pbclient
import requests
def get_categories(url):
"""Gets Ushahidi categories from the server"""
url = url + "/api?task=categories"
r = requests.get(url)
data = r.json()
categories = data['payload']['categories']
return categories
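# The Ushahidi endpoint above is assumed to answer with JSON shaped roughly
# like {'payload': {'categories': [...]}}; only the inner list is returned.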
def task_formatter(app_config, row, n_answers, categories):
"""
Creates tasks for the application
:arg integer app_id: Application ID in PyBossa.
:returns: Task ID in PyBossa.
:rtype: integer
"""
# Each row has the following format
# row[0] = INCIDENT ID,
# row[1] = INCIDENT TITLE,
# row[2] = INCIDENT DATE
# row[3] = LOCATION
# row[4] = DESCRIPTION
# row[5] = CATEGORY
# row[6] = LATITUDE
# row[7] = LONGITUDE
# row[8] = APPROVED
# row[9] = VERIFIED
incident = dict(id=row[0],
title=row[1],
date=row[2],
location=row[3],
description=row[4],
category=row[5],
latitude=row[6],
longitude=row[7],
approved=row[8],
verified=row[9])
categories = categories
return dict(question=app_config['question'],
n_answers=int(n_answers),
incident=incident,
categories=categories)
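# Illustrative shape of the returned task payload (all values are made up):
#
#   {'question': 'Which categories fit this report?',
#    'n_answers': 2,
#    'incident': {'id': '42', 'title': '...', 'latitude': '...', ...},
#    'categories': [...]}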
if __name__ == "__main__":
# Arguments for the application
usage = "usage: %prog [options]"
parser = OptionParser(usage)
# URL where PyBossa listens
parser.add_option("-s", "--server", dest="api_url",
help="PyBossa URL http://domain.com/", metavar="URL")
# API-KEY
parser.add_option("-k", "--api-key", dest="api_key",
help="PyBossa User API-KEY to interact with PyBossa",
metavar="API-KEY")
# Create App
parser.add_option("-c", "--create-app", action="store_true",
dest="create_app",
help="Create the application",
metavar="CREATE-APP")
# Update template for tasks and long_description for app
parser.add_option("-t", "--update-template", action="store_true",
dest="update_template",
help="Update Tasks template",
metavar="UPDATE-TEMPLATE")
# Update tasks question
parser.add_option("-q", "--update-tasks",
dest="update_tasks",
help="Update Tasks question",
metavar="UPDATE-TASKS")
parser.add_option("-x", "--extra-task", action="store_true",
dest="add_more_tasks",
help="Add more tasks",
metavar="ADD-MORE-TASKS")
# Modify the number of TaskRuns per Task
# (default 30)
parser.add_option("-n", "--number-answers",
dest="n_answers",
help="Number of answers per task",
metavar="N-ANSWERS")
parser.add_option("-u", "--ushahidi-server",
dest="ushahidi_server",
help="Ushahidi server",
metavar="Ushahidi server")
parser.add_option("-d", "--data",
dest="csv_file",
help="CSV file with incident reports to import",
metavar="CSV file")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose")
(options, args) = parser.parse_args()
# Load app details
try:
app_json = open('app.json')
app_config = json.load(app_json)
app_json.close()
except IOError as e:
print "app.json is missing! Please create a new one"
exit(0)
if not options.api_url:
options.api_url = 'http://localhost:5000/'
pbclient.set('endpoint', options.api_url)
if not options.api_key:
parser.error("You must supply an API-KEY to create an \
                     application and tasks in PyBossa")
else:
pbclient.set('api_key', options.api_key)
if (options.create_app or options.add_more_tasks) and not options.ushahidi_server:
parser.error("You must supply the Ushahidi server from where you want \
to categorize the reports")
if (options.verbose):
        print('Running against PyBossa instance at: %s' % options.api_url)
print('Using API-KEY: %s' % options.api_key)
if not options.n_answers:
options.n_answers = 2
if options.create_app:
import csv
pbclient.create_app(app_config['name'],
app_config['short_name'],
app_config['description'])
app = pbclient.find_app(short_name=app_config['short_name'])[0]
app.long_description = open('long_description.html').read()
app.info['task_presenter'] = open('template.html').read()
app.info['thumbnail'] = app_config['thumbnail']
app.info['tutorial'] = open('tutorial.html').read()
categories = get_categories(options.ushahidi_server)
pbclient.update_app(app)
if not options.csv_file:
            options.csv_file = 'ushahidi.csv'
with open(options.csv_file, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
# Each row has the following format
# # <- ID
# INCIDENT TITLE
# INCIDENT DATE
# LOCATION
# DESCRIPTION
# CATEGORY
# LATITUDE
# LONGITUDE
# APPROVED
# VERIFIED
for row in csvreader:
if row[0] != '#':
task_info = task_formatter(app_config, row,
options.n_answers,
categories)
pbclient.create_task(app.id, task_info)
else:
app = pbclient.find_app(short_name=app_config['short_name'])[0]
if options.add_more_tasks:
categories = get_categories(options.ushahidi_server)
import csv
if not options.csv_file:
options.csv_file = 'ushahidi.csv'
with open(options.csv_file, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
# Each row has the following format
# # <- ID
# INCIDENT TITLE
# INCIDENT DATE
# LOCATION
# DESCRIPTION
# CATEGORY
# LATITUDE
# LONGITUDE
# APPROVED
# VERIFIED
for row in csvreader:
if row[0] != 'tweetid':
task_info = task_formatter(app_config, row,
options.n_answers,
categories)
pbclient.create_task(app.id, task_info)
if options.update_template:
print "Updating app tutorial, description and task presenter..."
app = pbclient.find_app(short_name=app_config['short_name'])[0]
app.long_description = open('long_description.html').read()
app.info['task_presenter'] = open('template.html').read()
app.info['tutorial'] = open('tutorial.html').read()
app.info['thumbnail'] = app_config['thumbnail']
pbclient.update_app(app)
print "Done!"
if options.update_tasks:
print "Updating task n_answers"
app = pbclient.find_app(short_name=app_config['short_name'])[0]
n_tasks = 0
offset = 0
limit = 100
tasks = pbclient.get_tasks(app.id, offset=offset, limit=limit)
while tasks:
for task in tasks:
print "Updating task: %s" % task.id
if ('n_answers' in task.info.keys()):
del(task.info['n_answers'])
task.n_answers = int(options.update_tasks)
pbclient.update_task(task)
n_tasks += 1
offset = (offset + limit)
tasks = pbclient.get_tasks(app.id, offset=offset, limit=limit)
print "%s Tasks have been updated!" % n_tasks
if not options.create_app and not options.update_template\
and not options.add_more_tasks and not options.update_tasks:
parser.error("Please check --help or -h for the available options")
| agpl-3.0 | -5,847,963,015,010,918,000 | 36.192771 | 86 | 0.541518 | false | 4.152915 | true | false | false |
mapres21/Proyecto_LSST- | Curve_Star_1.py | 1 | 2733 | import numpy as np
import matplotlib.pyplot as plt
# Matrix containing the data
Matriz = np.genfromtxt("Star_1.csv", delimiter=",")
# Vectors holding each band and its errors
U_band = Matriz[:, 0]
G_band = Matriz[:, 1]
R_band = Matriz[:, 2]
I_band = Matriz[:, 3]
Z_band = Matriz[:, 4]
EU_band = Matriz[:, 5]
EG_band = Matriz[:, 6]
ER_band = Matriz[:, 7]
EI_band = Matriz[:, 8]
EZ_band = Matriz[:, 9]
U_mean = np.mean(U_band)
G_mean = np.mean(G_band)
R_mean = np.mean(R_band)
I_mean = np.mean(I_band)
Z_mean = np.mean(Z_band)
U_mean1 = 18.52
G_mean1 = 17.26
R_mean1 = 17.24
I_mean1 = 17.34
Z_mean1 = 17.39
x = np.arange(0, 43, 1)
# Plot
plt.figure()
plt.plot(x, U_band, 'bo', label='Magnitude')
plt.hlines(y=U_mean, xmin=0, xmax=44, color='r', label='Found value')
plt.hlines(y=U_mean1, xmin=0, xmax=44, color='g', label='Mean value')
# plt.errorbar(x, U_band, yerr=EU_band, fmt='o', ecolor='b')
plt.ylabel("Magnitude")
plt.xlabel("Observation's night")
plt.title("U Band")
plt.grid()
plt.legend(loc='upper right')
plt.savefig("U.png")
plt.show(True)
plt.figure()
plt.plot(x, G_band, 'bo', label='Magnitude')
plt.hlines(y=G_mean, xmin=0, xmax=44, color='r', label='Found value')
plt.hlines(y=G_mean1, xmin=0, xmax=44, color='g', label='Mean value')
# plt.errorbar(x, U_band, yerr=EU_band, fmt='o', ecolor='b')
plt.ylabel("Magnitude")
plt.xlabel("Observation's night")
plt.title("G Band")
plt.grid()
plt.legend(loc='upper right')
plt.savefig("G.png")
plt.show(True)
plt.figure()
plt.plot(x, R_band, 'bo', label='Magnitude')
plt.hlines(y=R_mean, xmin=0, xmax=44, color='r', label='Found value')
plt.hlines(y=R_mean1, xmin=0, xmax=44, color='g', label='Mean value')
# plt.errorbar(x, U_band, yerr=EU_band, fmt='o', ecolor='b')
plt.ylabel("Magnitude")
plt.xlabel("Observation's night")
plt.title("R Band")
plt.grid()
plt.legend(loc='upper right')
plt.savefig("R.png")
plt.show(True)
plt.figure()
plt.plot(x, I_band, 'bo', label='Magnitude')
plt.hlines(y=I_mean, xmin=0, xmax=44, color='r', label='Found value')
plt.hlines(y=I_mean1, xmin=0, xmax=44, color='g', label='Mean value')
# plt.errorbar(x, U_band, yerr=EU_band, fmt='o', ecolor='b')
plt.ylabel("Magnitude")
plt.xlabel("Observation's night")
plt.title("I Band")
plt.grid()
plt.legend(loc='upper right')
plt.savefig("I.png")
plt.show(True)
plt.figure()
plt.plot(x, Z_band, 'bo', label='Magnitude')
plt.hlines(y=Z_mean, xmin=0, xmax=44, color='r', label='Found value')
plt.hlines(y=Z_mean1, xmin=0, xmax=44, color='g', label='Mean value')
# plt.errorbar(x, U_band, yerr=EU_band, fmt='o', ecolor='b')
plt.ylabel("Magnitude")
plt.xlabel("Observation's night")
plt.title("Z Band")
plt.grid()
plt.legend(loc='upper right')
plt.savefig("Z.png")
plt.show(True)
| gpl-3.0 | -3,486,409,329,770,050,600 | 26.606061 | 69 | 0.666667 | false | 2.333903 | false | true | false |
redsnapper8t8/django-cacheback | cacheback/base.py | 1 | 12027 | import time
import logging
from django.core.cache import cache
from django.conf import settings
from cacheback import tasks
logging.basicConfig()
logger = logging.getLogger('cacheback')
MEMCACHE_MAX_EXPIRATION = 2592000
class Job(object):
"""
A cached read job.
This is the core class for the package which is intended to be subclassed
to allow the caching behaviour to be customised.
"""
# All items are stored in memcache as a tuple (expiry, data). We don't use
# the TTL functionality within memcache but implement on own. If the
# expiry value is None, this indicates that there is already a job created
# for refreshing this item.
#: Default cache lifetime is 5 minutes. After this time, the result will
#: be considered stale and requests will trigger a job to refresh it.
lifetime = 600
#: Timeout period during which no new Celery tasks will be created for a
#: single cache item. This time should cover the normal time required to
#: refresh the cache.
refresh_timeout = 60
#: Time to store items in the cache. After this time, we will get a cache
#: miss which can lead to synchronous refreshes if you have
#: fetch_on_miss=True.
cache_ttl = MEMCACHE_MAX_EXPIRATION
#: Whether to perform a synchronous refresh when a result is missing from
#: the cache. Default behaviour is to do a synchronous fetch when the cache is empty.
#: Stale results are generally ok, but not no results.
fetch_on_miss = True
#: Whether to perform a synchronous refresh when a result is in the cache
    #: but stale. Default behaviour is never to do a synchronous fetch but
#: there will be times when an item is _too_ stale to be returned.
fetch_on_stale_threshold = None
#: Overrides options for `refresh_cache.apply_async` (e.g. `queue`).
task_options = {}
# --------
# MAIN API
# --------
def get(self, *raw_args, **raw_kwargs):
"""
Return the data for this function (using the cache if possible).
        This method is not intended to be overridden
"""
# We pass args and kwargs through a filter to allow them to be
# converted into values that can be pickled.
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
# Build the cache key and attempt to fetch the cached item
key = self.key(*args, **kwargs)
item = cache.get(key)
if item is None:
# Cache MISS - we can either:
# a) fetch the data immediately, blocking execution until
# the fetch has finished, or
# b) trigger an async refresh and return an empty result
if self.should_missing_item_be_fetched_synchronously(*args, **kwargs):
logger.debug(("Job %s with key '%s' - cache MISS - running "
"synchronous refresh"),
self.class_path, key)
return self.refresh(*args, **kwargs)
else:
logger.debug(("Job %s with key '%s' - cache MISS - triggering "
"async refresh and returning empty result"),
self.class_path, key)
# To avoid cache hammering (ie lots of identical Celery tasks
# to refresh the same cache item), we reset the cache with an
# empty result which will be returned until the cache is
# refreshed.
empty = self.empty()
self.cache_set(key, self.timeout(*args, **kwargs), empty)
self.async_refresh(*args, **kwargs)
return empty
expiry, data = item
delta = time.time() - expiry
if delta > 0:
# Cache HIT but STALE expiry - we can either:
# a) fetch the data immediately, blocking execution until
# the fetch has finished, or
# b) trigger a refresh but allow the stale result to be
# returned this time. This is normally acceptable.
if self.should_stale_item_be_fetched_synchronously(
delta, *args, **kwargs):
logger.debug(
("Job %s with key '%s' - STALE cache hit - running "
"synchronous refresh"),
self.class_path, key)
return self.refresh(*args, **kwargs)
else:
logger.debug(
("Job %s with key '%s' - STALE cache hit - triggering "
"async refresh and returning stale result"),
self.class_path, key)
# We replace the item in the cache with a 'timeout' expiry - this
# prevents cache hammering but guards against a 'limbo' situation
# where the refresh task fails for some reason.
timeout = self.timeout(*args, **kwargs)
self.cache_set(key, timeout, data)
self.async_refresh(*args, **kwargs)
else:
logger.debug("Job %s with key '%s' - cache HIT", self.class_path, key)
return data
def invalidate(self, *raw_args, **raw_kwargs):
"""
Mark a cached item invalid and trigger an asynchronous
job to refresh the cache
"""
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
item = cache.get(key)
if item is not None:
expiry, data = item
self.cache_set(key, self.timeout(*args, **kwargs), data)
self.async_refresh(*args, **kwargs)
def delete(self, *raw_args, **raw_kwargs):
"""
Remove an item from the cache
"""
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
item = cache.get(key)
if item is not None:
cache.delete(key)
# --------------
# HELPER METHODS
# --------------
def prepare_args(self, *args):
return args
def prepare_kwargs(self, **kwargs):
return kwargs
def cache_set(self, key, expiry, data):
"""
Add a result to the cache
:key: Cache key to use
:expiry: The expiry timestamp after which the result is stale
:data: The data to cache
"""
cache.set(key, (expiry, data), self.cache_ttl)
if getattr(settings, 'CACHEBACK_VERIFY_CACHE_WRITE', True):
# We verify that the item was cached correctly. This is to avoid a
# Memcache problem where some values aren't cached correctly
# without warning.
__, cached_data = cache.get(key, (None, None))
if data is not None and cached_data is None:
raise RuntimeError(
"Unable to save data of type %s to cache" % (
type(data)))
def refresh(self, *args, **kwargs):
"""
Fetch the result SYNCHRONOUSLY and populate the cache
"""
result = self.fetch(*args, **kwargs)
self.cache_set(self.key(*args, **kwargs),
self.expiry(*args, **kwargs),
result)
return result
def async_refresh(self, *args, **kwargs):
"""
Trigger an asynchronous job to refresh the cache
"""
# We trigger the task with the class path to import as well as the
# (a) args and kwargs for instantiating the class
# (b) args and kwargs for calling the 'refresh' method
try:
tasks.refresh_cache.apply_async(
kwargs=dict(
klass_str=self.class_path,
obj_args=self.get_constructor_args(),
obj_kwargs=self.get_constructor_kwargs(),
call_args=args,
call_kwargs=kwargs
),
**self.task_options
)
except Exception, e:
# Handle exceptions from talking to RabbitMQ - eg connection
# refused. When this happens, we try to run the task
# synchronously.
logger.error("Unable to trigger task asynchronously - failing "
"over to synchronous refresh")
logger.exception(e)
try:
return self.refresh(*args, **kwargs)
except Exception, e:
# Something went wrong while running the task
logger.error("Unable to refresh data synchronously: %s", e)
logger.exception(e)
else:
logger.debug("Failover synchronous refresh completed successfully")
def get_constructor_args(self):
return ()
def get_constructor_kwargs(self):
"""
Return the kwargs that need to be passed to __init__ when
reconstructing this class.
"""
return {}
@property
def class_path(self):
return '%s.%s' % (self.__module__, self.__class__.__name__)
# Override these methods
def empty(self):
"""
Return the appropriate value for a cache MISS (and when we defer the
repopulation of the cache)
"""
return None
def expiry(self, *args, **kwargs):
"""
Return the expiry timestamp for this item.
"""
return time.time() + self.lifetime
def timeout(self, *args, **kwargs):
"""
Return the refresh timeout for this item
"""
return time.time() + self.refresh_timeout
def should_missing_item_be_fetched_synchronously(self, *args, **kwargs):
"""
Return whether to refresh an item synchronously when it is missing from
the cache
"""
return self.fetch_on_miss
def should_item_be_fetched_synchronously(self, *args, **kwargs):
import warnings
warnings.warn(
"The method 'should_item_be_fetched_synchronously' is deprecated "
"and will be removed in 0.5. Use "
"'should_missing_item_be_fetched_synchronously' instead.",
DeprecationWarning)
return self.should_missing_item_be_fetched_synchronously(
*args, **kwargs)
def should_stale_item_be_fetched_synchronously(self, delta, *args, **kwargs):
"""
Return whether to refresh an item synchronously when it is found in the
cache but stale
"""
if self.fetch_on_stale_threshold is None:
return False
return delta > (self.fetch_on_stale_threshold - self.lifetime)
def key(self, *args, **kwargs):
"""
Return the cache key to use.
If you're passing anything but primitive types to the ``get`` method,
it's likely that you'll need to override this method.
"""
if not args and not kwargs:
return self.class_path
try:
if args and not kwargs:
return "%s:%s" % (self.class_path, hash(args))
# The line might break if your passed values are un-hashable. If
# it does, you need to override this method and implement your own
# key algorithm.
return "%s:%s:%s:%s" % (self.class_path,
hash(args),
hash(tuple(kwargs.keys())),
hash(tuple(kwargs.values())))
except TypeError:
raise RuntimeError(
"Unable to generate cache key due to unhashable"
"args or kwargs - you need to implement your own"
"key generation method to avoid this problem")
def fetch(self, *args, **kwargs):
"""
Return the data for this job - this is where the expensive work should
be done.
"""
raise NotImplementedError()
| mit | 2,435,693,329,812,904,400 | 37.060127 | 90 | 0.565228 | false | 4.538491 | false | false | false |
lamontu/data-analysis | numpy/conditional_logical.py | 1 | 1289 | # -*- coding: utf-8 -*-
import numpy as np
import numpy.random as np_random
print("""
## zip(*iterables)
Make an iterator that aggregates elements from each of the iterables""")
zipped = zip([1, 2, 3], [4, 5, 6], [7, 8, 9])
print(zipped)
print(list(zipped))
print()
print("## Select elements from boolean array:")
x_arr = np.array([1.1, 1.2, 1.3, 1.4, 1.5])
y_arr = np.array([2.1, 2.2, 2.3, 2.4, 2.5])
cond = np.array([True, False, True, True, False])
result = [(x if c else y) for x, y, c in zip(x_arr, y_arr, cond)]
print(result)
print("### np.where(cond, x_arr, y_arr):")
print(np.where(cond, x_arr, y_arr))
print()
print("### examples using 'where':")
arr = np_random.randn(4, 4)
print(arr)
print(np.where(arr > 0, 2, -2))
print()
print("## nested where:")
cond_1 = np.array([True, False, True, True, False])
cond_2 = np.array([False, True, False, True, False])
print("### Using ordinary code:")
result = []
for i in range(len(cond)):
if cond_1[i] and cond_2[i]:
result.append(0)
elif cond_1[i]:
result.append(1)
elif cond_2[i]:
result.append(2)
else:
result.append(3)
print(result)
print("### Using NumPy code:")
result = np.where(cond_1 & cond_2, 0,
np.where(cond_1, 1, np.where(cond_2, 2, 3)))
print(result)
| gpl-3.0 | -6,410,535,431,654,735,000 | 25.854167 | 75 | 0.602017 | false | 2.646817 | false | false | false |
olekw/cyphesis | data/rulesets/deeds/scripts/world/traits/Nourishable.py | 2 | 2117 | import server
from atlas import Operation, Entity
# Nourishable entities that receive nourishment and increase their '_nutrients' value.
class Nourishable(server.Thing):
def nourish_operation(self, op):
# Get the mass of the contained arg, convert it to nutrient through _modifier_eat* properties,
# and increase the "_nutrients" property.
# Check any limits on the amount of nutrient we can contain in our stomach/reserves
# through the _nutrients_max prop too.
if len(op) > 0:
arg = op[0]
if hasattr(arg, 'mass'):
# print('mass {}'.format(arg.mass))
# Check if we can convert to nutrient through the _modifier_eat property.
# We also check if there are specific values for herbivores and omnivores
# (_modifier_consume_type_meat and _modifier_consume_type_plant)
consume_factor = 0
if self.props._modifier_eat:
consume_factor = self.props._modifier_eat
if hasattr(arg, 'consume_type'):
if self.props["_modifier_consume_type_" + arg.consume_type]:
consume_factor = self.props["_modifier_consume_type_" + arg.consume_type]
# print("consume factor {}".format(consume_factor))
if consume_factor != 0:
nutrient = 0
if self.props._nutrients:
nutrient = self.props._nutrients
nutrient_new = nutrient + (arg.mass * consume_factor)
# Check if there's a limit to the nutrient we can contain in our stomach
if self.props._nutrients_max_factor and self.props.mass:
nutrient_new = min(self.props._nutrients_max_factor * self.props.mass, nutrient_new)
if nutrient_new != nutrient:
return server.OPERATION_BLOCKED, \
Operation("set", Entity(self.id, _nutrients=nutrient_new), to=self)
return server.OPERATION_BLOCKED
| gpl-2.0 | 527,064,252,118,595,800 | 50.634146 | 108 | 0.57487 | false | 4.00189 | false | false | false |
andfoy/margffoy-tuay-server | env/lib/python2.7/site-packages/tornadoredis/client.py | 1 | 54624 | # -*- coding: utf-8 -*-
import sys
from functools import partial
import collections
from collections import namedtuple, deque
import logging
import weakref
import datetime
import time as mod_time
from tornado.ioloop import IOLoop
from tornado import gen
from tornado import stack_context
from tornado.escape import to_unicode, to_basestring
from .exceptions import RequestError, ConnectionError, ResponseError
from .connection import Connection
log = logging.getLogger('tornadoredis.client')
Message = namedtuple('Message', ('kind', 'channel', 'body', 'pattern'))
PY3 = sys.version > '3'
class CmdLine(object):
def __init__(self, cmd, *args, **kwargs):
self.cmd = cmd
self.args = args
self.kwargs = kwargs
def __repr__(self):
return self.cmd + '(' + str(self.args) + ',' + str(self.kwargs) + ')'
def string_keys_to_dict(key_string, callback):
return dict([(key, callback) for key in key_string.split()])
def dict_merge(*dicts):
merged = {}
for d in dicts:
merged.update(d)
return merged
def reply_to_bool(r, *args, **kwargs):
return bool(r)
def make_reply_assert_msg(msg):
def reply_assert_msg(r, *args, **kwargs):
return r == msg
return reply_assert_msg
def reply_set(r, *args, **kwargs):
return set(r)
def reply_dict_from_pairs(r, *args, **kwargs):
return dict(zip(r[::2], r[1::2]))
def reply_str(r, *args, **kwargs):
return r or ''
def reply_int(r, *args, **kwargs):
return int(r) if r is not None else None
def reply_number(r, *args, **kwargs):
if r is not None:
num = float(r)
if not num.is_integer():
return num
else:
return int(num)
return None
def reply_datetime(r, *args, **kwargs):
return datetime.datetime.fromtimestamp(int(r))
def reply_pubsub_message(r, *args, **kwargs):
"""
Handles a Pub/Sub message and packs its data into a Message object.
"""
if len(r) == 3:
(kind, channel, body) = r
pattern = channel
elif len(r) == 4:
(kind, pattern, channel, body) = r
elif len(r) == 2:
(kind, channel) = r
body = pattern = None
else:
raise ValueError('Invalid number of arguments')
return Message(kind, channel, body, pattern)
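# For illustration: a raw subscribe reply such as ['message', 'chan', 'hello']
# becomes Message(kind='message', channel='chan', body='hello',
# pattern='chan'); a 4-element reply carries an explicit pattern instead.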
def reply_zset(r, *args, **kwargs):
if r and 'WITHSCORES' in args:
return reply_zset_withscores(r, *args, **kwargs)
else:
return r
def reply_zset_withscores(r, *args, **kwargs):
return list(zip(r[::2], list(map(reply_number, r[1::2]))))
def reply_hmget(r, key, *fields, **kwargs):
return dict(list(zip(fields, r)))
def reply_info(response, *args):
info = {}
def get_value(value):
# Does this string contain subvalues?
if (',' not in value) or ('=' not in value):
return value
sub_dict = {}
for item in value.split(','):
k, v = item.split('=')
try:
sub_dict[k] = int(v)
except ValueError:
sub_dict[k] = v
return sub_dict
for line in response.splitlines():
line = line.strip()
if line and not line.startswith('#'):
key, value = line.split(':')
try:
info[key] = int(value)
except ValueError:
info[key] = get_value(value)
return info
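# For illustration, an INFO payload containing the lines "used_memory:100" and
# "db0:keys=1,expires=0" parses to
# {'used_memory': 100, 'db0': {'keys': 1, 'expires': 0}}.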
def reply_ttl(r, *args, **kwargs):
return r != -1 and r or None
def reply_map(*funcs):
def reply_fn(r, *args, **kwargs):
if len(funcs) != len(r):
raise ValueError('more results than functions to map')
return [f(part) for f, part in zip(funcs, r)]
return reply_fn
def to_list(source):
if isinstance(source, str):
return [source]
else:
return list(source)
PUB_SUB_COMMANDS = (
'SUBSCRIBE',
'PSUBSCRIBE',
'UNSUBSCRIBE',
'PUNSUBSCRIBE',
# Not a command at all
'LISTEN',
)
REPLY_MAP = dict_merge(
string_keys_to_dict('AUTH BGREWRITEAOF BGSAVE DEL EXISTS '
'EXPIRE HDEL HEXISTS '
'HMSET MOVE PERSIST RENAMENX SISMEMBER SMOVE '
'SETEX SAVE SETNX MSET',
reply_to_bool),
string_keys_to_dict('BITCOUNT DECRBY GETBIT HLEN INCRBY LINSERT '
'LPUSHX RPUSHX SADD SCARD SDIFFSTORE SETBIT SETRANGE '
'SINTERSTORE STRLEN SUNIONSTORE SETRANGE',
reply_int),
string_keys_to_dict('FLUSHALL FLUSHDB SELECT SET SETEX '
'SHUTDOWN RENAME RENAMENX WATCH UNWATCH',
make_reply_assert_msg('OK')),
string_keys_to_dict('SMEMBERS SINTER SUNION SDIFF',
reply_set),
string_keys_to_dict('HGETALL BRPOP BLPOP',
reply_dict_from_pairs),
string_keys_to_dict('HGET',
reply_str),
string_keys_to_dict('SUBSCRIBE UNSUBSCRIBE LISTEN '
'PSUBSCRIBE UNSUBSCRIBE',
reply_pubsub_message),
string_keys_to_dict('ZRANK ZREVRANK',
reply_int),
string_keys_to_dict('ZCOUNT ZCARD',
reply_int),
string_keys_to_dict('ZRANGE ZRANGEBYSCORE ZREVRANGE '
'ZREVRANGEBYSCORE',
reply_zset),
string_keys_to_dict('ZSCORE ZINCRBY',
reply_number),
string_keys_to_dict('SCAN HSCAN SSCAN',
reply_map(reply_int, reply_set)),
{'HMGET': reply_hmget,
'PING': make_reply_assert_msg('PONG'),
'LASTSAVE': reply_datetime,
'TTL': reply_ttl,
'INFO': reply_info,
'MULTI_PART': make_reply_assert_msg('QUEUED'),
'TIME': lambda x: (int(x[0]), int(x[1])),
'ZSCAN': reply_map(reply_int, reply_zset_withscores)}
)
class Client(object):
# __slots__ = ('_io_loop', '_connection_pool', 'connection', 'subscribed',
# 'password', 'selected_db', '_pipeline', '_weak')
def __init__(self, host='localhost', port=6379, unix_socket_path=None,
password=None, selected_db=None, io_loop=None,
connection_pool=None):
self._io_loop = io_loop or IOLoop.current()
self._connection_pool = connection_pool
self._weak = weakref.proxy(self)
if connection_pool:
connection = (connection_pool
.get_connection(event_handler_ref=self._weak))
else:
connection = Connection(host=host, port=port,
unix_socket_path=unix_socket_path,
event_handler_proxy=self._weak,
io_loop=self._io_loop)
self.connection = connection
self.subscribed = set()
self.subscribe_callbacks = deque()
self.unsubscribe_callbacks = []
self.password = password
self.selected_db = selected_db or 0
self._pipeline = None
def __del__(self):
try:
connection = self.connection
pool = self._connection_pool
except AttributeError:
connection = None
pool = None
if connection:
if pool:
pool.release(connection)
connection.wait_until_ready()
else:
connection.disconnect()
def __repr__(self):
return 'tornadoredis.Client (db=%s)' % (self.selected_db)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
def __getattribute__(self, item):
"""
Bind methods to the weak proxy to avoid memory leaks
when bound method is passed as argument to the gen.Task
constructor.
"""
a = super(Client, self).__getattribute__(item)
try:
if isinstance(a, collections.Callable) and a.__self__:
try:
a = self.__class__.__dict__[item]
except KeyError:
a = Client.__dict__[item]
a = partial(a, self._weak)
except AttributeError:
pass
return a
def pipeline(self, transactional=False):
"""
Creates the 'Pipeline' to send multiple redis commands
in a single request.
Usage:
pipe = self.client.pipeline()
pipe.hset('foo', 'bar', 1)
pipe.expire('foo', 60)
yield gen.Task(pipe.execute)
or:
with self.client.pipeline() as pipe:
pipe.hset('foo', 'bar', 1)
pipe.expire('foo', 60)
yield gen.Task(pipe.execute)
"""
if not self._pipeline:
self._pipeline = Pipeline(
transactional=transactional,
selected_db=self.selected_db,
password=self.password,
io_loop=self._io_loop,
)
self._pipeline.connection = self.connection
return self._pipeline
def on_disconnect(self):
if self.subscribed:
self.subscribed = set()
raise ConnectionError("Socket closed on remote end")
#### connection
def connect(self):
if not self.connection.connected():
pool = self._connection_pool
if pool:
old_conn = self.connection
self.connection = pool.get_connection(event_handler_ref=self)
self.connection.ready_callbacks = old_conn.ready_callbacks
else:
self.connection.connect()
@gen.engine
def disconnect(self, callback=None):
"""
Disconnects from the Redis server.
"""
connection = self.connection
if connection:
pool = self._connection_pool
if pool:
pool.release(connection)
yield gen.Task(connection.wait_until_ready)
proxy = pool.make_proxy(client_proxy=self._weak,
connected=False)
self.connection = proxy
else:
self.connection.disconnect()
if callback:
callback(False)
#### formatting
def encode(self, value):
if not isinstance(value, str):
if not PY3 and isinstance(value, unicode):
value = value.encode('utf-8')
else:
value = str(value)
if PY3:
value = value.encode('utf-8')
return value
def format_command(self, *tokens, **kwargs):
cmds = []
for t in tokens:
e_t = self.encode(t)
e_t_s = to_basestring(e_t)
cmds.append('$%s\r\n%s\r\n' % (len(e_t), e_t_s))
return '*%s\r\n%s' % (len(tokens), ''.join(cmds))
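    # For example, format_command('SET', 'foo', 'bar') produces the Redis
    # protocol string '*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n'.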
def format_reply(self, cmd_line, data):
if cmd_line.cmd not in REPLY_MAP:
return data
try:
res = REPLY_MAP[cmd_line.cmd](data,
*cmd_line.args,
**cmd_line.kwargs)
except Exception as e:
raise ResponseError(
'failed to format reply to %s, raw data: %s; err message: %s'
% (cmd_line, data, e), cmd_line
)
return res
####
@gen.engine
def execute_command(self, cmd, *args, **kwargs):
result = None
execute_pending = cmd not in ('AUTH', 'SELECT')
callback = kwargs.get('callback', None)
if 'callback' in kwargs:
del kwargs['callback']
cmd_line = CmdLine(cmd, *args, **kwargs)
if callback and self.subscribed and cmd not in PUB_SUB_COMMANDS:
callback(RequestError(
'Executing non-Pub/Sub command while in subscribed state',
cmd_line))
return
n_tries = 2
while n_tries > 0:
n_tries -= 1
if not self.connection.connected():
self.connection.connect()
if not self.subscribed and not self.connection.ready():
yield gen.Task(self.connection.wait_until_ready)
if not self.subscribed and cmd not in ('AUTH', 'SELECT'):
if self.password and self.connection.info.get('pass', None) != self.password:
yield gen.Task(self.auth, self.password)
if self.selected_db and self.connection.info.get('db', 0) != self.selected_db:
yield gen.Task(self.select, self.selected_db)
command = self.format_command(cmd, *args, **kwargs)
try:
yield gen.Task(self.connection.write, command)
except Exception as e:
self.connection.disconnect()
if not n_tries:
raise e
else:
continue
listening = ((cmd in PUB_SUB_COMMANDS) or
(self.subscribed and cmd == 'PUBLISH'))
if listening:
result = True
execute_pending = False
break
else:
result = None
data = yield gen.Task(self.connection.readline)
if not data:
if not n_tries:
raise ConnectionError('no data received')
else:
resp = self.process_data(data, cmd_line)
if isinstance(resp, partial):
resp = yield gen.Task(resp)
result = self.format_reply(cmd_line, resp)
break
if execute_pending:
self.connection.execute_pending_command()
if callback:
callback(result)
@gen.engine
def _consume_bulk(self, tail, callback=None):
response = yield gen.Task(self.connection.read, int(tail) + 2)
if isinstance(response, Exception):
raise response
if not response:
raise ResponseError('EmptyResponse')
else:
response = to_unicode(response)
response = response[:-2]
callback(response)
def process_data(self, data, cmd_line):
data = to_basestring(data)
data = data[:-2] # strip \r\n
if data == '$-1':
response = None
elif data == '*0' or data == '*-1':
response = []
else:
head, tail = data[0], data[1:]
if head == '*':
return partial(self.consume_multibulk, int(tail), cmd_line)
elif head == '$':
return partial(self._consume_bulk, tail)
elif head == '+':
response = tail
elif head == ':':
response = int(tail)
elif head == '-':
if tail.startswith('ERR'):
tail = tail[4:]
response = ResponseError(tail, cmd_line)
else:
raise ResponseError('Unknown response type %s' % head,
cmd_line)
return response
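    # For illustration: process_data('+OK\r\n', cmd_line) returns 'OK',
    # process_data(':42\r\n', cmd_line) returns 42, and bulk ('$') or
    # multi-bulk ('*') replies return a partial that is resolved asynchronously.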
@gen.engine
def consume_multibulk(self, length, cmd_line, callback=None):
tokens = []
while len(tokens) < length:
data = yield gen.Task(self.connection.readline)
if not data:
raise ResponseError(
'Not enough data in response to %s, accumulated tokens: %s'
% (cmd_line, tokens),
cmd_line)
token = self.process_data(data, cmd_line)
if isinstance(token, partial):
token = yield gen.Task(token)
tokens.append(token)
callback(tokens)
### MAINTENANCE
def bgrewriteaof(self, callback=None):
self.execute_command('BGREWRITEAOF', callback=callback)
def dbsize(self, callback=None):
self.execute_command('DBSIZE', callback=callback)
def flushall(self, callback=None):
self.execute_command('FLUSHALL', callback=callback)
def flushdb(self, callback=None):
self.execute_command('FLUSHDB', callback=callback)
def ping(self, callback=None):
self.execute_command('PING', callback=callback)
def object(self, infotype, key, callback=None):
self.execute_command('OBJECT', infotype, key, callback=callback)
def info(self, section_name=None, callback=None):
args = ('INFO', )
if section_name:
args += (section_name, )
self.execute_command(*args, callback=callback)
def echo(self, value, callback=None):
self.execute_command('ECHO', value, callback=callback)
def time(self, callback=None):
"""
Returns the server time as a 2-item tuple of ints:
(seconds since epoch, microseconds into this second).
"""
self.execute_command('TIME', callback=callback)
def select(self, db, callback=None):
self.selected_db = db
if self.connection.info.get('db', None) != db:
self.connection.info['db'] = db
self.execute_command('SELECT', '%s' % db, callback=callback)
elif callback:
callback(True)
def shutdown(self, callback=None):
self.execute_command('SHUTDOWN', callback=callback)
def save(self, callback=None):
self.execute_command('SAVE', callback=callback)
def bgsave(self, callback=None):
self.execute_command('BGSAVE', callback=callback)
def lastsave(self, callback=None):
self.execute_command('LASTSAVE', callback=callback)
def keys(self, pattern='*', callback=None):
self.execute_command('KEYS', pattern, callback=callback)
def auth(self, password, callback=None):
self.password = password
if self.connection.info.get('pass', None) != password:
self.connection.info['pass'] = password
self.execute_command('AUTH', password, callback=callback)
elif callback:
callback(True)
### BASIC KEY COMMANDS
def append(self, key, value, callback=None):
self.execute_command('APPEND', key, value, callback=callback)
def getrange(self, key, start, end, callback=None):
"""
Returns the substring of the string value stored at ``key``,
determined by the offsets ``start`` and ``end`` (both are inclusive)
"""
self.execute_command('GETRANGE', key, start, end, callback=callback)
def expire(self, key, ttl, callback=None):
self.execute_command('EXPIRE', key, ttl, callback=callback)
def expireat(self, key, when, callback=None):
"""
Sets an expire flag on ``key``. ``when`` can be represented
as an integer indicating unix time or a Python datetime.datetime object.
"""
if isinstance(when, datetime.datetime):
when = int(mod_time.mktime(when.timetuple()))
self.execute_command('EXPIREAT', key, when, callback=callback)
def ttl(self, key, callback=None):
self.execute_command('TTL', key, callback=callback)
def type(self, key, callback=None):
self.execute_command('TYPE', key, callback=callback)
def randomkey(self, callback=None):
self.execute_command('RANDOMKEY', callback=callback)
def rename(self, src, dst, callback=None):
self.execute_command('RENAME', src, dst, callback=callback)
def renamenx(self, src, dst, callback=None):
self.execute_command('RENAMENX', src, dst, callback=callback)
def move(self, key, db, callback=None):
self.execute_command('MOVE', key, db, callback=callback)
def persist(self, key, callback=None):
self.execute_command('PERSIST', key, callback=callback)
def pexpire(self, key, time, callback=None):
"""
Set an expire flag on key ``key`` for ``time`` milliseconds.
``time`` can be represented by an integer or a Python timedelta
object.
"""
if isinstance(time, datetime.timedelta):
ms = int(time.microseconds / 1000)
time = time.seconds + time.days * 24 * 3600 * 1000 + ms
self.execute_command('PEXPIRE', key, time, callback=callback)
def pexpireat(self, key, when, callback=None):
"""
Set an expire flag on key ``key``. ``when`` can be represented
as an integer representing unix time in milliseconds (unix time * 1000)
or a Python datetime.datetime object.
"""
if isinstance(when, datetime.datetime):
ms = int(when.microsecond / 1000)
when = int(mod_time.mktime(when.timetuple())) * 1000 + ms
self.execute_command('PEXPIREAT', key, when, callback=callback)
def pttl(self, key, callback=None):
"Returns the number of milliseconds until the key will expire"
self.execute_command('PTTL', key, callback=callback)
def substr(self, key, start, end, callback=None):
self.execute_command('SUBSTR', key, start, end, callback=callback)
def delete(self, *keys, **kwargs):
self.execute_command('DEL', *keys, callback=kwargs.get('callback'))
def set(self, key, value, expire=None, pexpire=None,
only_if_not_exists=False, only_if_exists=False, callback=None):
args = []
if expire is not None:
args.extend(("EX", expire))
if pexpire is not None:
args.extend(("PX", pexpire))
if only_if_not_exists and only_if_exists:
raise ValueError("only_if_not_exists and only_if_exists "
"cannot be true simultaneously")
if only_if_not_exists:
args.append("NX")
if only_if_exists:
args.append("XX")
self.execute_command('SET', key, value, *args, callback=callback)
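    # Illustrative usage sketch (not part of the original API docs); `c` is an
    # assumed connected Client used from a tornado ``gen.engine`` coroutine:
    #
    #   # SET with a 30 second TTL, only if the key does not exist yet (SET ... EX 30 NX)
    #   ok = yield gen.Task(c.set, 'session:42', 'payload', expire=30, only_if_not_exists=True)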
def setex(self, key, ttl, value, callback=None):
self.execute_command('SETEX', key, ttl, value, callback=callback)
def setnx(self, key, value, callback=None):
self.execute_command('SETNX', key, value, callback=callback)
def setrange(self, key, offset, value, callback=None):
self.execute_command('SETRANGE', key, offset, value, callback=callback)
def strlen(self, key, callback=None):
self.execute_command('STRLEN', key, callback=callback)
def mset(self, mapping, callback=None):
items = [i for k, v in mapping.items() for i in (k, v)]
self.execute_command('MSET', *items, callback=callback)
def msetnx(self, mapping, callback=None):
items = [i for k, v in mapping.items() for i in (k, v)]
self.execute_command('MSETNX', *items, callback=callback)
def get(self, key, callback=None):
self.execute_command('GET', key, callback=callback)
def mget(self, keys, callback=None):
self.execute_command('MGET', *keys, callback=callback)
def getset(self, key, value, callback=None):
self.execute_command('GETSET', key, value, callback=callback)
def exists(self, key, callback=None):
self.execute_command('EXISTS', key, callback=callback)
def sort(self, key, start=None, num=None, by=None, get=None, desc=False,
alpha=False, store=None, callback=None):
if ((start is not None and num is None) or
(num is not None and start is None)):
raise ValueError("``start`` and ``num`` must both be specified")
tokens = [key]
if by is not None:
tokens.append('BY')
tokens.append(by)
if start is not None and num is not None:
tokens.append('LIMIT')
tokens.append(start)
tokens.append(num)
if get is not None:
tokens.append('GET')
tokens.append(get)
if desc:
tokens.append('DESC')
if alpha:
tokens.append('ALPHA')
if store is not None:
tokens.append('STORE')
tokens.append(store)
self.execute_command('SORT', *tokens, callback=callback)
def getbit(self, key, offset, callback=None):
self.execute_command('GETBIT', key, offset, callback=callback)
def setbit(self, key, offset, value, callback=None):
self.execute_command('SETBIT', key, offset, value, callback=callback)
def bitcount(self, key, start=None, end=None, callback=None):
args = [a for a in (key, start, end) if a is not None]
kwargs = {'callback': callback}
self.execute_command('BITCOUNT', *args, **kwargs)
def bitop(self, operation, dest, *keys, **kwargs):
"""
Perform a bitwise operation using ``operation`` between ``keys`` and
store the result in ``dest``.
"""
kwargs = {'callback': kwargs.get('callback', None)}
self.execute_command('BITOP', operation, dest, *keys, **kwargs)
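    # Illustrative usage sketch (not part of the original API docs); `c` is an
    # assumed connected Client inside a tornado coroutine:
    #
    #   # dest_bits = key_a AND key_b
    #   yield gen.Task(c.bitop, 'AND', 'dest_bits', 'key_a', 'key_b')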
### COUNTERS COMMANDS
def incr(self, key, callback=None):
self.execute_command('INCR', key, callback=callback)
def decr(self, key, callback=None):
self.execute_command('DECR', key, callback=callback)
def incrby(self, key, amount, callback=None):
self.execute_command('INCRBY', key, amount, callback=callback)
def incrbyfloat(self, key, amount=1.0, callback=None):
self.execute_command('INCRBYFLOAT', key, amount, callback=callback)
def decrby(self, key, amount, callback=None):
self.execute_command('DECRBY', key, amount, callback=callback)
### LIST COMMANDS
def blpop(self, keys, timeout=0, callback=None):
tokens = to_list(keys)
tokens.append(timeout)
self.execute_command('BLPOP', *tokens, callback=callback)
def brpop(self, keys, timeout=0, callback=None):
tokens = to_list(keys)
tokens.append(timeout)
self.execute_command('BRPOP', *tokens, callback=callback)
def brpoplpush(self, src, dst, timeout=1, callback=None):
tokens = [src, dst, timeout]
self.execute_command('BRPOPLPUSH', *tokens, callback=callback)
def lindex(self, key, index, callback=None):
self.execute_command('LINDEX', key, index, callback=callback)
def llen(self, key, callback=None):
self.execute_command('LLEN', key, callback=callback)
def lrange(self, key, start, end, callback=None):
self.execute_command('LRANGE', key, start, end, callback=callback)
def lrem(self, key, value, num=0, callback=None):
self.execute_command('LREM', key, num, value, callback=callback)
def lset(self, key, index, value, callback=None):
self.execute_command('LSET', key, index, value, callback=callback)
def ltrim(self, key, start, end, callback=None):
self.execute_command('LTRIM', key, start, end, callback=callback)
def lpush(self, key, *values, **kwargs):
callback = kwargs.get('callback', None)
self.execute_command('LPUSH', key, *values, callback=callback)
def lpushx(self, key, value, callback=None):
self.execute_command('LPUSHX', key, value, callback=callback)
def linsert(self, key, where, refvalue, value, callback=None):
self.execute_command('LINSERT', key, where, refvalue, value,
callback=callback)
def rpush(self, key, *values, **kwargs):
callback = kwargs.get('callback', None)
self.execute_command('RPUSH', key, *values, callback=callback)
def rpushx(self, key, value, **kwargs):
"Push ``value`` onto the tail of the list ``name`` if ``name`` exists"
callback = kwargs.get('callback', None)
self.execute_command('RPUSHX', key, value, callback=callback)
def lpop(self, key, callback=None):
self.execute_command('LPOP', key, callback=callback)
def rpop(self, key, callback=None):
self.execute_command('RPOP', key, callback=callback)
def rpoplpush(self, src, dst, callback=None):
self.execute_command('RPOPLPUSH', src, dst, callback=callback)
### SET COMMANDS
def sadd(self, key, *values, **kwargs):
callback = kwargs.get('callback', None)
self.execute_command('SADD', key, *values, callback=callback)
def srem(self, key, *values, **kwargs):
callback = kwargs.get('callback', None)
self.execute_command('SREM', key, *values, callback=callback)
def scard(self, key, callback=None):
self.execute_command('SCARD', key, callback=callback)
def spop(self, key, callback=None):
self.execute_command('SPOP', key, callback=callback)
def smove(self, src, dst, value, callback=None):
self.execute_command('SMOVE', src, dst, value, callback=callback)
def sismember(self, key, value, callback=None):
self.execute_command('SISMEMBER', key, value, callback=callback)
def smembers(self, key, callback=None):
self.execute_command('SMEMBERS', key, callback=callback)
def srandmember(self, key, number=None, callback=None):
if number:
self.execute_command('SRANDMEMBER', key, number, callback=callback)
else:
self.execute_command('SRANDMEMBER', key, callback=callback)
def sinter(self, keys, callback=None):
self.execute_command('SINTER', *keys, callback=callback)
def sdiff(self, keys, callback=None):
self.execute_command('SDIFF', *keys, callback=callback)
def sunion(self, keys, callback=None):
self.execute_command('SUNION', *keys, callback=callback)
def sinterstore(self, keys, dst, callback=None):
self.execute_command('SINTERSTORE', dst, *keys, callback=callback)
def sunionstore(self, keys, dst, callback=None):
self.execute_command('SUNIONSTORE', dst, *keys, callback=callback)
def sdiffstore(self, keys, dst, callback=None):
self.execute_command('SDIFFSTORE', dst, *keys, callback=callback)
### SORTED SET COMMANDS
def zadd(self, key, *score_value, **kwargs):
callback = kwargs.get('callback', None)
self.execute_command('ZADD', key, *score_value, callback=callback)
def zcard(self, key, callback=None):
self.execute_command('ZCARD', key, callback=callback)
def zincrby(self, key, value, amount, callback=None):
self.execute_command('ZINCRBY', key, amount, value, callback=callback)
def zrank(self, key, value, callback=None):
self.execute_command('ZRANK', key, value, callback=callback)
def zrevrank(self, key, value, callback=None):
self.execute_command('ZREVRANK', key, value, callback=callback)
def zrem(self, key, *values, **kwargs):
callback = kwargs.get('callback', None)
self.execute_command('ZREM', key, *values, callback=callback)
def zcount(self, key, start, end, callback=None):
self.execute_command('ZCOUNT', key, start, end, callback=callback)
def zscore(self, key, value, callback=None):
self.execute_command('ZSCORE', key, value, callback=callback)
def zrange(self, key, start, num, with_scores=True, callback=None):
tokens = [key, start, num]
if with_scores:
tokens.append('WITHSCORES')
self.execute_command('ZRANGE', *tokens, callback=callback)
def zrevrange(self, key, start, num, with_scores, callback=None):
tokens = [key, start, num]
if with_scores:
tokens.append('WITHSCORES')
self.execute_command('ZREVRANGE', *tokens, callback=callback)
def zrangebyscore(self, key, start, end, offset=None, limit=None,
with_scores=False, callback=None):
tokens = [key, start, end]
if offset is not None:
tokens.append('LIMIT')
tokens.append(offset)
tokens.append(limit)
if with_scores:
tokens.append('WITHSCORES')
self.execute_command('ZRANGEBYSCORE', *tokens, callback=callback)
def zrevrangebyscore(self, key, end, start, offset=None, limit=None,
with_scores=False, callback=None):
tokens = [key, end, start]
if offset is not None:
tokens.append('LIMIT')
tokens.append(offset)
tokens.append(limit)
if with_scores:
tokens.append('WITHSCORES')
self.execute_command('ZREVRANGEBYSCORE', *tokens, callback=callback)
def zremrangebyrank(self, key, start, end, callback=None):
self.execute_command('ZREMRANGEBYRANK', key, start, end,
callback=callback)
def zremrangebyscore(self, key, start, end, callback=None):
self.execute_command('ZREMRANGEBYSCORE', key, start, end,
callback=callback)
def zinterstore(self, dest, keys, aggregate=None, callback=None):
return self._zaggregate('ZINTERSTORE', dest, keys, aggregate, callback)
def zunionstore(self, dest, keys, aggregate=None, callback=None):
return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate, callback)
def _zaggregate(self, command, dest, keys, aggregate, callback):
tokens = [dest, len(keys)]
if isinstance(keys, dict):
items = list(keys.items())
keys = [i[0] for i in items]
weights = [i[1] for i in items]
else:
weights = None
tokens.extend(keys)
if weights:
tokens.append('WEIGHTS')
tokens.extend(weights)
if aggregate:
tokens.append('AGGREGATE')
tokens.append(aggregate)
self.execute_command(command, *tokens, callback=callback)
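    # Illustrative usage sketch (not part of the original API docs); `c` is an
    # assumed connected Client inside a tornado coroutine. Passing a dict maps
    # each source zset to a WEIGHTS multiplier:
    #
    #   yield gen.Task(c.zunionstore, 'scores:total',
    #                  {'scores:day1': 1, 'scores:day2': 2}, aggregate='SUM')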
### HASH COMMANDS
def hgetall(self, key, callback=None):
self.execute_command('HGETALL', key, callback=callback)
def hmset(self, key, mapping, callback=None):
items = [i for k, v in mapping.items() for i in (k, v)]
self.execute_command('HMSET', key, *items, callback=callback)
def hset(self, key, field, value, callback=None):
self.execute_command('HSET', key, field, value, callback=callback)
def hsetnx(self, key, field, value, callback=None):
self.execute_command('HSETNX', key, field, value, callback=callback)
def hget(self, key, field, callback=None):
self.execute_command('HGET', key, field, callback=callback)
def hdel(self, key, *fields, **kwargs):
callback = kwargs.get('callback')
self.execute_command('HDEL', key, *fields, callback=callback)
def hlen(self, key, callback=None):
self.execute_command('HLEN', key, callback=callback)
def hexists(self, key, field, callback=None):
self.execute_command('HEXISTS', key, field, callback=callback)
def hincrby(self, key, field, amount=1, callback=None):
self.execute_command('HINCRBY', key, field, amount, callback=callback)
def hincrbyfloat(self, key, field, amount=1.0, callback=None):
self.execute_command('HINCRBYFLOAT', key, field, amount,
callback=callback)
def hkeys(self, key, callback=None):
self.execute_command('HKEYS', key, callback=callback)
def hmget(self, key, fields, callback=None):
self.execute_command('HMGET', key, *fields, callback=callback)
def hvals(self, key, callback=None):
self.execute_command('HVALS', key, callback=callback)
### SCAN COMMANDS
def scan(self, cursor, count=None, match=None, callback=None):
self._scan('SCAN', cursor, count, match, callback)
def hscan(self, key, cursor, count=None, match=None, callback=None):
self._scan('HSCAN', cursor, count, match, callback, key=key)
def sscan(self, key, cursor, count=None, match=None, callback=None):
self._scan('SSCAN', cursor, count, match, callback, key=key)
def zscan(self, key, cursor, count=None, match=None, callback=None):
self._scan('ZSCAN', cursor, count, match, callback, key=key)
def _scan(self, cmd, cursor, count, match, callback, key=None):
tokens = [cmd]
        if key:
            tokens.append(key)
        tokens.append(cursor)
        if match:
            tokens.extend(['MATCH', match])
        if count:
            tokens.extend(['COUNT', count])
self.execute_command(*tokens, callback=callback)
### PUBSUB
def subscribe(self, channels, callback=None):
self._subscribe('SUBSCRIBE', channels, callback=callback)
def psubscribe(self, channels, callback=None):
self._subscribe('PSUBSCRIBE', channels, callback=callback)
def _subscribe(self, cmd, channels, callback=None):
if isinstance(channels, str) or (not PY3 and isinstance(channels, unicode)):
channels = [channels]
if not self.subscribed:
listen_callback = None
original_cb = stack_context.wrap(callback) if callback else None
def _cb(*args, **kwargs):
self.on_subscribed(Message(kind='subscribe',
channel=channels[0],
body=None,
pattern=None))
if original_cb:
original_cb(True)
callback = _cb
else:
listen_callback = callback
callback = None
# Use the listen loop to execute subscribe callbacks
for channel in channels:
self.subscribe_callbacks.append((channel, listen_callback))
# Do not execute the same callback multiple times
listen_callback = None
self.execute_command(cmd, *channels, callback=callback)
def on_subscribed(self, result):
self.subscribed.add(result.channel)
def on_unsubscribed(self, channels, *args, **kwargs):
channels = set(channels)
self.subscribed -= channels
for cb_channels, cb in self.unsubscribe_callbacks:
cb_channels.difference_update(channels)
if not cb_channels:
self._io_loop.add_callback(cb)
def unsubscribe(self, channels, callback=None):
self._unsubscribe('UNSUBSCRIBE', channels, callback=callback)
def punsubscribe(self, channels, callback=None):
self._unsubscribe('PUNSUBSCRIBE', channels, callback=callback)
def _unsubscribe(self, cmd, channels, callback=None):
if isinstance(channels, str) or (not PY3 and isinstance(channels, unicode)):
channels = [channels]
if callback:
cb = stack_context.wrap(callback)
# TODO: Do we need to back this up with self._io_loop.add_timeout(time() + 1, cb)?
# FIXME: What about PUNSUBSCRIBEs?
self.unsubscribe_callbacks.append((set(channels), cb))
self.execute_command(cmd, *channels)
def publish(self, channel, message, callback=None):
self.execute_command('PUBLISH', channel, message, callback=callback)
@gen.engine
def listen(self, callback=None, exit_callback=None):
"""
Starts a Pub/Sub channel listening loop.
Use the unsubscribe or punsubscribe methods to exit it.
Each received message triggers the callback function.
Callback function receives a Message object instance as argument.
Here is an example of handling a channel subscription::
def handle_message(msg):
if msg.kind == 'message':
print msg.body
elif msg.kind == 'disconnect':
# Disconnected from the redis server
pass
yield gen.Task(client.subscribe, 'channel_name')
client.listen(handle_message)
...
yield gen.Task(client.subscribe, 'another_channel_name')
...
yield gen.Task(client.unsubscribe, 'another_channel_name')
yield gen.Task(client.unsubscribe, 'channel_name')
Unsubscribe from a channel to exit the 'listen' loop.
"""
if callback:
def error_wrapper(e):
if isinstance(e, GeneratorExit):
return ConnectionError('Connection lost')
else:
return e
cmd_listen = CmdLine('LISTEN')
while self.subscribed:
data = yield gen.Task(self.connection.readline)
if isinstance(data, Exception):
raise data
if data is None:
# If disconnected from the redis server clear the list
# of subscriber this client has subscribed to
channels = self.subscribed
self.subscribed = set()
# send a message to caller:
# Message(kind='disconnect', channel=set(channel1, ...))
callback(reply_pubsub_message(('disconnect', channels)))
return
response = self.process_data(data, cmd_listen)
if isinstance(response, partial):
response = yield gen.Task(response)
if isinstance(response, Exception):
raise response
result = self.format_reply(cmd_listen, response)
if result and result.kind in ('subscribe', 'psubscribe'):
self.on_subscribed(result)
try:
__, cb = self.subscribe_callbacks.popleft()
except IndexError:
__, cb = result.channel, None
if cb:
cb(True)
if result and result.kind in ('unsubscribe', 'punsubscribe'):
self.on_unsubscribed([result.channel])
callback(result)
if exit_callback:
exit_callback(bool(callback))
### CAS
def watch(self, *key_names, **kwargs):
callback = kwargs.get('callback', None)
self.execute_command('WATCH', *key_names, callback=callback)
def unwatch(self, callback=None):
self.execute_command('UNWATCH', callback=callback)
### LOCKS
def lock(self, lock_name, lock_ttl=None, polling_interval=0.1):
"""
Create a new Lock object using the Redis key ``lock_name`` for
state, that behaves like a threading.Lock.
This method is synchronous, and returns immediately with the Lock object.
This method doesn't acquire the Lock or in fact trigger any sort of
communications with the Redis server. This must be done using the Lock
object itself.
If specified, ``lock_ttl`` indicates the maximum life time for the lock.
If none is specified, it will remain locked until release() is called.
``polling_interval`` indicates the time between acquire attempts (polling)
when the lock is in blocking mode and another client is currently
holding the lock.
Note: If using ``lock_ttl``, you should make sure all the hosts
that are running clients have their time synchronized with a network
time service like ntp.
"""
return Lock(self, lock_name, lock_ttl=lock_ttl, polling_interval=polling_interval)
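    # Illustrative usage sketch (not part of the original API docs); `c` is an
    # assumed connected Client inside a tornado ``gen.engine`` coroutine:
    #
    #   lock = c.lock('jobs:nightly', lock_ttl=60)
    #   acquired = yield gen.Task(lock.acquire, blocking=False)
    #   if acquired:
    #       try:
    #           pass  # ... critical section ...
    #       finally:
    #           yield gen.Task(lock.release)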
### SCRIPTING COMMANDS
def eval(self, script, keys=None, args=None, callback=None):
if keys is None:
keys = []
if args is None:
args = []
num_keys = len(keys)
_args = keys + args
self.execute_command('EVAL', script, num_keys,
*_args, callback=callback)
def evalsha(self, shahash, keys=None, args=None, callback=None):
if keys is None:
keys = []
if args is None:
args = []
num_keys = len(keys)
        _args = keys + args
        self.execute_command('EVALSHA', shahash, num_keys,
                             *_args, callback=callback)
def script_exists(self, shahashes, callback=None):
# not yet implemented in the redis protocol
self.execute_command('SCRIPT EXISTS', *shahashes, callback=callback)
def script_flush(self, callback=None):
# not yet implemented in the redis protocol
self.execute_command('SCRIPT FLUSH', callback=callback, verbose=True)
def script_kill(self, callback=None):
# not yet implemented in the redis protocol
self.execute_command('SCRIPT KILL', callback=callback)
def script_load(self, script, callback=None):
# not yet implemented in the redis protocol
self.execute_command('SCRIPT LOAD', script, callback=callback)
class Pipeline(Client):
def __init__(self, transactional, *args, **kwargs):
super(Pipeline, self).__init__(*args, **kwargs)
self.transactional = transactional
self.command_stack = []
self.executing = False
def __del__(self):
"""
Do not disconnect on releasing the PipeLine object.
Thanks to Tomek (https://github.com/thlawiczka)
"""
pass
def execute_command(self, cmd, *args, **kwargs):
if self.executing and cmd in ('AUTH', 'SELECT'):
super(Pipeline, self).execute_command(cmd, *args, **kwargs)
elif cmd in PUB_SUB_COMMANDS:
raise RequestError(
'Client is not supposed to issue '
'the %s command in a pipeline' % cmd)
else:
self.command_stack.append(CmdLine(cmd, *args, **kwargs))
def discard(self):
# actually do nothing with redis-server, just flush the command_stack
self.command_stack = []
def format_replies(self, cmd_lines, responses):
results = []
for cmd_line, response in zip(cmd_lines, responses):
try:
results.append(self.format_reply(cmd_line, response))
except Exception as e:
results.append(e)
return results
def format_pipeline_request(self, command_stack):
return ''.join(self.format_command(c.cmd, *c.args, **c.kwargs)
for c in command_stack)
@gen.engine
def execute(self, callback=None):
command_stack = self.command_stack
self.command_stack = []
self.executing = True
try:
if self.transactional:
command_stack = ([CmdLine('MULTI')] +
command_stack +
[CmdLine('EXEC')])
request = self.format_pipeline_request(command_stack)
password_should_be_sent = (
self.password and
self.connection.info.get('pass', None) != self.password)
if password_should_be_sent:
yield gen.Task(self.auth, self.password)
db_should_be_selected = (
self.selected_db and
self.connection.info.get('db', None) != self.selected_db)
if db_should_be_selected:
yield gen.Task(self.select, self.selected_db)
if not self.connection.connected():
self.connection.connect()
if not self.connection.ready():
yield gen.Task(self.connection.wait_until_ready)
try:
self.connection.write(request)
except IOError:
self.command_stack = []
self.connection.disconnect()
raise ConnectionError("Socket closed on remote end")
except Exception as e:
self.command_stack = []
self.connection.disconnect()
raise e
responses = []
total = len(command_stack)
cmds = iter(command_stack)
while len(responses) < total:
data = yield gen.Task(self.connection.readline)
if not data:
raise ResponseError('Not enough data after EXEC')
try:
cmd_line = next(cmds)
if self.transactional and cmd_line.cmd != 'EXEC':
response = self.process_data(data,
CmdLine('MULTI_PART'))
else:
response = self.process_data(data, cmd_line)
if isinstance(response, partial):
response = yield gen.Task(response)
responses.append(response)
except Exception as e:
responses.append(e)
if self.transactional:
command_stack = command_stack[:-1]
responses = responses[-1]
results = self.format_replies(command_stack[1:], responses)
else:
results = self.format_replies(command_stack, responses)
self.connection.execute_pending_command()
finally:
self.executing = False
callback(results)
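    # Illustrative usage sketch (not in the original file); assumes the Client
    # class exposes a pipeline() factory elsewhere in this module and that we
    # are inside a tornado ``gen.engine`` coroutine:
    #
    #   pipe = c.pipeline(transactional=True)  # assumed factory method
    #   pipe.set('a', '1')
    #   pipe.incr('counter')
    #   results = yield gen.Task(pipe.execute)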
class Lock(object):
"""
A shared, distributed Lock that uses a Redis server to hold its state.
This Lock can be shared across processes and/or machines. It works
asynchronously and plays nice with the Tornado IOLoop.
"""
LOCK_FOREVER = float(2 ** 31 + 1) # 1 past max unix time
def __init__(self, redis_client, lock_name, lock_ttl=None, polling_interval=0.1):
"""
Create a new Lock object using the Redis key ``lock_name`` for
state, that behaves like a threading.Lock.
This method is synchronous, and returns immediately. It doesn't acquire the
Lock or in fact trigger any sort of communications with the Redis server.
This must be done using the Lock object itself.
If specified, ``lock_ttl`` indicates the maximum life time for the lock.
If none is specified, it will remain locked until release() is called.
``polling_interval`` indicates the time between acquire attempts (polling)
when the lock is in blocking mode and another client is currently
holding the lock.
Note: If using ``lock_ttl``, you should make sure all the hosts
that are running clients have their time synchronized with a network
time service like ntp.
"""
self.redis_client = redis_client
self.lock_name = lock_name
self.acquired_until = None
self.lock_ttl = lock_ttl
self.polling_interval = polling_interval
if self.lock_ttl and self.polling_interval > self.lock_ttl:
raise LockError("'polling_interval' must be less than 'lock_ttl'")
@gen.engine
def acquire(self, blocking=True, callback=None):
"""
Acquire the lock.
Returns True once the lock is acquired.
If ``blocking`` is False, always return immediately. If the lock
was acquired, return True, otherwise return False.
Otherwise, block until the lock is acquired (or an error occurs).
If ``callback`` is supplied, it is called with the result.
"""
# Loop until we have a conclusive result
while 1:
# Get the current time
unixtime = int(mod_time.time())
# If the lock has a limited lifetime, create a timeout value
if self.lock_ttl:
timeout_at = unixtime + self.lock_ttl
# Otherwise, set the timeout value at forever (dangerous)
else:
timeout_at = Lock.LOCK_FOREVER
timeout_at = float(timeout_at)
# Try and get the lock, setting the timeout value in the appropriate key,
# but only if a previous value does not exist in Redis
result = yield gen.Task(self.redis_client.setnx, self.lock_name, timeout_at)
# If we managed to get the lock
if result:
# We successfully acquired the lock!
self.acquired_until = timeout_at
if callback:
callback(True)
return
# We didn't get the lock, another value is already there
# Check to see if the current lock timeout value has already expired
result = yield gen.Task(self.redis_client.get, self.lock_name)
existing = float(result or 1)
# Has it expired?
if existing < unixtime:
# The previous lock is expired. We attempt to overwrite it, getting the current value
# in the server, just in case someone tried to get the lock at the same time
result = yield gen.Task(self.redis_client.getset,
self.lock_name,
timeout_at)
existing = float(result or 1)
# If the value we read is older than our own current timestamp, we managed to get the
# lock with no issues - the timeout has indeed expired
if existing < unixtime:
# We successfully acquired the lock!
self.acquired_until = timeout_at
if callback:
callback(True)
return
# However, if we got here, then the value read from the Redis server is newer than
# our own current timestamp - meaning someone already got the lock before us.
# We failed getting the lock.
# If we are not signalled to block
if not blocking:
# We failed acquiring the lock...
if callback:
callback(False)
return
# Otherwise, we "sleep" for an amount of time equal to the polling interval, after which
# we will try getting the lock again.
yield gen.Task(self.redis_client._io_loop.add_timeout,
self.redis_client._io_loop.time() + self.polling_interval)
@gen.engine
def release(self, callback=None):
"""
Releases the already acquired lock.
If ``callback`` is supplied, it is called with True when finished.
"""
if self.acquired_until is None:
raise ValueError("Cannot release an unlocked lock")
# Get the current lock value
result = yield gen.Task(self.redis_client.get, self.lock_name)
existing = float(result or 1)
# If the lock time is in the future, delete the lock
if existing >= self.acquired_until:
yield gen.Task(self.redis_client.delete, self.lock_name)
self.acquired_until = None
# That is it.
if callback:
callback(True)
| gpl-2.0 | -1,550,556,144,528,038,700 | 35.488978 | 101 | 0.57894 | false | 4.152338 | false | false | false |
bmcfee/gordon | scripts/audio_intake_from_tracklist.py | 1 | 17235 | #!/usr/bin/env python
# Copyright (C) 2010 Douglas Eck
#
# This file is part of Gordon.
#
# Gordon is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gordon is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gordon. If not, see <http://www.gnu.org/licenses/>.
'''
Functions for importing music to Gordon database
script usage:
python audio_intake_from_tracklist.py source csv [doit]
<source> is a name for the collection
 <csv> is the path to the csv file listing the tracks to import
<doit> is an optional parameter; if False, no actual import takes
place, only verbose would-dos
'''
import os, collections, datetime, logging, stat, sys
import argparse
from csv import reader
from gordon.io import AudioFile
from gordon.db.model import add, commit, Album, Artist, Track, Collection, Annotation
from gordon.db.config import DEF_GORDON_DIR
from gordon.db.gordon_db import get_tidfilename, make_subdirs_and_copy, is_binary
from gordon.io.mp3_eyeD3 import isValidMP3, getAllTags
log = logging.getLogger('gordon.audio_intake_from_tracklist')
def add_track(trackpath, source=str(datetime.date.today()),
gordonDir=DEF_GORDON_DIR, tag_dict=dict(), artist=None,
album=None, fast_import=False, import_md=False):
"""Add track with given filename <trackpath> to database
@param source: audio files data source (string)
@param gordonDir: main Gordon directory
@param tag_dict: dictionary of key,val tag pairs - See add_album(...).
@param artist: The artist for this track. An instance of Artist. None if not present
@param album: The album for this track. An instance of Album. None if not present
@param fast_import: If true, do not calculate strip_zero length. Defaults to False
    @param import_md: use True to try to extract all metadata tags embedded in the audio file. Defaults to False
"""
(path, filename) = os.path.split(trackpath)
(fname, ext) = os.path.splitext(filename)
log.debug('Adding file "%s" of "%s" album by %s', filename, album, artist)
# validations
if 'album' not in tag_dict:
#todo: currently cannot add singleton files. Need an album which is defined in tag_dict
log.error('Cannot add "%s" because it is not part of an album',
filename)
return -1 # didn't add
if not os.path.isfile(trackpath):
log.info('Skipping %s because it is not a file', filename)
return -1 # not a file
try:
AudioFile(trackpath).read(tlen_sec=0.01)
except:
log.error('Skipping "%s" because it is not a valid audio file', filename)
return -1 # not an audio file
# required data
bytes = os.stat(trackpath)[stat.ST_SIZE]
# reencode name to latin1 !!!
try:
fn_recoded = filename.decode('utf-8')
except:
try: fn_recoded = filename.decode('latin1')
except: fn_recoded = 'unknown'
# prepare data
if tag_dict[u'compilation'] not in [True, False, 'True', 'False'] :
tag_dict[u'compilation'] = False
track = Track(title = tag_dict[u'title'],
artist = tag_dict[u'artist'],
album = tag_dict[u'album'],
tracknum = tag_dict[u'tracknum'],
compilation = tag_dict[u'compilation'],
otitle = tag_dict[u'title'],
oartist = tag_dict[u'artist'],
oalbum = tag_dict[u'album'],
otracknum = tag_dict[u'tracknum'],
ofilename = fn_recoded,
source = unicode(source),
bytes = bytes)
# add data
add(track) # needed to get a track id
commit() #to get our track id we need to write this record
log.debug('Wrote track record %s to database', track.id)
if fast_import :
track.secs = -1
track.zsecs = -1
else :
a = AudioFile(trackpath)
[track.secs, track.zsecs] = a.get_secs_zsecs()
track.path = u'%s' % get_tidfilename(track.id, ext[1:])
# links track to artist & album in DB
if artist:
log.debug('Linking %s to artist %s', track, artist)
track.artist = artist.name
track.artists.append(artist)
if album:
log.debug('Linking %s to album %s', track, album)
track.album = album.name
track.albums.append(album)
log.debug('Wrote album and artist additions to track into database')
# copy the file to the Gordon audio/feature data directory
tgt = os.path.join(gordonDir, 'audio', 'main', track.path)
make_subdirs_and_copy(trackpath, tgt)
log.debug('Copied "%s" to %s', trackpath, tgt)
# add annotations
del(tag_dict[u'title'])
del(tag_dict[u'artist'])
del(tag_dict[u'album'])
del(tag_dict[u'tracknum'])
del(tag_dict[u'compilation'])
for tagkey, tagval in tag_dict.iteritems(): # create remaining annotations
track.annotations.append(Annotation(type='text', name=tagkey, value=tagval))
if import_md:
#check if file is mp3. if so:
if isValidMP3(trackpath):
#extract all ID3 tags, store each tag value as an annotation type id3.[tagname]
for tag in getAllTags(trackpath):
track.annotations.append(Annotation(type='id3', name=tag[0], value=tag[1]))
#todo: work with more metadata formats (use tagpy?)
# Link the track to the collection object
track.collections.append(get_or_create_collection(source))
commit() # store the annotations
def _read_csv_tags(cwd, csv=None):
'''Reads a csv file containing track metadata and annotations (v 2.0)
# may use py comments in metadata .csv file. Must include a header:
filename, title, artist, album, tracknum, compilation, [optional1], [optional2]...
and then corresponding values per line (see example metadata.csv file in project root)
@return: a 2D dict in the form dict[<file-name>][<tag>] or False if an error occurs
@param cwd: complete csv file-path (if no <csv> sent) or path to directory to work in
@param csv: csv file-name (in <cwd> dir). Defaults to None
@todo: csv values (after header) may include "\embedded" to try getting it from the audio file.
    Currently only ID3 tag names understood by gordon.io.mp3_eyeD3.id3v2_getval_sub are usable in this manner.
@todo: include other metadata formats (use tagpy?)'''
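    # Illustrative csv layout (made-up values) matching the header check below;
    # any extra columns after 'compilation' become per-track annotations:
    #
    #   filepath,title,artist,album,tracknum,compilation,lyrics
    #   /data/audio/01.mp3,Song One,Some Artist,Some Album,1,False,/data/lyrics/01.txt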
# open csv file
if csv is None:
filename = cwd
else:
filename = os.path.join(cwd, csv)
try:
csvfile = reader(open(filename))
except IOError:
log.error(" Couldn't open '%s'", csv)
raise
tags = dict()
headers = False
for line in csvfile: # each record (file rows)
if len(line) < 6 : continue # skip bad lines (blank or too short)
line[0] = line[0].strip()
if not line[0] or line[0][0] == '#' : continue # skip if filepath empty or comment line
# read and validate header
if not headers: # first valid line is the header
line=[l.strip() for l in line]
if not line[:6]==['filepath','title','artist','album','tracknum','compilation']:
log.error('CSV headers are incorrect at line %d.',
csvfile.line_num)
return False
headers = [unicode(x) for x in line]
continue
# save title, artist, album, tracknum, compilation in tags[<file-name>]
filepath=line[0]
tags[filepath] = dict() # this deletes previous lines if filepath is repeated ...
col = 1 # col 0 is 'filepath' so skip it
while col < len(headers):
if col >= len(line):
break
value = line[col].strip()
if headers[col] == u'tracknum': # prepare for smallint in the DB
try: tags[filepath][u'tracknum'] = int(value)
except: tags[filepath][u'tracknum'] = 0
elif headers[col] == u'compilation': # prepare for bool in the DB
if value.lower()=='true' or value=='1':
value = True
else:
value = False
tags[filepath][u'compilation'] = value
elif os.path.isfile(value):
if not is_binary(value):
try:
txt=open(value)
tags[filepath][headers[col]] = unicode(txt.read())
txt.close()
except:
log.error('Error opening %s file %s at line %d',
headers[col], value, csvfile.line_num)
tags[filepath][headers[col]] = unicode(value)
else:
log.debug('%s file %s at line %d appears to be binary, '
'not importing', headers[col], value,
csvfile.line_num)
tags[filepath][headers[col]] = unicode(value)
else:
try:
tags[filepath][headers[col]] = u'%s' % value
except UnicodeDecodeError:
tags[filepath][headers[col]] = value.decode("utf-8")
col+=1
return tags
def add_album(album_name, tags_dicts, source=str(datetime.date.today()),
gordonDir=DEF_GORDON_DIR, prompt_aname=False, import_md=False, fast_import=False):
"""Add an album from a list of metadata in <tags_dicts> v "1.0 CSV"
"""
log.debug('Adding album "%s"', album_name)
# create set of artists from tag_dicts
artists = set()
for track in tags_dicts.itervalues():
artists.add(track['artist'])
if len(artists) == 0:
log.debug('Nothing to add')
return # no songs
else:
log.debug('Found %d artists in directory: %s', len(artists), artists)
#add album to Album table
log.debug('Album has %d tracks', len(tags_dicts))
albumrec = Album(name = album_name, trackcount = len(tags_dicts))
#if we have an *exact* string match we will use the existing artist
artist_dict = dict()
for artist in artists:
match = Artist.query.filter_by(name=artist)
if match.count() == 1 :
log.debug('Matched %s to %s in database', artist, match[0])
artist_dict[artist] = match[0]
#todo: (eckdoug) what happens if match.count()>1? This means we have multiple artists in db with same
# name. Do we try harder to resolve which one? Or just add a new one. I added a new one (existing code)
# but it seems risky.. like we will generate lots of new artists.
# Anyway, we resolve this in the musicbrainz resolver....
else :
# add our Artist to artist table
newartist = Artist(name = artist)
artist_dict[artist] = newartist
#add artist to album (album_artist table)
albumrec.artists.append(artist_dict[artist])
# Commit these changes in order to have access to this album
# record when adding tracks.
commit()
# Now add our tracks to album.
for filename in sorted(tags_dicts.keys()):
add_track(filename, source=source, gordonDir=gordonDir, tag_dict=tags_dicts[filename],
artist=artist_dict[tags_dicts[filename][u'artist']], album=albumrec,
fast_import=fast_import, import_md=import_md)
log.debug('Added "%s"', filename)
#now update our track counts
for aname, artist in artist_dict.iteritems() :
artist.update_trackcount()
log.debug('Updated trackcount for artist %s', artist)
albumrec.update_trackcount()
log.debug('Updated trackcount for album %s', albumrec)
commit()
def get_or_create_collection(source):
match = Collection.query.filter_by(name = unicode(source))
if match.count() == 1:
log.debug(' Matched source %s in database', match[0])
return match[0]
else:
return Collection(name=unicode(source))
def add_collection_from_csv_file(csvfile, source=str(datetime.date.today()),
prompt_incompletes=False, doit=False,
gordonDir=DEF_GORDON_DIR, fast_import=False,
import_md=False):
"""Adds tracks from a CSV (file) list of file-paths.
Only imports if all songs actually have same album name.
With flag prompt_incompletes will prompt for incomplete albums as well
Use doit=True to actually commit the addition of songs
"""
metadata = _read_csv_tags(csvfile)
# Turn metadata into a list of albums:
albums = collections.defaultdict(dict)
for filename, x in metadata.iteritems():
albums[x['album']][filename] = x
ntracks = 1
for albumname in sorted(albums):
tracks = albums[albumname]
# tracks is a 2D dict[<file-name>][<tag>] for a single album.
if not doit:
print 'Would import album "%s"' % albumname
for track in sorted(tracks):
print ' Would import file %d: "%s"' % (ntracks, track)
for key, value in tracks[track].iteritems():
strvalue = '%s' % value
if '\n' in strvalue:
strvalue = '%s ...' % strvalue.split('\n')[0]
print ' %s: %s' % (key, strvalue)
ntracks += 1
else:
            add_album(albumname, tracks, source=source, gordonDir=gordonDir,
                      prompt_aname=prompt_incompletes, import_md=import_md,
                      fast_import=fast_import)
print 'Finished'
def _die_with_usage() :
    print 'This program imports a set of tracks (and their corresponding metadata) listed in a csv file into the database'
print 'Usage: '
print 'audio_intake [flags] <source> <csvfile> [doit] [metadata]'
print 'Flags:'
print ' -fast: imports without calculating zero-stripped track times.'
print ' -noprompt: will not prompt for incomplete albums. See log for what we skipped'
print 'Arguments: '
print ' <source> is the string stored to the database for source (to identify the collection) e.g. DougDec22'
print ' <csvfile> is the csv file listing tracks to be imported'
print ' <doit> (default 1) use 0 to test the intake harmlessly'
print ' <metadata> (default 0) use 1 to import all metadata tags from the file'
print 'More options are available by using the function add_collection()'
sys.exit(0)
def process_arguments():
parser = argparse.ArgumentParser(description='Gordon audio intake from track list')
parser.add_argument('source',
action = 'store',
help = 'name for the collection')
parser.add_argument('csvfile',
action = 'store',
help = 'path to the track list CSV file')
parser.add_argument('-f',
'--fast',
action = 'store_true',
dest = 'fast',
default = False,
help = 'imports without calculating zero-stripped track times')
parser.add_argument('--no-prompt',
action = 'store_false',
dest = 'prompt_incompletes',
default = True,
help = 'Do not prompt for incomplete albums. See log for what we skipped.')
parser.add_argument('-t',
'--test',
action = 'store_false',
dest = 'doit',
default = True,
help = 'test the intake without modifying the database')
parser.add_argument('-m',
'--metadata',
action = 'store_true',
dest = 'import_md',
default = False,
help = 'import all metadata flags from the audio file')
return vars(parser.parse_args(sys.argv[1:]))
if __name__ == '__main__':
args = process_arguments()
log.info('Importing audio from tracklist %s (source=%s)', args['csvfile'], args['source'])
add_collection_from_csv_file( args['csvfile'],
source=args['source'],
prompt_incompletes=args['prompt_incompletes'],
doit=args['doit'],
fast_import=args['fast'],
import_md=args['import_md'])
| gpl-3.0 | -3,791,216,342,320,178,000 | 39.362998 | 121 | 0.582187 | false | 3.986815 | false | false | false |
cirosantilli/linux-kernel-module-cheat | shell_helpers.py | 1 | 20166 | #!/usr/bin/env python3
import base64
import distutils.file_util
import io
import itertools
import os
import shlex
import shutil
import signal
import stat
import subprocess
import sys
import threading
from typing import List, Union
import urllib.request
class LF:
'''
LineFeed (AKA newline).
Singleton class. Can be used in print_cmd to print out nicer command lines
with --key on the same line as "--key value".
'''
pass
class ShellHelpers:
'''
Helpers to do things which are easy from the shell,
usually filesystem, process or pipe operations.
Attempt to print shell equivalents of all commands to make things
easy to debug and understand what is going on.
'''
_print_lock = threading.Lock()
def __init__(self, dry_run=False, quiet=False, force_oneline=False):
'''
:param dry_run: don't run the commands, just potentially print them. Debug aid.
:type dry_run: Bool
:param quiet: don't print the commands
:type dry_run: Bool
'''
self.dry_run = dry_run
self.force_oneline_default = force_oneline
self.quiet = quiet
@classmethod
def _print_thread_safe(cls, string):
'''
Python sucks: a naive print adds a bunch of random spaces to stdout,
and then copy pasting the command fails.
https://stackoverflow.com/questions/3029816/how-do-i-get-a-thread-safe-print-in-python-2-6
The initial use case was test-gdb which must create a thread for GDB to run the program in parallel.
'''
with cls._print_lock:
try:
print(string, flush=True)
except BrokenPipeError:
# https://stackoverflow.com/questions/26692284/how-to-prevent-brokenpipeerror-when-doing-a-flush-in-python
# https://stackoverflow.com/questions/16314321/suppressing-printout-of-exception-ignored-message-in-python-3
pass
def add_newlines(self, cmd):
out = []
for arg in cmd:
out.extend([arg, LF])
return out
def base64_encode(self, string):
'''
TODO deal with redirection and print nicely.
'''
return base64.b64encode(string.encode()).decode()
def base64_decode(self, string):
return base64.b64decode(string.encode()).decode()
def check_output(self, *args, **kwargs):
'''
Analogous to subprocess.check_output: get the stdout / stderr
of a program back as a byte array.
'''
out_str = []
actual_kwargs = {
'show_stdout': False,
'show_cmd': False
}
actual_kwargs.update(kwargs)
self.run_cmd(
*args,
out_str=out_str,
**actual_kwargs
)
return out_str[0]
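    # Illustrative usage sketch (not in the original file); `sh` is an assumed
    # ShellHelpers instance. The return value is raw bytes:
    #
    #   git_head = sh.check_output(['git', 'rev-parse', 'HEAD', LF]).decode().strip()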
def chmod(self, path, add_rm_abs='+', mode_delta=stat.S_IXUSR):
'''
TODO extend further, shell print equivalent.
'''
old_mode = os.stat(path).st_mode
if add_rm_abs == '+':
new_mode = old_mode | mode_delta
elif add_rm_abs == '':
new_mode = mode_delta
elif add_rm_abs == '-':
new_mode = old_mode & ~mode_delta
os.chmod(path, new_mode)
def force_oneline(self, force_oneline):
if force_oneline is not None:
return force_oneline
else:
return self.force_oneline_default
def cmd_to_string(
self,
cmd: List[Union[str, LF]],
cwd=None,
extra_env=None,
extra_paths=None,
force_oneline: Union[bool,None] =None,
*,
stdin_path: Union[str,None] =None
):
'''
Format a command given as a list of strings so that it can
be viewed nicely and executed by bash directly and print it to stdout.
If cmd contains:
* no LF, then newlines are added after every word
* exactly one LF at the end, then no newlines are added
* otherwise: newlines are added exactly at each LF
'''
last_newline = ' \\\n'
newline_separator = last_newline + ' '
out = []
if extra_env is None:
extra_env = {}
preffix_arr = []
if cwd is not None:
preffix_arr.append('cd {} &&'.format(shlex.quote(cwd)))
extra_env2 = extra_env.copy()
if extra_paths is not None:
extra_env2['PATH'] = '{}:"${{PATH}}"'.format(shlex.quote(':'.join(extra_paths)))
for key in extra_env2:
preffix_arr.append('{}={}'.format(shlex.quote(key), shlex.quote(extra_env2[key])))
cmd_quote = []
newline_count = 0
for arg in cmd:
if arg == LF:
if not self.force_oneline(force_oneline):
cmd_quote.append(arg)
newline_count += 1
else:
cmd_quote.append(shlex.quote(arg))
if self.force_oneline(force_oneline) or newline_count > 0:
cmd_quote = [
' '.join(list(y))
for x, y in itertools.groupby(
cmd_quote,
lambda z: z == LF
)
if not x
]
if self.force_oneline(force_oneline):
cmd_quote = [' '.join(preffix_arr + cmd_quote)]
else:
cmd_quote = preffix_arr + cmd_quote
out.extend(cmd_quote)
if stdin_path is not None:
out.append('< {}'.format(shlex.quote(stdin_path)))
if self.force_oneline(force_oneline) or newline_count == 1 and cmd[-1] == LF:
ending = ''
else:
ending = last_newline + ';'
return newline_separator.join(out) + ending
def copy_file_if_update(self, src, destfile):
if os.path.isdir(destfile):
destfile = os.path.join(destfile, os.path.basename(src))
self.mkdir_p(os.path.dirname(destfile))
if (
not os.path.exists(destfile) or \
os.path.getmtime(src) > os.path.getmtime(destfile)
):
self.cp(src, destfile)
def copy_dir_if_update_non_recursive(
self,
srcdir,
destdir,
filter_ext=None
):
# TODO print rsync equivalent.
os.makedirs(destdir, exist_ok=True)
if not os.path.exists(srcdir) and self.dry_run:
basenames = []
else:
basenames = os.listdir(srcdir)
for basename in sorted(basenames):
src = os.path.join(srcdir, basename)
if os.path.isfile(src) or os.path.islink(src):
noext, ext = os.path.splitext(basename)
if (filter_ext is None or ext == filter_ext):
dest = os.path.join(destdir, basename)
self.copy_file_if_update(src, dest)
def copy_dir_if_update(
self,
srcdir,
destdir,
filter_ext=None
):
self.copy_dir_if_update_non_recursive(srcdir, destdir, filter_ext)
srcdir_abs = os.path.abspath(srcdir)
srcdir_abs_len = len(srcdir_abs)
for path, dirnames, filenames in self.walk(srcdir_abs):
for dirname in dirnames:
dirpath = os.path.join(path, dirname)
dirpath_relative_root = dirpath[srcdir_abs_len + 1:]
self.copy_dir_if_update_non_recursive(
dirpath,
os.path.join(destdir, dirpath_relative_root),
filter_ext
)
def cp(self, src, dest, **kwargs):
if not kwargs.get('quiet', False):
self.print_cmd(['cp', src, dest])
if not self.dry_run:
if os.path.islink(src):
if os.path.lexists(dest):
os.unlink(dest)
linkto = os.readlink(src)
os.symlink(linkto, dest)
else:
shutil.copy2(src, dest)
def mkdir_p(self, d):
if not os.path.exists(d):
self.print_cmd(['mkdir', d, LF])
if not self.dry_run:
os.makedirs(d)
def mv(self, src, dest, **kwargs):
self.print_cmd(['mv', src, dest])
if not self.dry_run:
shutil.move(src, dest)
def print_cmd(
self,
cmd,
cwd=None,
cmd_file=None,
cmd_files=None,
extra_env=None,
extra_paths=None,
force_oneline: Union[bool,None] =None,
*,
stdin_path: Union[str,None] =None
):
'''
Print cmd_to_string to stdout.
Optionally save the command to cmd_file file, and add extra_env
environment variables to the command generated.
'''
if type(cmd) is str:
cmd_string = cmd
else:
cmd_string = self.cmd_to_string(
cmd,
cwd=cwd,
extra_env=extra_env,
extra_paths=extra_paths,
force_oneline=force_oneline,
stdin_path=stdin_path
)
if not self.quiet:
self._print_thread_safe('+ ' + cmd_string)
if cmd_files is None:
cmd_files = []
if cmd_file is not None:
cmd_files.append(cmd_file)
for cmd_file in cmd_files:
os.makedirs(os.path.dirname(cmd_file), exist_ok=True)
with open(cmd_file, 'w') as f:
f.write('#!/usr/bin/env bash\n')
f.write(cmd_string)
self.chmod(cmd_file)
def rmrf(self, path):
self.print_cmd(['rm', '-r', '-f', path, LF])
if not self.dry_run and os.path.exists(path):
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.unlink(path)
def run_cmd(
self,
cmd,
cmd_file=None,
cmd_files=None,
out_file=None,
show_stdout=True,
show_cmd=True,
extra_env=None,
extra_paths=None,
delete_env=None,
raise_on_failure=True,
*,
out_str=None,
stdin_path: Union[str,None] =None,
**kwargs
):
'''
Run a command. Write the command to stdout before running it.
Wait until the command finishes execution.
:param cmd: command to run. LF entries are magic get skipped.
:type cmd: List[str]
:param cmd_file: if not None, write the command to be run to that file
:type cmd_file: str
:param cmd_files: if not None, write the command to be run to all files in this list
cmd_file gets appended to that list if given.
:type cmd_files: List[str]
:param out_file: if not None, write the stdout and stderr of the command the file
:type out_file: str
:param out_str: if not None, append the stdout and stderr string to this list
:type out_str: Union(List,None)
:param show_stdout: wether to show stdout and stderr on the terminal or not
:type show_stdout: bool
:param extra_env: extra environment variables to add when running the command
:type extra_env: Dict[str,str]
:return: exit status of the command
:rtype: int
'''
if out_file is None and out_str is None:
if show_stdout:
stdout = None
stderr = None
else:
stdout = subprocess.DEVNULL
stderr = subprocess.DEVNULL
else:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
if extra_env is None:
extra_env = {}
if delete_env is None:
delete_env = []
if 'cwd' in kwargs:
cwd = kwargs['cwd']
else:
cwd = None
env = os.environ.copy()
env.update(extra_env)
if extra_paths is not None:
path = ':'.join(extra_paths)
if 'PATH' in os.environ:
path += ':' + os.environ['PATH']
env['PATH'] = path
for key in delete_env:
if key in env:
del env[key]
if show_cmd:
self.print_cmd(
cmd,
cwd=cwd,
cmd_file=cmd_file,
cmd_files=cmd_files,
extra_env=extra_env,
extra_paths=extra_paths,
stdin_path=stdin_path
)
# Otherwise, if called from a non-main thread:
# ValueError: signal only works in main thread
if threading.current_thread() == threading.main_thread():
# Otherwise Ctrl + C gives:
# - ugly Python stack trace for gem5 (QEMU takes over terminal and is fine).
# - kills Python, and that then kills GDB:
# https://stackoverflow.com/questions/19807134/does-python-always-raise-an-exception-if-you-do-ctrlc-when-a-subprocess-is-exec
sigint_old = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Otherwise BrokenPipeError when piping through | grep
        # But if I do this, my terminal gets broken at the end. Why, why, why.
# https://stackoverflow.com/questions/14207708/ioerror-errno-32-broken-pipe-python
# Ignoring the exception is not enough as it prints a warning anyways.
#sigpipe_old = signal.getsignal(signal.SIGPIPE)
#signal.signal(signal.SIGPIPE, signal.SIG_DFL)
cmd = self.strip_newlines(cmd)
if not self.dry_run:
if stdin_path is None:
stdin = None
else:
stdin = open(stdin_path, 'r')
# https://stackoverflow.com/questions/15535240/python-popen-write-to-stdout-and-log-file-simultaneously/52090802#52090802
with subprocess.Popen(
cmd,
stdin=stdin,
stdout=stdout,
stderr=stderr,
env=env,
**kwargs
) as proc:
if out_file is not None or out_str is not None:
if out_file is not None:
os.makedirs(os.path.split(os.path.abspath(out_file))[0], exist_ok=True)
if out_file is not None:
logfile = open(out_file, 'bw')
logfile_str = []
while True:
byte = proc.stdout.read(1)
if byte:
if show_stdout:
sys.stdout.buffer.write(byte)
try:
sys.stdout.flush()
except BlockingIOError:
# TODO understand. Why, Python, why.
pass
if out_file is not None:
logfile.write(byte)
if out_str is not None:
logfile_str.append(byte)
else:
break
if out_file is not None:
logfile.close()
if out_str is not None:
out_str.append((b''.join(logfile_str)))
if threading.current_thread() == threading.main_thread():
signal.signal(signal.SIGINT, sigint_old)
#signal.signal(signal.SIGPIPE, sigpipe_old)
if stdin_path is not None:
stdin.close()
returncode = proc.returncode
if returncode != 0 and raise_on_failure:
e = Exception('Command exited with status: {}'.format(returncode))
e.returncode = returncode
raise e
return returncode
else:
if not out_str is None:
out_str.append(b'')
return 0
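    # Illustrative usage sketch (not in the original file); `sh` is an assumed
    # ShellHelpers instance. LF entries only affect how the command is printed:
    #
    #   sh = ShellHelpers()
    #   sh.run_cmd(['make', '-j', '8', LF], cwd='build', out_file='build.log')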
def shlex_split(self, string):
'''
shlex_split, but also add Newline after every word.
Not perfect since it does not group arguments, but I don't see a solution.
'''
return self.add_newlines(shlex.split(string))
def strip_newlines(self, cmd):
if type(cmd) is str:
return cmd
else:
return [x for x in cmd if x != LF]
def walk(self, root):
'''
Extended walk that can take files or directories.
'''
if not os.path.exists(root):
raise Exception('Path does not exist: ' + root)
if os.path.isfile(root):
dirname, basename = os.path.split(root)
yield dirname, [], [basename]
else:
for path, dirnames, filenames in os.walk(root):
dirnames.sort()
filenames.sort()
yield path, dirnames, filenames
def wget(self, url, download_path):
'''
        Download the file at <url> to <download_path>, wget style.
        I wish we could have a progress indicator, but impossible:
https://stackoverflow.com/questions/51212/how-to-write-a-download-progress-indicator-in-python
'''
self.print_cmd([
'wget', LF,
'-O', download_path, LF,
url, LF,
])
        if not self.dry_run:
            urllib.request.urlretrieve(url, download_path)
def write_configs(self, config_path, configs, config_fragments=None, mode='a'):
'''
Append extra KEY=val configs into the given config file.
'''
if config_fragments is None:
config_fragments = []
for config_fragment in config_fragments:
self.print_cmd(['cat', config_fragment, '>>', config_path])
if not self.dry_run:
with open(config_path, 'a') as config_file:
for config_fragment in config_fragments:
with open(config_fragment, 'r') as config_fragment_file:
for line in config_fragment_file:
config_file.write(line)
self.write_string_to_file(config_path, '\n'.join(configs), mode=mode)
def write_string_to_file(self, path, string, mode='w'):
if mode == 'a':
redirect = '>>'
else:
redirect = '>'
self.print_cmd("cat << 'EOF' {} {}\n{}\nEOF".format(redirect, path, string))
if not self.dry_run:
with open(path, mode) as f:
f.write(string)
if __name__ == '__main__':
shell_helpers = ShellHelpers()
if 'cmd_to_string':
# Default.
assert shell_helpers.cmd_to_string(['cmd']) == 'cmd \\\n;'
assert shell_helpers.cmd_to_string(['cmd', 'arg1']) == 'cmd \\\n arg1 \\\n;'
assert shell_helpers.cmd_to_string(['cmd', 'arg1', 'arg2']) == 'cmd \\\n arg1 \\\n arg2 \\\n;'
# Argument with a space gets escaped.
assert shell_helpers.cmd_to_string(['cmd', 'arg1 arg2']) == "cmd \\\n 'arg1 arg2' \\\n;"
# Ending in LF with no other LFs get separated only by spaces.
assert shell_helpers.cmd_to_string(['cmd', LF]) == 'cmd'
assert shell_helpers.cmd_to_string(['cmd', 'arg1', LF]) == 'cmd arg1'
assert shell_helpers.cmd_to_string(['cmd', 'arg1', 'arg2', LF]) == 'cmd arg1 arg2'
# More than one LF adds newline separators at each LF.
assert shell_helpers.cmd_to_string(['cmd', LF, 'arg1', LF]) == 'cmd \\\n arg1 \\\n;'
assert shell_helpers.cmd_to_string(['cmd', LF, 'arg1', LF, 'arg2', LF]) == 'cmd \\\n arg1 \\\n arg2 \\\n;'
assert shell_helpers.cmd_to_string(['cmd', LF, 'arg1', 'arg2', LF]) == 'cmd \\\n arg1 arg2 \\\n;'
# force_oneline separates everything simply by spaces.
assert \
shell_helpers.cmd_to_string(['cmd', LF, 'arg1', LF, 'arg2', LF], force_oneline=True) \
== 'cmd arg1 arg2'
# stdin_path
assert shell_helpers.cmd_to_string(['cmd'], stdin_path='ab') == "cmd \\\n < ab \\\n;"
assert shell_helpers.cmd_to_string(['cmd'], stdin_path='a b') == "cmd \\\n < 'a b' \\\n;"
| gpl-3.0 | -5,774,252,105,433,351,000 | 34.946524 | 140 | 0.528265 | false | 3.989318 | true | false | false |
egafford/sahara | sahara/plugins/mapr/versions/v5_0_0_mrv2/version_handler.py | 2 | 2805 | # Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sahara.plugins.mapr.base.base_version_handler as bvh
from sahara.plugins.mapr.services.drill import drill
from sahara.plugins.mapr.services.flume import flume
from sahara.plugins.mapr.services.hbase import hbase
from sahara.plugins.mapr.services.hive import hive
from sahara.plugins.mapr.services.httpfs import httpfs
from sahara.plugins.mapr.services.hue import hue
from sahara.plugins.mapr.services.impala import impala
from sahara.plugins.mapr.services.mahout import mahout
from sahara.plugins.mapr.services.management import management as mng
from sahara.plugins.mapr.services.maprfs import maprfs
from sahara.plugins.mapr.services.oozie import oozie
from sahara.plugins.mapr.services.pig import pig
from sahara.plugins.mapr.services.spark import spark
from sahara.plugins.mapr.services.sqoop import sqoop2
from sahara.plugins.mapr.services.swift import swift
from sahara.plugins.mapr.services.yarn import yarn
import sahara.plugins.mapr.versions.v5_0_0_mrv2.context as c
version = "5.0.0.mrv2"
class VersionHandler(bvh.BaseVersionHandler):
def __init__(self):
super(VersionHandler, self).__init__()
self._version = version
self._required_services = [
yarn.YARNv270(),
maprfs.MapRFS(),
mng.Management(),
oozie.Oozie(),
]
self._services = [
hive.HiveV013(),
hive.HiveV10(),
hive.HiveV12(),
impala.ImpalaV141(),
pig.PigV014(),
pig.PigV015(),
flume.FlumeV15(),
flume.FlumeV16(),
spark.SparkOnYarn(),
sqoop2.Sqoop2(),
mahout.MahoutV010(),
oozie.OozieV410(),
oozie.OozieV420(),
hue.HueV370(),
hue.HueV381(),
hue.HueV390(),
hbase.HBaseV0989(),
hbase.HBaseV09812(),
drill.DrillV11(),
drill.DrillV14(),
yarn.YARNv270(),
maprfs.MapRFS(),
mng.Management(),
httpfs.HttpFS(),
swift.Swift(),
]
def get_context(self, cluster, added=None, removed=None):
return c.Context(cluster, self, added, removed)
| apache-2.0 | -3,551,650,006,758,706,000 | 34.961538 | 75 | 0.663458 | false | 3.497506 | false | false | false |
eeriks/velo.lv | velo/registration/competition_classes/rm2017.py | 2 | 10599 | import datetime
from django.utils.translation import activate
from io import BytesIO
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import cm
from reportlab.pdfgen import canvas
from velo.results.tasks import create_result_sms
from velo.core.models import Log
from velo.core.pdf import fill_page_with_image, _baseFontNameB
from velo.registration.competition_classes import RM2016
from velo.registration.models import UCICategory, Participant, PreNumberAssign
from velo.results.models import ChipScan, DistanceAdmin, Result, LapResult
from velo.results.tables import ResultRMGroupTable, ResultRMDistanceTable, ResultRMTautaDistanceTable
class RM2017(RM2016):
SPORTA_DISTANCE_ID = 65
TAUTAS_DISTANCE_ID = 66
TAUTAS1_DISTANCE_ID = 77
GIMENU_DISTANCE_ID = 68
BERNU_DISTANCE_ID = 67
def _update_year(self, year):
return year + 3
@property
def groups(self):
"""
Returns defined groups for each competition type.
"""
return {
self.SPORTA_DISTANCE_ID: ('M-18', 'M', 'Masters', 'M 19-34 CFA', 'W'),
self.TAUTAS_DISTANCE_ID: ('T M-16', 'T W-16', 'T M', 'T W', 'T M-35', 'T M-45', 'T M-55', 'T M-65'),
self.TAUTAS1_DISTANCE_ID: ('T1 M', 'T1 W',)
}
def number_ranges(self):
"""
Returns number ranges for each distance.
"""
return {
self.SPORTA_DISTANCE_ID: [{'start': 1, 'end': 500, 'group': ''}, ],
self.TAUTAS_DISTANCE_ID: [{'start': 2001, 'end': 3400, 'group': ''}, ],
self.TAUTAS1_DISTANCE_ID: [{'start': 3401, 'end': 4000, 'group': ''}, ],
}
def assign_group(self, distance_id, gender, birthday, participant=None):
year = birthday.year
if distance_id not in (self.SPORTA_DISTANCE_ID, self.TAUTAS_DISTANCE_ID, self.TAUTAS1_DISTANCE_ID):
return ''
elif distance_id == self.SPORTA_DISTANCE_ID:
if gender == 'M':
if participant and (self._update_year(1995) >= year >= self._update_year(1980)) and UCICategory.objects.filter(category="CYCLING FOR ALL", slug=participant.slug):
return 'M 19-34 CFA'
if self._update_year(1997) >= year >= self._update_year(1996):
return 'M-18'
elif year <= self._update_year(1979):
return 'Masters'
else:
return 'M'
else:
return 'W'
elif distance_id == self.TAUTAS_DISTANCE_ID:
if gender == 'M':
if self._update_year(1999) >= year >= self._update_year(1998):
return 'T M-16'
elif self._update_year(1997) >= year >= self._update_year(1980):
return 'T M'
elif self._update_year(1979) >= year >= self._update_year(1970):
return 'T M-35'
elif self._update_year(1969) >= year >= self._update_year(1960):
return 'T M-45'
elif self._update_year(1959) >= year >= self._update_year(1950):
return 'T M-55'
elif year <= self._update_year(1949):
return 'T M-65'
else:
if self._update_year(1999) >= year >= self._update_year(1996):
return 'T W-16'
elif year <= self._update_year(1997):
return 'T W'
elif distance_id == self.TAUTAS1_DISTANCE_ID:
if gender == 'M':
return 'T1 M'
else:
return 'T1 W'
        print("here I shouldn't be...")
raise Exception('Invalid group assigning. {0} {1} {2}'.format(gender, distance_id, birthday))
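    # Worked example (illustrative): _update_year() shifts the 2014 base years
    # by +3 for this 2017 edition, so in the Tautas distance a man born in
    # 2001-2002 falls into 'T M-16', 1983-2000 into 'T M', 1973-1982 into
    # 'T M-35', 1963-1972 into 'T M-45', 1953-1962 into 'T M-55' and 1952 or
    # earlier into 'T M-65'.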
def passages(self):
return {
self.SPORTA_DISTANCE_ID: [(1, 1, 200, 0), (2, 201, 500, 0)],
self.TAUTAS_DISTANCE_ID: [
(1, 2001, 2200, 20),
(2, 2201, 2400, 20),
(3, 2401, 2600, 15),
(4, 2601, 2800, 10),
(5, 2801, 3000, 10),
(6, 3001, 3200, 5),
(7, 3201, 3400, 5),
],
self.TAUTAS1_DISTANCE_ID: [
(1, 3401, 3600, 5),
(2, 3601, 3800, 5),
(3, 3801, 4000, 5),
],
}
def number_pdf(self, participant_id):
activate('lv')
participant = Participant.objects.get(id=participant_id)
output = BytesIO()
c = canvas.Canvas(output, pagesize=A4)
fill_page_with_image("velo/media/competition/vestule/RVm_2017_vestule_ar_tekstu.jpg", c)
c.setFont(_baseFontNameB, 18)
c.drawString(6*cm, 20.6*cm, "%s %s" % (participant.full_name.upper(), participant.birthday.year))
c.drawString(5*cm, 18.6*cm, str(participant.distance))
if participant.primary_number:
c.setFont(_baseFontNameB, 35)
c.drawString(16*cm, 19.6*cm, str(participant.primary_number))
elif participant.distance_id == self.GIMENU_DISTANCE_ID:
c.setFont(_baseFontNameB, 25)
c.drawString(15*cm, 19.6*cm, "Ģimeņu br.")
else:
c.setFont(_baseFontNameB, 25)
c.drawString(16.5*cm, 19.6*cm, "-")
c.showPage()
c.save()
output.seek(0)
return output
def assign_numbers(self, reassign=False, assign_special=False):
# TODO: Need to find all participants that have started in sport distance and now are in other distances.
prev_participants = [p.slug for p in Participant.objects.filter(is_participating=True, competition=self.competition, distance_id=53)]
now_participants = Participant.objects.filter(distance_id=self.TAUTAS_DISTANCE_ID, is_participating=True, slug__in=prev_participants)
for now in now_participants:
try:
PreNumberAssign.objects.get(competition=self.competition, participant_slug=now.slug)
            except PreNumberAssign.DoesNotExist:
PreNumberAssign.objects.create(competition=self.competition, distance=now.distance, participant_slug=now.slug, segment=1)
super().assign_numbers(reassign, assign_special)
def result_select_extra(self, distance_id):
return {
'l1': 'SELECT time FROM results_lapresult l1 WHERE l1.result_id = results_result.id and l1.index=1',
}
def get_result_table_class(self, distance, group=None):
if group:
return ResultRMGroupTable
else:
if distance.id in (self.SPORTA_DISTANCE_ID, self.TAUTAS1_DISTANCE_ID):
return ResultRMDistanceTable
else:
return ResultRMTautaDistanceTable
def process_chip_result(self, chip_id, sendsms=True, recalc=False):
"""
Function processes chip result and recalculates all standings
"""
chip = ChipScan.objects.get(id=chip_id)
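        # Illustrative timing walk-through (hypothetical numbers): with a
        # distance "zero" time of 10:00:00, a chip read at 11:23:45 gives a
        # net result_time of 01:23:45.  Net times under 10 minutes are ignored
        # as noise, and a later read landing within +/- 5 minutes of an already
        # recorded lap is treated as a double scan of the same lap.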
distance_admin = DistanceAdmin.objects.get(competition=chip.competition, distance=chip.nr.distance)
Log.objects.create(content_object=chip, action="Chip process", message="Started")
delta = datetime.datetime.combine(datetime.date.today(), distance_admin.zero) - datetime.datetime.combine(datetime.date.today(), datetime.time(0,0,0,0))
result_time = (datetime.datetime.combine(datetime.date.today(), chip.time) - delta).time()
result_time_5back = (datetime.datetime.combine(datetime.date.today(), chip.time) - delta - datetime.timedelta(minutes=5)).time()
if result_time_5back > result_time:
result_time_5back = datetime.time(0,0,0)
result_time_5forw = (datetime.datetime.combine(datetime.date.today(), chip.time) - delta + datetime.timedelta(minutes=5)).time()
seconds = result_time.hour * 60 * 60 + result_time.minute * 60 + result_time.second
# Do not process if finished in 10 minutes.
if seconds < 10 * 60 or chip.time < distance_admin.zero: # 10 minutes
Log.objects.create(content_object=chip, action="Chip process", message="Chip result less than 10 minutes. Ignoring.")
return None
if chip.is_processed:
Log.objects.create(content_object=chip, action="Chip process", message="Chip already processed")
return None
participants = Participant.objects.filter(competition_id__in=self.competition.get_ids(), is_participating=True, slug=chip.nr.participant_slug, distance=chip.nr.distance)
if not participants:
Log.objects.create(content_object=chip, action="Chip process", message="Number not assigned to anybody. Ignoring.")
return None
else:
participant = participants[0]
if participant.is_competing:
result, created = Result.objects.get_or_create(competition=chip.competition, participant=participant, number=chip.nr)
already_exists_result = LapResult.objects.filter(result=result, time__gte=result_time_5back, time__lte=result_time_5forw)
if already_exists_result:
Log.objects.create(content_object=chip, action="Chip process", message="Chip double scanned.")
else:
laps_done = result.lapresult_set.count()
result.lapresult_set.create(index=(laps_done+1), time=result_time)
# Fix lap index
for index, lap in enumerate(result.lapresult_set.order_by('time'), start=1):
lap.index = index
lap.save()
if (chip.nr.distance_id == self.SPORTA_DISTANCE_ID and laps_done == 0) or (chip.nr.distance_id == self.TAUTAS_DISTANCE_ID and laps_done == 1) or (chip.nr.distance_id == self.TAUTAS1_DISTANCE_ID and laps_done == 0):
Log.objects.create(content_object=chip, action="Chip process", message="DONE. Lets assign avg speed.")
last_laptime = result.lapresult_set.order_by('-time')[0]
result.time = last_laptime.time
result.set_avg_speed()
result.save()
self.assign_standing_places()
if self.competition.competition_date == datetime.date.today() and sendsms:
create_result_sms.apply_async(args=[result.id, ], countdown=120)
chip.is_processed = True
chip.save()
print(chip)
| gpl-3.0 | 3,747,598,638,551,850,000 | 43.71308 | 230 | 0.580164 | false | 3.675685 | false | false | false |
Cenditel/cenditel.comunidades.cynin | src/ubify.smartview/setup.py | 5 | 2860 | ###############################################################################
#cyn.in is an open source Collaborative Knowledge Management Appliance that
#enables teams to seamlessly work together on files, documents and content in
#a secure central environment.
#
#cyn.in v2 an open source appliance is distributed under the GPL v3 license
#along with commercial support options.
#
#cyn.in is a Cynapse Invention.
#
#Copyright (C) 2008 Cynapse India Pvt. Ltd.
#
#This program is free software: you can redistribute it and/or modify it under
#the terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or any later version and observe
#the Additional Terms applicable to this program and must display appropriate
#legal notices. In accordance with Section 7(b) of the GNU General Public
#License version 3, these Appropriate Legal Notices must retain the display of
#the "Powered by cyn.in" AND "A Cynapse Invention" logos. You should have
#received a copy of the detailed Additional Terms License with this program.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
#Public License for more details.
#
#You should have received a copy of the GNU General Public License along with
#this program. If not, see <http://www.gnu.org/licenses/>.
#
#You can contact Cynapse at [email protected] with any problems with cyn.in.
#For any queries regarding the licensing, please send your mails to
# [email protected]
#
#You can also contact Cynapse at:
#802, Building No. 1,
#Dheeraj Sagar, Malad(W)
#Mumbai-400064, India
###############################################################################
from setuptools import setup, find_packages
import os
version = '0.1'
setup(name='ubify.smartview',
version=version,
description="intelligent views",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='web zope plone theme',
author='Cynapse',
author_email='[email protected]',
url='http://www.cynapse.com',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['ubify'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
| gpl-3.0 | -5,875,803,171,157,364,000 | 39.28169 | 83 | 0.661538 | false | 3.950276 | false | false | false |
ethoms/netdrive-connector | netdriveconnector/NetdriveConnector.py | 5 | 29380 | # Copyright (c) 2015, Euan Thoms
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys, os, subprocess
from PyQt4 import QtCore, QtGui, uic
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from LoginDialog import LoginDialog
from RootPasswordDialog import RootPasswordDialog
try:
_fromUtf8 = QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
( Ui_NetdriveConnector, QWidget ) = uic.loadUiType( os.path.join(os.path.dirname( __file__ ), 'NetdriveConnector.ui' ))
TOOLTIP_PREFIX = "Full fstab entry: "
SSHFS_INVALID_OPTIONS = ['users','noauto']
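# The fstab entries this tool manages look roughly like the (illustrative)
# lines below; loadConnectionsTable() and the connect/remove handlers only rely
# on the first four whitespace-separated fields (filesystem, mount point,
# fs type, mount options):
#   https://dav.example.org/remote.php/webdav /home/user/webdav davfs user,noauto 0 0
#   user@host:/data /home/user/sshfs fuse.sshfs user,noauto 0 0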
class NetdriveConnector ( QWidget ):
def __init__ ( self, parent = None ):
QWidget.__init__( self, parent )
self.ui = Ui_NetdriveConnector()
self.ui.setupUi( self )
self.getHomeFolder()
self.dependencyCheck()
self.loadConnectionsTable()
def __del__ ( self ):
self.ui = None
def dependencyCheck(self):
shellCommand = str("groups | egrep 'davfs2 | davfs2'")
if subprocess.call(shellCommand,shell=True) != 0:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Warning")
message =\
"""
WARNING: The currently logged in user is not a member of the davfs2 group.
This will likely cause the mounting of WebDAV connections to fail.
Consider adding this user account to the davfs2 group. Consult your OS/distributions guide for how to add a user to a group.
"""
warningMessage.setText(message)
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
def loadConnectionsTable(self):
self.ui.connectionsTableWidget.clear()
allConnections = []
if self.ui.currentUserCheckBox.isChecked():
grepForCurrentUser = " | grep " + self.homeFolder
else:
grepForCurrentUser = ""
shellCommand = str("cat /etc/fstab | grep -v '^#' | grep ' davfs '" + grepForCurrentUser)
if subprocess.call(shellCommand,shell=True) == 0:
davfsConnections = str (subprocess.check_output(shellCommand,shell=True)).splitlines()
allConnections = allConnections + davfsConnections
else:
davfsConnections = None
shellCommand = str("cat /etc/fstab | grep -v '^#' | grep ' fuse.sshfs '" + grepForCurrentUser)
if subprocess.call(shellCommand,shell=True) == 0:
sftpConnections = str (subprocess.check_output(shellCommand,shell=True)).splitlines()
allConnections = allConnections + sftpConnections
else:
sftpConnections = None
self.ui.connectionsTableWidget.setColumnCount(2)
self.ui.connectionsTableWidget.setHorizontalHeaderLabels(('URL','Mount Point'))
self.ui.connectionsTableWidget.setRowCount(len(allConnections))
row = 0
for rowData in allConnections:
url = rowData.split(' ')[0]
mountPoint = rowData.split(' ')[1]
shellCommand = str("mount | grep ' " + str(mountPoint) + " '")
if subprocess.call(shellCommand,shell=True) == 0:
bgColour = QColor(100,200,100,80)
else:
bgColour = QColor(250,120,10,80)
tableItem = QtGui.QTableWidgetItem(url)
self.ui.connectionsTableWidget.setItem(row, 0, tableItem)
tableItem.setBackgroundColor(bgColour)
tableItem.setToolTip(TOOLTIP_PREFIX + rowData)
tableItem = QtGui.QTableWidgetItem(mountPoint)
self.ui.connectionsTableWidget.setItem(row, 1, tableItem)
tableItem.setBackgroundColor(bgColour)
tableItem.setToolTip(TOOLTIP_PREFIX + rowData)
row += 1
self.ui.connectionsTableWidget.resizeColumnsToContents()
self.ui.connectionsTableWidget.resizeRowsToContents()
header = self.ui.connectionsTableWidget.horizontalHeader()
header.setStretchLastSection(True)
def clearSftpFields(self):
self.ui.sftpUsernameLineEdit.clear()
self.ui.sftpHostnameLineEdit.clear()
self.ui.sftpPortSpinBox.setValue(22)
self.ui.sftpPathLineEdit.clear()
self.ui.sftpMountpointLineEdit.clear()
self.ui.sftpPasswordlessCheckBox.setChecked(True)
self.ui.sftpPasswordLineEdit.clear()
self.ui.sftpAutoMountCheckBox.setCheckable(True)
self.ui.sftpAutoMountCheckBox.setChecked(False)
def clearWebdavFields(self):
self.ui.webdavServerUrlLineEdit.clear()
self.ui.webdavUriLineEdit.clear()
self.ui.webdavMountpointLineEdit.clear()
self.ui.httpRadioButton.setChecked(True)
self.ui.webdavProtocolLbl.setText("http://")
self.ui.webdavPortSpinBox.setValue(80)
self.ui.webdavUsernameLineEdit.clear()
self.ui.webdavPasswordLineEdit.clear()
self.ui.webdavAutoMountCheckBox.setCheckable(True)
self.ui.webdavAutoMountCheckBox.setChecked(False)
def currentUserCheckBoxClicked(self):
self.loadConnectionsTable()
def sftpPasswordlessCheckBoxClicked(self):
if self.ui.sftpPasswordlessCheckBox.isChecked():
self.ui.sftpAutoMountCheckBox.setCheckable(True)
else:
self.ui.sftpAutoMountCheckBox.setChecked(False)
self.ui.sftpAutoMountCheckBox.setCheckable(False)
def webdavSavePasswordCheckBoxClicked(self):
if self.ui.webdavSavePasswordCheckBox.isChecked():
self.ui.webdavAutoMountCheckBox.setCheckable(True)
else:
self.ui.webdavAutoMountCheckBox.setChecked(False)
self.ui.webdavAutoMountCheckBox.setCheckable(False)
def connectBtnClicked(self):
if len(self.ui.connectionsTableWidget.selectedItems()) < 1:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("No connection selected. Please select a filesystem to connect.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
toolTipText = str ( self.ui.connectionsTableWidget.selectedItems()[0].toolTip() )
toConnect = toolTipText[toolTipText.find(TOOLTIP_PREFIX)+len(TOOLTIP_PREFIX):]
filesystem = toConnect.split(' ')[0]
mountpoint = toConnect.split(' ')[1]
fsType = toConnect.split(' ')[2]
fstabMountOptions = toConnect.split(' ')[3].split(',')
mountOptions = ""
for option in fstabMountOptions:
if option not in SSHFS_INVALID_OPTIONS:
mountOptions = mountOptions + option + ","
if mountOptions is not "":
mountOptions = mountOptions[:-1]
shellCommand = str("mount | grep ' " + mountpoint + " '")
if subprocess.call(shellCommand,shell=True) == 0:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("The selected filesystem is already mounted.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
if fsType == "davfs":
shellCommand = str("cat '" + self.homeFolder + "/.davfs2/secrets' | grep '^" + filesystem +" '")
if subprocess.call(shellCommand,shell=True) != 0:
isWebdavPasswordSaved = False
loginDialog = LoginDialog("")
loginDialog.exec_()
if not loginDialog.isOK:
return False
else:
username,password = loginDialog.getLoginCredentials()
shellCommand = str("echo '" + filesystem + " " + username + " " + password + "' >> '" + self.homeFolder + "/.davfs2/secrets'")
if subprocess.call(shellCommand,shell=True) != 0:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("ERROR: Failed to add username/password to secrets file.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
else:
isWebdavPasswordSaved = True
shellCommand = str("mount " + mountpoint)
if subprocess.call(shellCommand,shell=True) != 0:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("Failed to connect filesystem: " + filesystem)
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
else:
successMessage = QtGui.QMessageBox(self)
successMessage.setWindowTitle("Netdrive Connector - Success")
successMessage.setText("Successfully connected the remote filesystem: " + filesystem )
successMessage.setIcon(QtGui.QMessageBox.Information)
successMessage.show()
if not isWebdavPasswordSaved:
# TODO: check for GNU/LInux or *BSD and use specific sed in-place command
shellCommand = str('sed -i "\|^' + filesystem + ' .*|d" "' + self.homeFolder + '/.davfs2/secrets"')
if subprocess.call(shellCommand,shell=True) != 0:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("ERROR: Failed to remove username/password from secrets file.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
if fsType == "fuse.sshfs":
# NOTE: since we rely on a ssh-askpass to graphically prompt for password (no tty),
# we need to use sshfs instead of mount. At least on Slackware, mount does not initiate the ssh-askpass.
shellCommand = str("sshfs " + filesystem + " " + mountpoint + " -o " + mountOptions)
print shellCommand
if subprocess.call(shellCommand, shell=True) != 0:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("Failed to connect filesystem: " + filesystem)
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
else:
successMessage = QtGui.QMessageBox(self)
successMessage.setWindowTitle("Netdrive Connector - Success")
successMessage.setText("Successfully connected the remote filesystem: " + filesystem )
successMessage.setIcon(QtGui.QMessageBox.Information)
successMessage.show()
self.loadConnectionsTable()
def disconnectBtnClicked(self):
if len(self.ui.connectionsTableWidget.selectedItems()) < 1:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("No connection selected. Please select a filesystem to disconnect.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
toolTipText = str ( self.ui.connectionsTableWidget.selectedItems()[0].toolTip() )
toDisconnect = toolTipText[toolTipText.find(TOOLTIP_PREFIX)+len(TOOLTIP_PREFIX):]
mountpoint = toDisconnect.split(' ')[1]
fs_type = toDisconnect.split(' ')[2]
shellCommand = str("mount | grep ' " + mountpoint + " '")
if subprocess.call(shellCommand,shell=True) != 0:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("The selected filesystem is not currently mounted.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
if fs_type == "fuse.sshfs":
shellCommand = str("fusermount -u " + mountpoint)
else:
shellCommand = str("umount " + mountpoint)
if subprocess.call(shellCommand,shell=True) != 0:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("Failed to disconnect mount point: " + mountpoint + " . Try to save and close all open files, exit the folder and try again." )
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
else:
successMessage = QtGui.QMessageBox(self)
successMessage.setWindowTitle("Netdrive Connector - Success")
successMessage.setText("Successfully disconnected the remote filesystem mounted at: " + mountpoint)
successMessage.setIcon(QtGui.QMessageBox.Information)
successMessage.show()
self.loadConnectionsTable()
def removeBtnClicked(self):
if len(self.ui.connectionsTableWidget.selectedItems()) < 1:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("No connection selected. Please select a filesystem to remove.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
toolTipText = str ( self.ui.connectionsTableWidget.selectedItems()[0].toolTip() )
connection = toolTipText[toolTipText.find(TOOLTIP_PREFIX)+len(TOOLTIP_PREFIX):]
filesystem = connection.split(' ')[0]
mountpoint = connection.split(' ')[1]
fsType = connection.split(' ')[2]
shellCommand = str("mount | grep ' " + mountpoint + " '")
if subprocess.call(shellCommand,shell=True) == 0:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("The selected filesystem is currently mounted. Disconnect before trying to remove the connection.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
reply = QtGui.QMessageBox.question(self, 'Netdrive Connector',"Are you sure that you want to remove this connection?", \
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
return False
if fsType == "davfs":
removeCmd = "remove-webdav-connector"
elif fsType == "fuse.sshfs":
removeCmd = "remove-sftp-connector"
rootPasswordDialog = RootPasswordDialog()
rootPasswordDialog.exec_()
if not rootPasswordDialog.isOK:
return False
password = rootPasswordDialog.getRootPassword()
removeConnectorParms = filesystem + " " + mountpoint
if subprocess.call(['unbuffer','netdrive-connector_run-as-root', str(password), removeCmd, removeConnectorParms]) !=0:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("Failed to remove the connection to : " + filesystem )
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
mountpointNoSlashes = str(mountpoint).replace("/","_")
shellCommand = str("rm " + self.homeFolder + "/.config/autostart/netdrive_connector" + mountpointNoSlashes + ".desktop" )
if subprocess.call(shellCommand,shell=True) != 0:
print "WARNING: problem whilst removing autostart file."
self.loadConnectionsTable()
def refreshBtnClicked(self):
self.loadConnectionsTable()
def addSftpBtnClicked(self):
sftpUsername= self.ui.sftpUsernameLineEdit.text()
sftpHostname= self.ui.sftpHostnameLineEdit.text()
sftpPort = str(self.ui.sftpPortSpinBox.value())
sftpMountpoint = self.ui.sftpMountpointLineEdit.text()
sftpPath = self.ui.sftpPathLineEdit.text()
sftpPassword = self.ui.sftpPasswordLineEdit.text()
if len(str(sftpUsername).replace(" ","")) < 1:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("No valid username. Please enter a valid username.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
if len(str(sftpHostname).replace(" ","")) < 1:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("No valid hostname. Please enter a valid hostname.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
if len(str(sftpPath).replace(" ","")) < 1:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("No valid path. Please enter a valid path.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
if len(str(sftpMountpoint).replace(" ","")) < 1:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("No mount point (folder) selected. Please select a folder to use as a mount point.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
if self.ui.sftpPasswordlessCheckBox.isChecked() and len(str(sftpPassword).replace(" ","")) < 1:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("No SFTP password supplied. Please enter the password for the user on the server.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
rootPasswordDialog = RootPasswordDialog()
rootPasswordDialog.exec_()
if not rootPasswordDialog.isOK:
return False
password = rootPasswordDialog.getRootPassword()
if self.ui.sftpPasswordlessCheckBox.isChecked():
connectorParms = sftpUsername + "@" + sftpHostname + ":" + sftpPort + "/" + sftpPath + " " + sftpMountpoint + " key " + sftpPassword
else:
connectorParms = sftpUsername + "@" + sftpHostname + ":" + sftpPort + "/" + sftpPath + " " + sftpMountpoint
if subprocess.call(['unbuffer','netdrive-connector_run-as-root', str(password), 'add-sftp-connector', connectorParms]) !=0:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("Failed to add the connection. ")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
else:
if self.ui.sftpAutoMountCheckBox.isChecked():
self.addAutoMount(sftpMountpoint, "fuse.sshfs")
self.clearSftpFields()
self.loadConnectionsTable()
def addWebdavBtnClicked(self):
webdavProtocol = self.ui.webdavProtocolLbl.text()
webdavURL = self.ui.webdavServerUrlLineEdit.text()
webdavPort = str(self.ui.webdavPortSpinBox.value())
webdavMountpoint = self.ui.webdavMountpointLineEdit.text()
webdavURI = self.ui.webdavUriLineEdit.text()
webdavUsername = self.ui.webdavUsernameLineEdit.text()
webdavPassword = self.ui.webdavPasswordLineEdit.text()
if len(str(webdavURL).replace(" ","")) < 1:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("No valid server URL. Please enter a valid server URL.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
if len(str(webdavURI).replace(" ","")) < 1:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("No valid WebDAV URI. Please enter a valid WebDAV URI.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
if len(str(webdavMountpoint).replace(" ","")) < 1:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("No mount point (folder) selected. Please select a folder to use as a mount point.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
if self.ui.webdavSavePasswordCheckBox.isChecked() and len(str(webdavUsername).replace(" ","")) < 1:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("No valid WebDAV username supplied. Please enter a valid WebDAV username.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
if self.ui.webdavSavePasswordCheckBox.isChecked() and len(str(webdavPassword).replace(" ","")) < 1:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("No WebDAV password supplied. Please enter the WebDAV password.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
rootPasswordDialog = RootPasswordDialog()
rootPasswordDialog.exec_()
if not rootPasswordDialog.isOK:
return False
password = rootPasswordDialog.getRootPassword()
if self.ui.webdavSavePasswordCheckBox.isChecked():
connectorParms = webdavProtocol + webdavURL + ":" + webdavPort + "/" + webdavURI + " " + webdavMountpoint + " " + webdavUsername + " " + webdavPassword
else:
connectorParms = webdavProtocol + webdavURL + ":" + webdavPort + "/" + webdavURI + " " + webdavMountpoint
if subprocess.call(['unbuffer','netdrive-connector_run-as-root', str(password), 'add-webdav-connector', connectorParms]) !=0:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("Failed to add the connection. ")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
else:
if self.ui.webdavAutoMountCheckBox.isChecked():
self.addAutoMount(webdavMountpoint, "davfs")
self.clearWebdavFields()
self.loadConnectionsTable()
def sftpMountpointBtnClicked(self):
mountpoint = QtGui.QFileDialog.getExistingDirectory(self, 'Select mount point',self.homeFolder)
if mountpoint == self.homeFolder:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Warning")
warningMessage.setText("WARNING: The selected folder is your home folder. Mounting a remote filesystem to your home folder is not recommended.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
if self.isMountpointOwnedByCurrentUser(mountpoint):
self.ui.sftpMountpointLineEdit.setText(mountpoint)
else:
errorMessage = QtGui.QErrorMessage(self)
errorMessage.setWindowTitle("Netdrive Connector - Error")
errorMessage.showMessage("ERROR: you are not the owner of the selected folder. Please change ownership of the folder or select a different mount point.")
def webdavMountpointBtnClicked(self):
mountpoint = QtGui.QFileDialog.getExistingDirectory(self, 'Select mount point',self.homeFolder)
if mountpoint == self.homeFolder:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Warning")
warningMessage.setText("WARNING: The selected folder is your home folder. Mounting a remote filesystem to your home folder is not recommended.")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
if self.isMountpointOwnedByCurrentUser(mountpoint):
self.ui.webdavMountpointLineEdit.setText(mountpoint)
else:
errorMessage = QtGui.QErrorMessage(self)
errorMessage.setWindowTitle("Netdrive Connector - Error")
errorMessage.showMessage("ERROR: you are not the owner of the selected folder. Please change ownership of the folder or select a different mount point.")
def httpRadioBtnClicked(self):
self.ui.webdavProtocolLbl.setText("http://")
if self.ui.webdavPortSpinBox.value() == 443:
self.ui.webdavPortSpinBox.setValue(80)
def httpsRadioBtnClicked(self):
self.ui.webdavProtocolLbl.setText("https://")
if self.ui.webdavPortSpinBox.value() == 80:
self.ui.webdavPortSpinBox.setValue(443)
def getHomeFolder(self):
self.homeFolder = str (subprocess.check_output("echo $HOME",shell=True)).splitlines()[0]
def isMountpointOwnedByCurrentUser(self, mountpoint):
currentUser = str (subprocess.check_output("whoami",shell=True)).splitlines()[0]
shellCommand = str ("ls -ld " + mountpoint + " | awk '{print $3}'")
folderOwner = str (subprocess.check_output(shellCommand,shell=True)).splitlines()[0]
if folderOwner != currentUser:
return False
else:
return True
def addAutoMount(self, mountpoint, fs_type):
mountpointNoSlashes = str(mountpoint).replace("/","_")
fileContents =\
"""
[Desktop Entry]
Name=Netdrive AutoMounter
Hidden=false
StartupNotify=false
Terminal=false
TerminalOptions=
Type=Application
"""
fileContents = str(fileContents + "Exec=netdrive-connector_automountd " + mountpoint + " " + fs_type)
shellCommand = str("if [ ! -d " + self.homeFolder + "/.config/autostart ]; then mkdir " + self.homeFolder + "/.config/autostart ; fi ; echo '" + fileContents + "' > " + self.homeFolder + "/.config/autostart/netdrive_connector" + mountpointNoSlashes + ".desktop" )
if subprocess.call(shellCommand,shell=True) != 0:
warningMessage = QtGui.QMessageBox(self)
warningMessage.setWindowTitle("Netdrive Connector - Error")
warningMessage.setText("An error occured whilst creating the autostart file in " + self.homeFolder + "/.config/autostart .")
warningMessage.setIcon(QtGui.QMessageBox.Warning)
warningMessage.show()
return False
| bsd-2-clause | 106,535,529,566,496,180 | 46.083333 | 271 | 0.641082 | false | 4.557158 | false | false | false |
ellethee/getwebfilesinator | getwebfilesinator/getwebfilesinator/commands.py | 1 | 1349 | # -*- coding: utf-8 -*-
"""
===========================================
Commands :core:`getwebfilesinator.commands`
===========================================
Commands for the GetWebFilesInator
"""
from argparseinator import ArgParseInated
from argparseinator import arg
from argparseinator import class_args
from getwebfilesinator.client import GwfiClient
from getwebfilesinator.utils import update_paths, getLogger
log = getLogger(__name__)
# Tell ArgParseInator that the class must be parsed for GetWebFilesInator
# commands and that it is an ArgParseInated subclass.
@class_args
class Commands(ArgParseInated):
"""Commands for getwebfilesinator"""
    # we will check for the configuration file. It is mandatory.
def __preinator__(self):
if not self.args.config:
# if we don't have a configuration file we will exit using
# the builtin __argpi__ (ArgParseInator Instance)
__argpi__.exit(1, u'The configuration file is mandatory\n')
# this will be the only command.
@arg()
def download(self):
"""Downloads files according with configuration"""
# lets instantiate the client passing the configuration
cli = GwfiClient(self.cfg)
# now the client should process all the files
# (should we change the name ?)
cli.process(self.cfg.files or [])
| mit | 4,739,849,057,103,220,000 | 36.472222 | 71 | 0.656042 | false | 4.296178 | true | false | false |
cropleyb/pentai | pentai/ai/t_ab_state.py | 1 | 13516 | #!/usr/bin/env python
import unittest
import pentai.base.human_player as h_m
import pentai.base.rules as r_m
import pentai.base.game as g_m
import pentai.ai.priority_filter as pf_m
import pentai.ai.utility_calculator as uc_m
from pentai.ai.ab_state import *
def get_black_line_counts(ab_game_state):
return ab_game_state.get_utility_stats().lines[P1]
def get_white_line_counts(ab_game_state):
return ab_game_state.get_utility_stats().lines[P2]
class AlphaBetaBridgeTest(unittest.TestCase):
def setUp(self):
player1 = h_m.HumanPlayer("Blomp")
player2 = h_m.HumanPlayer("Kubba")
r = r_m.Rules(13, "standard")
my_game = g_m.Game(r, player1, player2)
self.gs = my_game.current_state
self.search_filter = pf_m.PriorityFilter()
self.util_calc = uc_m.UtilityCalculator()
self.s = ABState(search_filter=self.search_filter,
utility_calculator=self.util_calc)
self.bl = self.s.utility_stats.lines[P1]
self.wl = self.s.utility_stats.lines[P2]
self.s.set_state(self.gs)
def test_update_substrips_middle_of_board(self):
self.gs.set_occ((7,7), P1)
"""
self.assertEquals(self.bl, [20, 0, 0, 0, 0])
self.assertEquals(self.wl, [0, 0, 0, 0, 0])
def test_empty_board(self):
self.assertEquals(self.bl, [0, 0, 0, 0, 0])
self.assertEquals(self.wl, [0, 0, 0, 0, 0])
def test_update_substrips_SW_corner(self):
self.gs.set_occ((0,0), P1)
self.assertEquals(self.bl, [3, 0, 0, 0, 0])
self.assertEquals(self.wl, [0, 0, 0, 0, 0])
def test_update_substrips_near_SW_corner(self):
self.gs.set_occ((1,0), P1)
self.assertEquals(self.bl, [4, 0, 0, 0, 0])
self.assertEquals(self.wl, [0, 0, 0, 0, 0])
def test_update_substrips_NE_corner(self):
self.gs.set_occ((12,12), P1)
self.assertEquals(self.bl, [3, 0, 0, 0, 0])
self.assertEquals(self.wl, [0, 0, 0, 0, 0])
def test_update_substrips_remove_single_stone(self):
self.gs.set_occ((0,0), P1)
self.gs.set_occ((0,0), EMPTY)
self.assertEquals(self.bl, [0, 0, 0, 0, 0])
self.assertEquals(self.wl, [0, 0, 0, 0, 0])
def test_update_substrips_two_blacks_SW(self):
self.gs.set_occ((0,0), P1)
self.gs.set_occ((1,1), P1)
self.assertEquals(self.bl, [7, 1, 0, 0, 0])
self.assertEquals(self.wl, [0, 0, 0, 0, 0])
def test_update_substrips_2_opp_colour_pieces(self):
self.gs.set_occ((0,0), P1)
self.gs.set_occ((0,1), P2)
self.assertEquals(self.bl, [2, 0, 0, 0, 0])
self.assertEquals(self.wl, [3, 0, 0, 0, 0])
def test_update_substrips_2_pieces(self):
self.gs.set_occ((0,0), P1)
self.gs.set_occ((0,1), P1)
self.assertEquals(self.bl, [5, 1, 0, 0, 0])
self.assertEquals(self.wl, [0, 0, 0, 0, 0])
def test_update_substrips_5_in_a_row(self):
self.gs.set_occ((0,0), P1)
self.gs.set_occ((0,1), P1)
self.gs.set_occ((0,2), P1)
self.gs.set_occ((0,3), P1)
self.gs.set_occ((0,4), P1)
self.assertEquals(self.bl, [12, 1, 1, 1, 1])
self.assertEquals(self.wl, [0, 0, 0, 0, 0])
class LengthCountingTest(unittest.TestCase):
def setUp(self):
player1 = h_m.HumanPlayer("Blomp")
player2 = h_m.HumanPlayer("Kubba")
r = r_m.Rules(9, "standard")
my_game = g_m.Game(r, player1, player2)
self.gs = my_game.current_state
self.search_filter = pf_m.PriorityFilter()
self.util_calc = uc_m.UtilityCalculator()
self.s = ABState(search_filter=self.search_filter,
utility_calculator=self.util_calc)
self.bl = self.s.utility_stats.lines[P1]
self.wl = self.s.utility_stats.lines[P2]
self.s.set_state(self.gs)
def test_middle_for_black_diag_2_for_white(self):
self.gs.set_occ((4,4), P1)
self.gs.set_occ((2,2), P2)
self.assertEquals(self.bl, [17, 0, 0, 0, 0])
self.assertEquals(self.wl, [7, 0, 0, 0, 0])
def test_middle_for_black_left_1_for_white(self):
self.gs.set_occ((4,4), P1)
self.gs.set_occ((3,4), P2)
self.assertEquals(self.bl, [16, 0, 0, 0, 0])
self.assertEquals(self.wl, [5+4+4, 0, 0, 0, 0])
def test_middle_for_black_right_1_for_white(self):
self.gs.set_occ((4,4), P1)
self.gs.set_occ((5,4), P2)
self.assertEquals(self.bl, [16, 0, 0, 0, 0])
self.assertEquals(self.wl, [5+4+4, 0, 0, 0, 0])
def test_middle_for_black_up_1_for_white(self):
self.gs.set_occ((4,4), P1)
self.gs.set_occ((4,5), P2)
self.assertEquals(self.bl, [16, 0, 0, 0, 0])
self.assertEquals(self.wl, [5+4+4, 0, 0, 0, 0])
def test_middle_for_black_down_1_for_white(self):
self.gs.set_occ((4,4), P1)
self.gs.set_occ((4,3), P2)
self.assertEquals(self.bl, [16, 0, 0, 0, 0])
self.assertEquals(self.wl, [5+4+4, 0, 0, 0, 0])
###############
class MoreAlphaBetaBridgeTests(unittest.TestCase):
def setUp(self):
player1 = h_m.HumanPlayer("Blomp")
player2 = h_m.HumanPlayer("Kubba")
r = r_m.Rules(5, "standard")
my_game = g_m.Game(r, player1, player2)
self.gs = my_game.current_state
self.search_filter = pf_m.PriorityFilter()
self.util_calc = uc_m.UtilityCalculator()
self.s = ABState(search_filter=self.search_filter,
utility_calculator=self.util_calc)
self.bl = self.s.utility_stats.lines[P1]
self.wl = self.s.utility_stats.lines[P2]
self.s.set_state(self.gs)
def test_initial_state_black_to_move(self):
self.assertEquals(self.s.to_move_colour(), P1)
def test_create_state(self):
child = self.s.create_state((2,2))
self.assertEquals(child.to_move_colour(), P2)
self.assertEquals(child.terminal(), False)
board = child.board()
self.assertEquals(board.get_occ((2,2)), P1)
self.assertEquals(board.get_occ((3,3)), EMPTY)
self.assertEquals(board.get_occ((1,1)), EMPTY)
def test_length_counters_after_sw_corner(self):
g1 = self.s.create_state((0,0)) # B
self.assertEquals(get_black_line_counts(g1), [3, 0, 0, 0, 0])
def test_length_counters_after_nw_corner(self):
g1 = self.s.create_state((0,4)) # B
self.assertEquals(get_black_line_counts(g1), [3, 0, 0, 0, 0])
def test_length_counters_after_ne_corner(self):
g1 = self.s.create_state((4,4)) # B
self.assertEquals(get_black_line_counts(g1), [3, 0, 0, 0, 0])
def test_length_counters_after_se_corner(self):
g1 = self.s.create_state((4,0)) # B
self.assertEquals(get_black_line_counts(g1), [3, 0, 0, 0, 0])
def test_cannot_place_off_e_edge(self):
try:
g1 = self.s.create_state((-1,2)) # B
except IllegalMoveException:
return
self.assertFail()
def test_length_counters_after_two_moves(self):
g1 = self.s.create_state((0,0)) # B
g2 = g1.create_state((1,1)) # W
self.assertEquals(get_black_line_counts(g2), [2, 0, 0, 0, 0])
self.assertEquals(get_white_line_counts(g2), [2, 0, 0, 0, 0])
def test_length_counters_after_two_moves_b(self):
g1 = self.s.create_state((1,1)) # B
g2 = g1.create_state((2,2)) # W
self.assertEquals(get_black_line_counts(g2), [2, 0, 0, 0, 0])
# One across the other diagonal
self.assertEquals(get_white_line_counts(g2), [3, 0, 0, 0, 0])
def test_length_counters_after_five_moves(self):
# along the NE diagonal
g1 = self.s.create_state((1,1)) # B
g2 = g1.create_state((2,2)) # W
g3 = g2.create_state((3,3)) # B
g4 = g3.create_state((4,4)) # W
g5 = g4.create_state((0,0)) # B
self.assertEquals(get_black_line_counts(g5), [6, 0, 0, 0, 0])
self.assertEquals(get_white_line_counts(g5), [5, 0, 0, 0, 0])
def test_length_counters_after_five_moves_in_cnrs_and_middle(self):
# four in the corners and one in the middle
g1 = self.s.create_state((0,0)) # B
g2 = g1.create_state((0,4)) # W
g3 = g2.create_state((4,4)) # B
g4 = g3.create_state((4,0)) # W
g5 = g4.create_state((2,2)) # B
self.assertEquals(get_black_line_counts(g5), [2, 0, 1, 0, 0])
self.assertEquals(get_white_line_counts(g5), [0, 0, 0, 0, 0])
def test_make_a_capture(self):
g1 = self.s.create_state((0,1)) # B
g2 = g1.create_state((1,2)) # W
g3 = g2.create_state((1,3)) # B
g4 = g3.create_state((2,3)) # W
g5 = g4.create_state((3,4)) # B
self.assertEquals(g5.to_move_colour(), P2)
self.assertEquals(g5.terminal(), False)
board = g5.board()
self.assertEquals(board.get_occ((0,1)), P1)
self.assertEquals(board.get_occ((1,3)), P1)
self.assertEquals(board.get_occ((3,4)), P1)
self.assertEquals(board.get_occ((1,2)), EMPTY)
self.assertEquals(board.get_occ((2,3)), EMPTY)
class ThreatTest(unittest.TestCase):
def setUp(self):
player1 = h_m.HumanPlayer("Blomp")
player2 = h_m.HumanPlayer("Kubba")
r = r_m.Rules(5, "standard")
my_game = g_m.Game(r, player1, player2)
self.gs = my_game.current_state
self.search_filter = pf_m.PriorityFilter()
self.util_calc = uc_m.UtilityCalculator()
self.s = ABState(search_filter=self.search_filter,
utility_calculator=self.util_calc)
self.bl = self.s.utility_stats.lines[P1]
self.wl = self.s.utility_stats.lines[P2]
self.s.set_state(self.gs)
def test_add_one_take_for_white(self):
g1 = self.s.create_state((2,4)) # B
g2 = g1.create_state((1,4)) # W
g3 = g2.create_state((3,4)) # B
self.assertEquals(g3.get_takes(), [0, 0, 1])
def test_SW_valid(self):
g1 = self.s.create_state((1,1)) # B
g2 = g1.create_state((3,3)) # W
g3 = g2.create_state((2,2)) # B
self.assertEquals(g3.get_takes(), [0, 0, 1])
def test_NW_valid(self):
g1 = self.s.create_state((1,3)) # B
g2 = g1.create_state((3,1)) # W
g3 = g2.create_state((2,2)) # B
self.assertEquals(g3.get_takes(), [0, 0, 1])
def test_NE_valid(self):
g1 = self.s.create_state((3,3)) # B
g2 = g1.create_state((1,1)) # W
g3 = g2.create_state((2,2)) # B
self.assertEquals(g3.get_takes(), [0, 0, 1])
def test_SE_valid(self):
g1 = self.s.create_state((2,2)) # B
g2 = g1.create_state((1,3)) # W
g3 = g2.create_state((3,1)) # B
self.assertEquals(g3.get_takes(), [0, 0, 1])
##########################################
def test_SW_invalid(self):
g1 = self.s.create_state((0,0)) # B
g2 = g1.create_state((2,2)) # W
g3 = g2.create_state((1,1)) # B
self.assertEquals(g3.get_takes(), [0, 0, 0])
def test_NW_invalid(self):
g1 = self.s.create_state((0,4)) # B
g2 = g1.create_state((2,2)) # W
g3 = g2.create_state((1,3)) # B
self.assertEquals(g3.get_takes(), [0, 0, 0])
def test_NE_invalid(self):
g1 = self.s.create_state((4,4)) # B
g2 = g1.create_state((2,2)) # W
g3 = g2.create_state((3,3)) # B
self.assertEquals(g3.get_takes(), [0, 0, 0])
def test_SE_invalid(self):
g1 = self.s.create_state((4,0)) # B
g2 = g1.create_state((2,2)) # W
g3 = g2.create_state((3,1)) # B
self.assertEquals(g3.get_takes(), [0, 0, 0])
##########################################
def test_W_invalid(self):
g1 = self.s.create_state((0,2)) # B
g2 = g1.create_state((2,2)) # W
g3 = g2.create_state((1,2)) # B
self.assertEquals(g3.get_takes(), [0, 0, 0])
def test_E_invalid(self):
g1 = self.s.create_state((4,2)) # B
g2 = g1.create_state((2,2)) # W
g3 = g2.create_state((3,2)) # B
self.assertEquals(g3.get_takes(), [0, 0, 0])
def test_N_invalid(self):
g1 = self.s.create_state((2,4)) # B
g2 = g1.create_state((2,2)) # W
g3 = g2.create_state((2,3)) # B
self.assertEquals(g3.get_takes(), [0, 0, 0])
def test_S_invalid(self):
g1 = self.s.create_state((2,0)) # B
g2 = g1.create_state((2,2)) # W
g3 = g2.create_state((2,1)) # B
self.assertEquals(g3.get_takes(), [0, 0, 0])
##########################################
def test_SW_invalid_take2(self):
g1 = self.s.create_state((1,0)) # B
g2 = g1.create_state((3,2)) # W
g3 = g2.create_state((2,1)) # B
self.assertEquals(g3.get_takes(), [0, 0, 0])
def test_SW_invalid_threat2(self):
g1 = self.s.create_state((1,0)) # B
g2 = g1.create_state((3,4)) # W (irrel.)
g3 = g2.create_state((2,1)) # B
self.assertEquals(g3.get_threats(), [0, 0, 0])
##########################################
'''
def test_seen(self):
self.s.set_seen(set([(1,2)]))
moves = list(self.s.successors())
'''
"""
# TODO: lots of threat cases, or unify stuff
if __name__ == "__main__":
unittest.main()
| mit | -94,669,123,988,427,790 | 34.851459 | 71 | 0.546168 | false | 2.752749 | true | false | false |
azarai/python-mochi | src/setup.py | 1 | 1073 | """
python-mochi
------------
python-mochi is a lib for working with the `mochiads api <https://www.mochimedia.com/support/pub_docs>`_
Links
`````
* `website <http://codeboje.de/python-mochi/>`_
* `development version
<http://github.com/azarai/python-mochi>`_
"""
from distutils.core import setup
setup(name="python-mochi",
version="0.0.1",
description="A Python lib for the mochiads api",
long_description=__doc__,
author="Jens Boje",
author_email="[email protected]",
url="http://codeboje.de/python-mochi/",
packages=['mochi'],
platforms='any',
license = 'BSD',
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
| bsd-3-clause | 7,604,597,016,173,331,000 | 27 | 104 | 0.571295 | false | 3.859712 | false | false | false |
humrochagf/lektor-creative-commons | lektor_creative_commons/translation.py | 1 | 1403 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gettext
import os
import sys
PY3 = sys.version_info > (3,)
LOCALES_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'locales'
)
class Translator(object):
def configure(self, locale):
if not os.path.exists(os.path.join(LOCALES_DIR, locale)):
locale = 'en'
self.lang = gettext.translation(
'messages', localedir=LOCALES_DIR, languages=[locale])
self.lang.install()
def translate(self, string, arguments=None):
if PY3:
gettext = self.lang.gettext
else:
gettext = self.lang.ugettext
translated = gettext(string)
if arguments is not None:
translated = translated % arguments
return translated
class __proxy__(object):
def __init__(self, string, translator, arguments):
self.translator = translator
self.string = string
self.arguments = arguments
def __repr__(self):
return self.translator.translate(self.string, self.arguments)
__str__ = __repr__
class LazyTranslator(object):
def __init__(self):
self.translator = Translator()
def __call__(self, string, arguments=None):
self.proxy = __proxy__(string, self.translator, arguments)
return self.proxy
translate_lazy = LazyTranslator()
| mit | -7,256,687,341,937,554,000 | 20.921875 | 69 | 0.612259 | false | 4.031609 | false | false | false |
XaF/rteval | rteval/modules/measurement/__init__.py | 1 | 7889 | #
# Copyright 2012 - 2013 David Sommerseth <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# For the avoidance of doubt the "preferred form" of this code is one which
# is in an open unpatent encumbered format. Where cryptographic key signing
# forms part of the process of creating an executable the information
# including keys needed to generate an equivalently functional executable
# are deemed to be part of the source code.
#
import libxml2
from rteval.modules import RtEvalModules, ModuleContainer
class MeasurementProfile(RtEvalModules):
"""Keeps and controls all the measurement modules with the same measurement profile"""
def __init__(self, config, with_load, run_parallel, modules_root, logger):
self.__with_load = with_load
self.__run_parallel = run_parallel
# Only used when running modules serialised
        self.__serialised_mods = None
self._module_type = "measurement"
self._module_config = "measurement"
self._report_tag = "Profile"
RtEvalModules.__init__(self, config, modules_root, logger)
def GetProfile(self):
"Returns the profile characteristic as (with_load, run_parallel)"
return (self.__with_load, self.__run_parallel)
def ImportModule(self, module):
"Imports an exported module from a ModuleContainer() class"
return self._ImportModule(module)
def Setup(self, modname):
"Instantiates and prepares a measurement module"
modobj = self._InstantiateModule(modname, self._cfg.GetSection(modname))
self._RegisterModuleObject(modname, modobj)
def Unleash(self):
"""Unleashes all the measurement modules"""
if self.__run_parallel:
            # Use the inherited method if running
# measurements in parallel
return RtEvalModules.Unleash(self)
# Get a list of all registered modules,
# and start the first one
self.__serialised_mods = self.GetModulesList()
mod = self.GetNamedModuleObject(self.__serialised_mods[0])
mod.setStart()
return 1
def MakeReport(self):
"Generates an XML report for all run measurement modules in this profile"
rep_n = RtEvalModules.MakeReport(self)
rep_n.newProp("loads", self.__with_load and "1" or "0")
rep_n.newProp("parallel", self.__run_parallel and "1" or "0")
return rep_n
def isAlive(self):
"""Returns True if all modules which are supposed to run runs"""
if self.__run_parallel:
return self._isAlive()
if len(self.__serialised_mods) > 0:
# If running serialised, first check if measurement is still running,
# if so - return True.
mod = self.GetNamedModuleObject(self.__serialised_mods[0])
if mod.WorkloadAlive():
return True
# If not, go to next on the list and kick it off
self.__serialised_mods.remove(self.__serialised_mods[0])
if len(self.__serialised_mods) > 0:
mod = self.GetNamedModuleObject(self.__serialised_mods[0])
mod.setStart()
return True
# If we've been through everything, nothing is running
return False
class MeasurementModules(object):
"""Class which takes care of all measurement modules and groups them into
measurement profiles, based on their characteristics"""
def __init__(self, config, logger):
self.__cfg = config
self.__logger = logger
self.__measureprofiles = []
self.__modules_root = "modules.measurement"
self.__iter_item = None
        # Temporary module container, which is used to evaluate measurement modules.
        # This container will be destroyed after Setup() has been called.
self.__container = ModuleContainer(self.__modules_root, self.__logger)
self.__LoadModules(self.__cfg.GetSection("measurement"))
def __LoadModules(self, modcfg):
"Loads and imports all the configured modules"
for m in modcfg:
            # hope to eventually have different kinds, but 'module' is the only one
# for now (jcw)
if m[1].lower() == 'module':
self.__container.LoadModule(m[0])
def GetProfile(self, with_load, run_parallel):
"Returns the appropriate MeasurementProfile object, based on the profile type"
for p in self.__measureprofiles:
mp = p.GetProfile()
if mp == (with_load, run_parallel):
return p
return None
def SetupModuleOptions(self, parser):
"Sets up all the measurement modules' parameters for the option parser"
self.__container.SetupModuleOptions(parser, self.__cfg)
def Setup(self, modparams):
"Loads all measurement modules and group them into different measurement profiles"
if not isinstance(modparams, dict):
raise TypeError("modparams attribute is not of a dictionary type")
modcfg = self.__cfg.GetSection("measurement")
for (modname, modtype) in modcfg:
if modtype.lower() == 'module': # Only 'module' will be supported (ds)
# Extract the measurement modules info
modinfo = self.__container.ModuleInfo(modname)
# Get the correct measurement profile container for this module
mp = self.GetProfile(modinfo["loads"], modinfo["parallel"])
if mp is None:
# If not found, create a new measurement profile
mp = MeasurementProfile(self.__cfg,
modinfo["loads"], modinfo["parallel"],
self.__modules_root, self.__logger)
self.__measureprofiles.append(mp)
# Export the module imported here and transfer it to the
# measurement profile
mp.ImportModule(self.__container.ExportModule(modname))
# Setup this imported module inside the appropriate measurement profile
self.__cfg.AppendConfig(modname, modparams)
mp.Setup(modname)
del self.__container
def MakeReport(self):
"Generates an XML report for all measurement profiles"
        # Get the reports from all measurement modules in all measurement profiles
rep_n = libxml2.newNode("Measurements")
for mp in self.__measureprofiles:
mprep_n = mp.MakeReport()
if mprep_n:
rep_n.addChild(mprep_n)
return rep_n
def __iter__(self):
"Initiates an iteration loop for MeasurementProfile objects"
self.__iter_item = len(self.__measureprofiles)
return self
def next(self):
"""Internal Python iterating method, returns the next
MeasurementProfile object to be processed"""
if self.__iter_item == 0:
self.__iter_item = None
raise StopIteration
else:
self.__iter_item -= 1
return self.__measureprofiles[self.__iter_item]
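# Example usage (illustrative sketch; the config and logger objects, and any
# behaviour inherited from the RtEvalModules base class, are assumptions not
# defined in this module):
#
#   measurements = MeasurementModules(config, logger)
#   measurements.Setup({'report_dir': '/tmp/rteval'})
#   for profile in measurements:
#       profile.Unleash()
#       while profile.isAlive():
#           time.sleep(1)
#   report_node = measurements.MakeReport()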
| gpl-2.0 | -2,603,962,293,298,363,000 | 36.037559 | 90 | 0.628977 | false | 4.477299 | true | false | false |
foobnix/foobnix | foobnix/helpers/dialog_entry.py | 1 | 12288 | #-*- coding: utf-8 -*-
'''
Created on Aug 24, 2010
@author: ivan
'''
from gi.repository import Gtk
import logging
from foobnix.fc.fc import FC
from foobnix.helpers.image import ImageBase
from foobnix.util.const import SITE_LOCALE, ICON_FOOBNIX
from foobnix.util.localization import foobnix_localization
from foobnix.gui.service.path_service import get_foobnix_resourse_path_by_name
foobnix_localization()
def responseToDialog(entry, dialog, response):
dialog.response(response)
def file_selection_dialog(title, current_folder=None):
chooser = Gtk.FileSelection(title)
chooser.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
chooser.set_default_response(Gtk.ResponseType.OK)
chooser.set_select_multiple(True)
paths = None
if current_folder:
chooser.set_current_folder(current_folder)
response = chooser.run()
if response == Gtk.ResponseType.OK:
paths = chooser.get_selections()
elif response == Gtk.ResponseType.CANCEL:
logging.info('Closed, no files selected')
chooser.destroy()
return paths
def file_chooser_dialog(title, current_folder=None):
chooser = Gtk.FileChooserDialog(title, action=Gtk.FileChooserAction.OPEN, buttons=(_("Open"), Gtk.ResponseType.OK))
chooser.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
chooser.set_default_response(Gtk.ResponseType.OK)
chooser.set_select_multiple(True)
paths = None
if current_folder:
chooser.set_current_folder(current_folder)
response = chooser.run()
if response == Gtk.ResponseType.OK:
paths = chooser.get_filenames()
elif response == Gtk.ResponseType.CANCEL:
logging.info('Closed, no files selected')
chooser.destroy()
return paths
def directory_chooser_dialog(title, current_folder=None):
chooser = Gtk.FileChooserDialog(title, action=Gtk.FileChooserAction.SELECT_FOLDER, buttons=(_("Choose"), Gtk.ResponseType.OK))
chooser.set_default_response(Gtk.ResponseType.OK)
chooser.set_select_multiple(True)
paths = None
if current_folder:
chooser.set_current_folder(current_folder)
response = chooser.run()
if response == Gtk.ResponseType.OK:
paths = chooser.get_filenames()
elif response == Gtk.ResponseType.CANCEL:
logging.info('Closed, no directory selected')
chooser.destroy()
return paths
def one_line_dialog(dialog_title, parent=None, entry_text=None, message_text1=None, message_text2=None):
dialog = Gtk.MessageDialog(
parent,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.INFO,
Gtk.ButtonsType.OK,
None)
dialog.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
dialog.set_title(dialog_title)
if message_text1:
dialog.set_markup(message_text1)
if message_text2:
dialog.format_secondary_markup(message_text2)
entry = Gtk.Entry()
'''set last widget in action area as default widget (button OK)'''
dialog.set_default_response(Gtk.ResponseType.OK)
'''activate default widget after Enter pressed in entry'''
entry.set_activates_default(True)
if entry_text:
entry.set_text(entry_text)
dialog.vbox.pack_start(entry, True, True, 0)
dialog.show_all()
dialog.run()
text = entry.get_text()
dialog.destroy()
return text if text else None
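# Example (illustrative; the title, default text and caller-side handling are
# arbitrary, not part of this module):
#   name = one_line_dialog(_("New playlist"), entry_text=_("My playlist"))
#   if name:
#       create_playlist(name)  # hypothetical caller-side function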
def two_line_dialog(dialog_title, parent=None, message_text1=None,
message_text2=None, entry_text1="", entry_text2=""):
dialog = Gtk.MessageDialog(
parent,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.OK,
None)
dialog.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
dialog.set_title(dialog_title)
if message_text1:
dialog.set_markup(message_text1)
if message_text2:
dialog.format_secondary_markup(message_text2)
login_entry = Gtk.Entry()
if entry_text1:
login_entry.set_text(entry_text1)
login_entry.show()
password_entry = Gtk.Entry()
if entry_text2:
password_entry.set_text(entry_text2)
password_entry.show()
hbox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 0)
hbox.pack_start(login_entry, False, False, 0)
hbox.pack_start(password_entry, False, False, 0)
dialog.vbox.pack_start(hbox, True, True, 0)
dialog.show_all()
'''set last widget in action area as default widget (button OK)'''
dialog.set_default_response(Gtk.ResponseType.OK)
'''activate default widget after Enter pressed in entry'''
login_entry.set_activates_default(True)
password_entry.set_activates_default(True)
dialog.run()
login_text = login_entry.get_text()
password_text = password_entry.get_text()
dialog.destroy()
return [login_text, password_text] if (login_text and password_text) else [None,None]
def info_dialog(title, message, parent=None):
dialog = Gtk.MessageDialog(
parent,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.INFO,
Gtk.ButtonsType.OK,
None)
dialog.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
dialog.set_title(title)
dialog.set_markup(title)
dialog.format_secondary_markup(message)
dialog.show_all()
dialog.run()
dialog.destroy()
def info_dialog_with_link(title, version, link):
dialog = Gtk.MessageDialog(
None,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.INFO,
Gtk.ButtonsType.OK,
None)
dialog.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
dialog.set_title(title)
dialog.set_markup(title)
dialog.format_secondary_markup("<b>" + version + "</b>")
link = Gtk.LinkButton.new_with_label(link, link)
link.show()
dialog.vbox.pack_end(link, True, True, 0)
dialog.show_all()
dialog.run()
dialog.destroy()
def info_dialog_with_link_and_donate(version):
dialog = Gtk.MessageDialog(
None,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.INFO,
Gtk.ButtonsType.OK,
None)
dialog.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
    dialog.set_title(_("New foobnix release available"))
    dialog.set_markup(_("New foobnix release available"))
dialog.format_secondary_markup("<b>" + version + "</b>")
card = Gtk.LinkButton.new_with_label("http://foobnix.com/%s/download.html"%SITE_LOCALE, _("Download and Donate"))
#terminal = Gtk.LinkButton("http://www.foobnix.com/donate/eng#terminal", _("Download and Donate by Webmoney or Payment Terminal"))
# link = Gtk.LinkButton("http://www.foobnix.com/support?lang=%s"%SITE_LOCALE, _("Download"))
frame = Gtk.Frame(label="Please donate and download")
vbox = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)
vbox.set_homogeneous(True)
vbox.pack_start(card, True, True, 0)
#vbox.pack_start(terminal, True, True, 0)
    #vbox.pack_start(link, True, True, 0)
frame.add(vbox)
image = ImageBase("images/foobnix-slogan.jpg")
dialog.vbox.pack_start(image, True, True, 0)
dialog.vbox.pack_start(frame, True, True, 0)
dialog.vbox.pack_start(Gtk.Label.new(_("We hope you like the player. We will make it even better.")), True, True, 0)
version_check = Gtk.CheckButton.new_with_label(_("Check for new foobnix release on start"))
version_check.set_active(FC().check_new_version)
dialog.vbox.pack_start(version_check, True, True, 0)
dialog.show_all()
dialog.run()
FC().check_new_version = version_check.get_active()
FC().save()
dialog.destroy()
def show_entry_dialog(title, description):
dialog = Gtk.MessageDialog(
None,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.OK,
None)
dialog.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
dialog.set_markup(title)
entry = Gtk.Entry()
entry.connect("activate", responseToDialog, dialog, Gtk.ResponseType.OK)
hbox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 0)
hbox.pack_start(Gtk.Label.new("Value:"), False, 5, 5)
hbox.pack_end(entry, False, False, 0)
dialog.format_secondary_markup(description)
dialog.vbox.pack_end(hbox, True, True, 0)
dialog.show_all()
dialog.run()
text = entry.get_text()
dialog.destroy()
return text
def show_login_password_error_dialog(title, description, login, password):
dialog = Gtk.MessageDialog(
None,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.ERROR,
Gtk.ButtonsType.OK,
title)
dialog.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
dialog.set_markup(str(title))
dialog.format_secondary_markup(description)
login_entry = Gtk.Entry()
login_entry.set_text(login)
login_entry.show()
password_entry = Gtk.Entry()
password_entry.set_text(password)
password_entry.set_visibility(False)
password_entry.set_invisible_char("*")
password_entry.show()
vbox = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)
vbox.pack_start(login_entry, False, False, 0)
vbox.pack_start(password_entry, False, False, 0)
dialog.vbox.pack_start(vbox, True, True, 0)
dialog.show_all()
dialog.run()
login_text = login_entry.get_text()
password_text = password_entry.get_text()
dialog.destroy()
return [login_text, password_text]
def file_saving_dialog(title, current_folder=None):
chooser = Gtk.FileChooserDialog(title, action=Gtk.FileChooserAction.SAVE, buttons=("document-save", Gtk.ResponseType.OK))
chooser.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
chooser.set_default_response(Gtk.ResponseType.OK)
chooser.set_select_multiple(False)
if current_folder:
chooser.set_current_folder(current_folder)
response = chooser.run()
if response == Gtk.ResponseType.OK:
paths = chooser.get_filenames()
elif response == Gtk.ResponseType.CANCEL:
logging.info('Closed, no files selected')
chooser.destroy()
class FileSavingDialog(Gtk.FileChooserDialog):
def __init__(self, title, func, args = None, current_folder=None, current_name=None):
Gtk.FileChooserDialog.__init__(self, title, action=Gtk.FileChooserAction.SAVE, buttons=("document-save", Gtk.ResponseType.OK))
self.set_default_response(Gtk.ResponseType.OK)
self.set_select_multiple(False)
self.set_do_overwrite_confirmation(True)
self.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
if current_folder:
self.set_current_folder(current_folder)
if current_name:
self.set_current_name(current_name)
response = self.run()
if response == Gtk.ResponseType.OK:
filename = self.get_filename()
folder = self.get_current_folder()
if func:
try:
if args: func(filename, folder, args)
else: func(filename, folder)
except IOError as e:
logging.error(e)
elif response == Gtk.ResponseType.CANCEL:
logging.info('Closed, no files selected')
self.destroy()
if __name__ == '__main__':
info_dialog_with_link_and_donate("foobnix 0.2.1-8")
Gtk.main()
| gpl-3.0 | -2,835,426,773,235,846,000 | 37.753943 | 138 | 0.638991 | false | 3.562935 | false | false | false |
rice-eclipse/rice-eclipse-mk1-1 | mk1_gui/logger.py | 1 | 2728 | """
Author: Kevin Lin, [email protected]
Modified version of Logger used by Skynet Senior Design team at Rice University.
"""
import time
class LogLevel:
"""
Mapping of log level enums to names.
"""
DEBUGV = {'name': 'DEBUGV', 'value' : 4}
DEBUG = {'name': 'DEBUG', 'value': 3}
INFO = {'name': 'INFO', 'value': 2}
WARN = {'name': 'WARN', 'value': 1}
ERROR = {'name': 'ERROR', 'value': 0}
class Logger:
def __init__(self, name, level=LogLevel.DEBUG, outfile=None):
"""
Initializes a logger.
:param name: Name to attach to every log entry generated with this logger.
:param level: The log level at which to supress messages.
"""
self.name = name
self.level = level
        if outfile is not None:
self.fout = open(outfile, mode='a')
else:
self.fout = None
def debugv(self, message):
"""
        Log a verbose debug message.
:param message: Message to log.
"""
return self._print_log(LogLevel.DEBUGV, message)
def debug(self, message):
"""
Log a debug message.
:param message: Message to log.
"""
return self._print_log(LogLevel.DEBUG, message)
def info(self, message):
"""
Log an info message.
:param message: Message to log.
"""
return self._print_log(LogLevel.INFO, message)
def warn(self, message):
"""
Log a warning message.
:param message: Message to log.
"""
return self._print_log(LogLevel.WARN, message)
def error(self, message):
"""
Log an error message.
:param message: Message to log.
"""
return self._print_log(LogLevel.ERROR, message)
def _print_log(self, level, message):
"""
Print a log entry to standard output, with the timestamp, log level, and context name
automatically prefixed.
:param level: Target log level.
:param message: Message to log.
"""
        # Don't print if we are suppressing the message:
if self.level['value'] < level['value']:
return
hms = time.strftime('%H:%M:%S')
self._print_stdout(
'[{hms}] [{name}] [{level}] {message}'.format(
hms=hms,
name=self.name,
level=level['name'],
message=message,
)
)
def _print_stdout(self, line):
"""
Print a line to standard output.
:param line: Line to print.
"""
print(line)
if self.fout is not None:
self.fout.write(line + '\n')
self.fout.flush()
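# Example usage (illustrative; the logger name and file name are arbitrary):
#   log = Logger("gui", level=LogLevel.DEBUG, outfile="mk1_gui.log")
#   log.info("connection established")
#   log.warn("ignition sensor timed out")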
| gpl-3.0 | 6,142,132,702,150,370,000 | 24.259259 | 93 | 0.531891 | false | 4.041481 | false | false | false |
imcgreer/idmrm | cfht/cfhtrmphot.py | 2 | 16355 | #!/usr/bin/env python
import os,sys
import numpy as np
import subprocess
import multiprocessing
from functools import partial
from astropy.io import fits
from astropy.table import Table,vstack,hstack,join
from astropy.stats import sigma_clip
from astropy.wcs import InconsistentAxisTypesError
from bokpipe import bokphot,bokpl,bokproc,bokutil,bokastrom
from bokpipe.bokdm import SimpleFileNameMap
import bokrmpipe,bokrmphot
import cfhtrm
import idmrmphot
nom_pixscl = 0.18555
cfhtrm_aperRad = np.array([0.75,1.5,2.275,3.4,4.55,6.67,10.]) / nom_pixscl
def get_phot_file(photCat,inFile):
if inFile is None:
return '{0}_{1}.fits'.format('cfhtrmphot',photCat.name)
else:
return inFile
class CfhtConfig(object):
name = 'cfht'
nCCD = 40
nAper = 7
nAmp = 80
ccd0 = 0
zpAperNum = -2
zpMinSnr = 10.
zpMinNobs = 10
zpMaxSeeing = 1.7/nom_pixscl
zpMaxChiVal = 5.
zpMagRange = {'g':(17.0,20.5),'i':(17.0,21.0)}
zpFitKwargs = {'minContig':1}
apCorrMaxRmsFrac = 0.5
apCorrMinSnr = 20.
apCorrMinNstar = 20
# XXX need to understand why cfht data has so many outliers
maxFrameOutlierFrac = 0.99
maxFrameChiSqrNu = 10.
#colorXform = idmrmphot.ColorTransform('cfht','sdss')
# although the color terms appear consistent between <2009 and 2014-15,
# combining them into a single calibration results in ~10 mmag offsets
# in the absolute calibration with SDSS. Splitting them into separate
# calibrations improves this.
def __init__(self):
_cfgdir = os.path.join(os.environ['BOKRMDIR'],'..') # XXX
ctab = Table.read(os.path.join(_cfgdir,'colorterms.fits'))
ii = np.where( (ctab['photsys']=='cfht') &
(ctab['refsys']=='sdss') &
(ctab['filter']=='g') )[0]
dec1_2013 = 56627
i = np.searchsorted(ctab['mjdmin'][ii],dec1_2013)
ctab['mjdmax'][ii[i-1]] = dec1_2013
ctab['epoch'][ii[i:]] += 1
ctab.insert_row(ii[i],('cfht','sdss','g',1,
dec1_2013,ctab['mjdmin'][ii[i]],
ctab['cterms'][ii[i-1]]))
self.colorXform = idmrmphot.ColorTransform('cfht','sdss',
inTab=ctab)
def _cat_worker(dataMap,imFile,**kwargs):
clobber = kwargs.pop('redo',False)
verbose = kwargs.pop('verbose',0)
bokutil.mplog('extracting catalogs for '+imFile)
imgFile = dataMap('img')(imFile)
psfFile = dataMap('psf')(imFile)
aheadFile = imgFile.replace('.fits.fz','.ahead')
tmpFile = imgFile.replace('.fz','')
catFile = dataMap('wcscat')(imFile)
print '-->',imgFile
kwargs.setdefault('SEEING_FWHM','1.0')
kwargs.setdefault('PIXEL_SCALE','0.18555')
kwargs.setdefault('SATUR_KEY','SATURATE')
kwargs.setdefault('GAIN_KEY','GAIN')
if not os.path.exists(aheadFile):
print aheadFile,' not found!'
return
if not os.path.exists(imgFile):
print imgFile,' not found!'
return
if True:
# a few widely spaced ccds
pix = np.array([ fits.getdata(imgFile,ccdNum)[::8]
for ccdNum in [10,16,21,33] ])
sky = sigma_clip(pix).mean()
if verbose > 0:
print 'sky level is %.2f' % sky
kwargs.setdefault('BACK_TYPE','MANUAL')
kwargs.setdefault('BACK_VALUE','%.1f'%sky)
if not os.path.exists(catFile):
if not os.path.exists(tmpFile):
subprocess.call(['funpack',imgFile])
bokphot.sextract(tmpFile,catFile,full=False,
clobber=clobber,verbose=verbose,**kwargs)
if not os.path.exists(psfFile):
if not os.path.exists(tmpFile):
subprocess.call(['funpack',imgFile])
bokphot.run_psfex(catFile,psfFile,instrument='cfhtmegacam',
clobber=clobber,verbose=verbose,**kwargs)
if not os.path.exists(aheadFile):
bokastrom.scamp_solve(tmpFile,catFile,filt='r',
clobber=clobber,verbose=verbose)
if not os.path.exists(aheadFile):
print imgFile,' WCS failed!'
return
if False:
os.remove(catFile)
catFile = dataMap('cat')(imFile)
# XXX while using these as primary
apers = ','.join(['%.2f'%a for a in cfhtrm_aperRad])
kwargs.setdefault('DETECT_MINAREA','10.0')
kwargs.setdefault('DETECT_THRESH','2.0')
kwargs.setdefault('ANALYSIS_THRESH','2.0')
kwargs.setdefault('PHOT_APERTURES',apers)
kwargs.setdefault('PARAMETERS_NAME',
os.path.join(bokphot.configDir,'cfht_catalog_tmp.par'))
#kwargs.setdefault('BACK_SIZE','64,128')
#kwargs.setdefault('BACK_FILTERSIZE','1')
kwargs.setdefault('BACKPHOTO_TYPE','LOCAL')
#kwargs.setdefault('CHECKIMAGE_TYPE','BACKGROUND')
#kwargs.setdefault('CHECKIMAGE_NAME',imgFile.replace('.fits.fz','.back.fits'))
if not os.path.exists(catFile):
if not os.path.exists(tmpFile):
subprocess.call(['funpack',imgFile])
bokphot.sextract(tmpFile,catFile,psfFile,full=True,
clobber=clobber,verbose=verbose,**kwargs)
if os.path.exists(tmpFile):
os.remove(tmpFile)
def _exc_cat_worker(*args,**kwargs):
try:
_cat_worker(*args,**kwargs)
except:
pass
def make_sextractor_catalogs(dataMap,procMap,**kwargs):
files = dataMap.getFiles()
p_cat_worker = partial(_exc_cat_worker,dataMap,**kwargs)
status = procMap(p_cat_worker,files)
def get_phot_fn(dataMap,imFile,catPfx):
fmap = SimpleFileNameMap(None,cfhtrm.cfhtCatDir,
'.'.join(['',catPfx,'phot']))
catFile = dataMap('cat')(imFile)
return fmap(imFile)
def _phot_worker(dataMap,photCat,inp,matchRad=2.0,redo=False,verbose=0):
imFile,frame = inp
refCat = photCat.refCat
catFile = dataMap('cat')(imFile)
aperFile = get_phot_fn(dataMap,imFile,photCat.name)
if verbose:
print '--> ',imFile
if os.path.exists(aperFile) and not redo:
return
tabs = []
try:
f = fits.open(catFile)
except IOError:
print catFile,' not found!'
return
for ccdNum,hdu in enumerate(f[1:]):
c = hdu.data
m1,m2,sep = idmrmphot.srcor(refCat['ra'],refCat['dec'],
c['ALPHA_J2000'],c['DELTA_J2000'],matchRad)
if len(m1)==0:
continue
expTime = dataMap.obsDb['expTime'][frame]
t = Table()
t['x'] = c['X_IMAGE'][m2]
t['y'] = c['Y_IMAGE'][m2]
t['objId'] = refCat['objId'][m1]
t['counts'] = c['FLUX_APER'][m2] / expTime
t['countsErr'] = c['FLUXERR_APER'][m2] / expTime
t['flags'] = np.tile(c['FLAGS'][m2],(len(cfhtrm_aperRad),1)).T
t['psfCounts'] = c['FLUX_PSF'][m2] / expTime
t['psfCountsErr'] = c['FLUXERR_PSF'][m2] / expTime
t['ccdNum'] = ccdNum
t['frameIndex'] = dataMap.obsDb['frameIndex'][frame]
t['__number'] = c['NUMBER'][m2]
t['__nmatch'] = len(m1)
t['__sep'] = sep
tabs.append(t)
f.close()
if len(tabs)==0:
if verbose:
print 'no objects!'
return
vstack(tabs).write(aperFile,overwrite=True)
def make_phot_catalogs(dataMap,procMap,photCat,**kwargs):
files = zip(*dataMap.getFiles(with_frames=True))
p_phot_worker = partial(_phot_worker,dataMap,photCat,**kwargs)
status = procMap(p_phot_worker,files)
def load_raw_cfht_aperphot(dataMap,photCat):
photTabs = []
for imFile in dataMap.getFiles():
aperFile = get_phot_fn(dataMap,imFile,photCat.name)
try:
photTabs.append(Table.read(aperFile))
print "loaded catalog {}".format(aperFile)
except IOError:
print "WARNING: catalog {} missing, skipped!".format(aperFile)
return vstack(photTabs)
def calc_zeropoints(dataMap,refCat,cfhtCfg,debug=False):
#
fields = ['frameIndex','utDate','filter','mjdStart','mjdMid','airmass']
good = dataMap.obsDb['good']
frameList = dataMap.obsDb[fields][good]
frameList.sort('frameIndex')
# zero point trends are fit over a season
if 'season' not in frameList.colnames:
frameList['season'] = idmrmphot.get_season(frameList['mjdStart'])
# select the zeropoint aperture
cfhtPhot = load_raw_cfht_aperphot(dataMap,refCat)
# XXX temporary hack
cfhtPhot['nMasked'] = np.int32(0)
cfhtPhot['peakCounts'] = np.float32(1)
phot = idmrmphot.extract_aperture(cfhtPhot,cfhtCfg.zpAperNum)
# calculate zeropoints and aperture corrections
# XXX I guess would have to split i band out eventually? it won't have
# same epochs
epochs = cfhtCfg.colorXform.get_epoch('g',frameList['mjdStart'])
outputs = []
for epoch in np.unique(epochs):
ii = np.where(epochs==epoch)[0]
jj = np.where(np.in1d(phot['frameIndex'],
frameList['frameIndex'][ii]))[0]
zpdat = idmrmphot.iter_selfcal(phot[jj],frameList[ii],refCat,cfhtCfg,
mode='focalplane')
outputs.append(zpdat)
frameList = vstack([ zpdat.zpts for zpdat in outputs ])
frameList.sort('frameIndex')
frameList = idmrmphot.calc_apercorrs(cfhtPhot,frameList,cfhtCfg,
mode='focalplane')
#
if True:
zptrend = vstack([ zpdat.zptrend for zpdat in outputs ])
zptrend.write('cfht_zptrend.dat',overwrite=True,format='ascii')
if debug:
zpdat.sePhot.write('zp_sephot.fits',overwrite=True)
zpdat.coaddPhot.write('zp_coaddphot.fits',overwrite=True)
return frameList
def calibrate_lightcurves(dataMap,photCat,zpFile,cfhtCfg):
zpTab = Table.read(zpFile)
if False:
# these are hacks to fill the zeropoints table for CCDs with no
# measurements... this may be necessary as sometimes too few reference
# stars will land on a given CCD. but need to understand it better.
for row in zpTab:
iszero = row['aperZp'] == 0
if np.sum(~iszero) > 10:
row['aperZp'][iszero] = np.median(row['aperZp'][~iszero])
row['aperNstar'][iszero] = 999
for j in range(7):
iszero = row['aperCorr'][:,j] == 0
if np.sum(~iszero) > 5:
row['aperCorr'][iszero,j] = np.median(row['aperCorr'][~iszero,j])
phot = load_raw_cfht_aperphot(dataMap,photCat)
phot = idmrmphot.calibrate_lightcurves(phot,zpTab,cfhtCfg,
zpmode='focalplane',
apcmode='focalplane')
return phot
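# Typical calibration flow (illustrative sketch mirroring the __main__ driver
# below; the file names are the defaults used there):
#   dataMap = cfhtrm.CfhtDataMap()
#   photCat = idmrmphot.load_target_catalog('sdssrm')
#   cfhtCfg = CfhtConfig()
#   zps = calc_zeropoints(dataMap, photCat, cfhtCfg)
#   zps.write('config/CFHTRMFrameList.fits.gz', overwrite=True)
#   phot = calibrate_lightcurves(dataMap, photCat,
#                                'config/CFHTRMFrameList.fits.gz', cfhtCfg)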
def check_status(dataMap):
from collections import defaultdict
from bokpipe.bokastrom import read_headers
missing = defaultdict(list)
incomplete = defaultdict(list)
files = dataMap.getFiles()
for i,f in enumerate(files):
imgFile = dataMap('img')(f)
if not os.path.exists(imgFile):
missing['img'].append(f)
continue
nCCD = fits.getheader(imgFile,0)['NEXTEND']
aheadFile = imgFile.replace('.fits.fz','.ahead')
if not os.path.exists(aheadFile):
missing['ahead'].append(f)
else:
hdrs = read_headers(aheadFile)
if len(hdrs) < nCCD:
incomplete['ahead'].append(f)
for k in ['wcscat','psf','cat']:
outFile = dataMap(k)(f)
if not os.path.exists(outFile):
missing[k].append(f)
else:
try:
ff = fits.open(outFile)
except IOError:
incomplete[k].append(f)
continue
n = len(ff)-1
if k == 'wcscat':
n //= 2 # ldac
if n < nCCD:
incomplete[k].append(f)
sys.stdout.write("\r%d/%d" % (i+1,len(files)))
sys.stdout.flush()
print
print 'total images: ',len(files)
for k in ['img','ahead','wcscat','psf','cat']:
n = len(files) - len(missing[k]) - len(incomplete[k])
print '%10s %5d %5d %5d' % (k,n,len(missing[k]),len(incomplete[k]))
d = { f for l in missing.values() for f in l }
if len(d)>0:
logfile = open('missing.log','w')
for f in d:
logfile.write(f+'\n')
logfile.close()
d = { f for l in incomplete.values() for f in l }
if len(d)>0:
logfile = open('incomplete.log','w')
for f in d:
logfile.write(f+'\n')
logfile.close()
def load_phot(phot,photCat,frameList,lctable,aper,season=None,photo=False):
if phot is None:
photFile = get_phot_file(photCat,args.lctable)
print 'loaded lightcurve catalog {}'.format(photFile)
phot = Table.read(photFile)
apPhot = idmrmphot.extract_aperture(phot,args.aper,lightcurve=True)
if args.photo:
if frameList is None:
print 'loading zeropoints table {0}'.format(frameListFile)
frameList = Table.read(frameListFile)
photoFrames = frameList['frameIndex'][frameList['isPhoto']]
nbefore = len(apPhot)
apPhot = apPhot[np.in1d(apPhot['frameIndex'],photoFrames)]
print 'restricting to {0} photo frames yields {1}/{2}'.format(
len(photoFrames),nbefore,len(apPhot))
apPhot['season'] = idmrmphot.get_season(apPhot['mjd'])
if season is None:
# there's too little 2009 data for useful statistics
apPhot = apPhot[apPhot['season']!='2009']
else:
apPhot = apPhot[apPhot['season']==season]
return apPhot
if __name__=='__main__':
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--catalogs',action='store_true',
help='make source extractor catalogs and PSF models')
parser.add_argument('--dophot',action='store_true',
help='do photometry on images')
parser.add_argument('--zeropoint',action='store_true',
help='do zero point calculation')
parser.add_argument('--lightcurves',action='store_true',
help='construct lightcurves')
parser.add_argument('--aggregate',action='store_true',
help='construct aggregate photometry')
parser.add_argument('--binnedstats',action='store_true',
help='compute phot stats in mag bins')
parser.add_argument('--status',action='store_true',
help='check processing status')
parser.add_argument('--catalog',type=str,default='sdssrm',
help='reference catalog ([sdssrm]|sdss|cfht)')
parser.add_argument('-p','--processes',type=int,default=1,
help='number of processes to use [default=single]')
parser.add_argument('-R','--redo',action='store_true',
help='redo (overwrite existing files)')
parser.add_argument('-u','--utdate',type=str,default=None,
help='UT date(s) to process [default=all]')
parser.add_argument('--lctable',type=str,
help='lightcurve table')
parser.add_argument('--season',type=str,
help='observing season')
parser.add_argument('--aper',type=int,default=-2,
help='index of aperture to select [-2]')
parser.add_argument('--zptable',type=str,
default='config/CFHTRMFrameList.fits.gz',
help='zeropoints table')
parser.add_argument('--outfile',type=str,default='',
help='output file')
parser.add_argument('--photo',action='store_true',
help='use only photometric frames')
parser.add_argument('--catdir',type=str,
help='directory containing photometry catalogs')
parser.add_argument('-v','--verbose',action='count',
help='increase output verbosity')
args = parser.parse_args()
#
if args.processes > 1:
pool = multiprocessing.Pool(args.processes)
procMap = pool.map
else:
procMap = map
dataMap = cfhtrm.CfhtDataMap()
photCat = idmrmphot.load_target_catalog(args.catalog)
timerLog = bokutil.TimerLog()
kwargs = dict(redo=args.redo,verbose=args.verbose)
cfhtCfg = CfhtConfig()
phot = None
if args.utdate:
utDate = args.utdate.split(',')
dataMap.setUtDate(utDate)
if args.catalogs:
make_sextractor_catalogs(dataMap,procMap,**kwargs)
timerLog('sextractor catalogs')
if args.dophot:
make_phot_catalogs(dataMap,procMap,photCat,**kwargs)
timerLog('photometry catalogs')
if args.zeropoint:
zps = calc_zeropoints(dataMap,photCat,cfhtCfg,debug=True)
zps.write(args.zptable,overwrite=True)
timerLog('zeropoints')
if args.lightcurves:
phot = calibrate_lightcurves(dataMap,photCat,args.zptable,cfhtCfg)
photFile = get_phot_file(photCat,args.lctable)
phot.write(photFile,overwrite=True)
timerLog('lightcurves')
if args.aggregate:
# which = 'nightly' if args.nightly else 'all'
frameList = Table.read(args.zptable)
apPhot = load_phot(phot,photCat,frameList,
args.lctable,args.aper,args.season)
apPhot = apPhot.group_by(['season','filter','objId'])
objPhot = idmrmphot.clipped_group_mean_rms(apPhot['aperMag',])
aggPhot = hstack([apPhot.groups.keys,objPhot])
outfile = args.outfile if args.outfile \
else 'meanphot_cfht_{}.fits'.format(args.season)
aggPhot.write(outfile)
timerLog('aggregate phot')
if args.binnedstats:
frameList = Table.read(args.zptable)
apPhot = load_phot(phot,photCat,frameList,args.lctable,args.aper)
bs = idmrmphot.get_binned_stats(apPhot,photCat.refCat,cfhtCfg,
binEdges=np.arange(17.5,20.11,0.2))
outfile = args.outfile if args.outfile else 'phot_stats_cfht.fits'
bs.write(outfile,overwrite=True)
timerLog('binned stats')
if args.status:
check_status(dataMap)
timerLog.dump()
if args.processes > 1:
pool.close()
| gpl-3.0 | -6,972,535,924,764,832,000 | 35.183628 | 79 | 0.667074 | false | 2.776269 | false | false | false |
tejal29/pants | src/python/pants/base/target.py | 1 | 19104 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import functools
import os
from hashlib import sha1
from six import string_types
from pants.base.address import Addresses, SyntheticAddress
from pants.base.build_environment import get_buildroot
from pants.base.build_manual import manual
from pants.base.deprecated import deprecated
from pants.base.exceptions import TargetDefinitionException
from pants.base.fingerprint_strategy import DefaultFingerprintStrategy
from pants.base.hash_utils import hash_all
from pants.base.payload import Payload
from pants.base.payload_field import DeferredSourcesField, SourcesField
from pants.base.source_root import SourceRoot
from pants.base.target_addressable import TargetAddressable
from pants.base.validation import assert_list
class AbstractTarget(object):
_deprecated_predicate = functools.partial(deprecated, '0.0.30')
@property
def has_resources(self):
"""Returns True if the target has an associated set of Resources."""
return hasattr(self, 'resources') and self.resources
@property
def is_exported(self):
"""Returns True if the target provides an artifact exportable from the repo."""
# TODO(John Sirois): fixup predicate dipping down into details here.
return self.has_label('exportable') and self.provides
@property
@_deprecated_predicate('Do not use this method, use an isinstance check on JarDependency.')
def is_jar(self):
"""Returns True if the target is a jar."""
return False
@property
@_deprecated_predicate('Do not use this method, use an isinstance check on JavaAgent.')
def is_java_agent(self):
"""Returns `True` if the target is a java agent."""
return self.has_label('java_agent')
@property
@_deprecated_predicate('Do not use this method, use an isinstance check on JvmApp.')
def is_jvm_app(self):
"""Returns True if the target produces a java application with bundled auxiliary files."""
return False
# DEPRECATED to be removed after 0.0.29
# do not use this method, use isinstance(..., JavaThriftLibrary) or a yet-to-be-defined mixin
@property
def is_thrift(self):
"""Returns True if the target has thrift IDL sources."""
return False
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_jvm(self):
"""Returns True if the target produces jvm bytecode."""
return self.has_label('jvm')
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_codegen(self):
"""Returns True if the target is a codegen target."""
return self.has_label('codegen')
@property
@_deprecated_predicate('Do not use this method, use an isinstance check on JarLibrary.')
def is_jar_library(self):
"""Returns True if the target is an external jar library."""
return self.has_label('jars')
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_java(self):
"""Returns True if the target has or generates java sources."""
return self.has_label('java')
@property
@_deprecated_predicate('Do not use this method, use an isinstance check on AnnotationProcessor.')
def is_apt(self):
"""Returns True if the target exports an annotation processor."""
return self.has_label('apt')
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_python(self):
"""Returns True if the target has python sources."""
return self.has_label('python')
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_scala(self):
"""Returns True if the target has scala sources."""
return self.has_label('scala')
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_scalac_plugin(self):
"""Returns True if the target builds a scalac plugin."""
return self.has_label('scalac_plugin')
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_test(self):
"""Returns True if the target is comprised of tests."""
return self.has_label('tests')
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_android(self):
"""Returns True if the target is an android target."""
return self.has_label('android')
class Target(AbstractTarget):
  """The base class for all pants targets.
Handles registration of a target amongst all parsed targets as well as location of the target
parse context.
"""
class WrongNumberOfAddresses(Exception):
"""Internal error, too many elements in Addresses"""
pass
LANG_DISCRIMINATORS = {
'java': lambda t: t.is_jvm,
'python': lambda t: t.is_python,
}
@classmethod
def lang_discriminator(cls, lang):
"""Returns a tuple of target predicates that select the given lang vs all other supported langs.
The left hand side accepts targets for the given language; the right hand side accepts
targets for all other supported languages.
"""
def is_other_lang(target):
for name, discriminator in cls.LANG_DISCRIMINATORS.items():
if name != lang and discriminator(target):
return True
return False
return (cls.LANG_DISCRIMINATORS[lang], is_other_lang)
@classmethod
def get_addressable_type(target_cls):
class ConcreteTargetAddressable(TargetAddressable):
@classmethod
def get_target_type(cls):
return target_cls
return ConcreteTargetAddressable
@property
def target_base(self):
""":returns: the source root path for this target."""
return SourceRoot.find(self)
@classmethod
def identify(cls, targets):
"""Generates an id for a set of targets."""
return cls.combine_ids(target.id for target in targets)
@classmethod
def maybe_readable_identify(cls, targets):
"""Generates an id for a set of targets.
If the set is a single target, just use that target's id."""
return cls.maybe_readable_combine_ids([target.id for target in targets])
@staticmethod
def combine_ids(ids):
"""Generates a combined id for a set of ids."""
return hash_all(sorted(ids)) # We sort so that the id isn't sensitive to order.
@classmethod
def maybe_readable_combine_ids(cls, ids):
"""Generates combined id for a set of ids, but if the set is a single id, just use that."""
ids = list(ids) # We can't len a generator.
return ids[0] if len(ids) == 1 else cls.combine_ids(ids)
def __init__(self, name, address, build_graph, payload=None, tags=None, description=None):
"""
:param string name: The name of this target, which combined with this
build file defines the target address.
:param dependencies: Other targets that this target depends on.
:type dependencies: list of target specs
:param Address address: The Address that maps to this Target in the BuildGraph
:param BuildGraph build_graph: The BuildGraph that this Target lives within
:param Payload payload: The configuration encapsulated by this target. Also in charge of
most fingerprinting details.
:param iterable<string> tags: Arbitrary string tags that describe this target. Usable
by downstream/custom tasks for reasoning about build graph. NOT included in payloads
and thus not used in fingerprinting, thus not suitable for anything that affects how
a particular target is built.
:param string description: Human-readable description of this target.
"""
# dependencies is listed above; implementation hides in TargetAddressable
self.payload = payload or Payload()
self.payload.freeze()
self.name = name
self.address = address
self._tags = set(tags or [])
self._build_graph = build_graph
self.description = description
self.labels = set()
self._cached_fingerprint_map = {}
self._cached_transitive_fingerprint_map = {}
@property
def tags(self):
return self._tags
@property
def num_chunking_units(self):
return max(1, len(self.sources_relative_to_buildroot()))
def assert_list(self, maybe_list, expected_type=string_types):
return assert_list(maybe_list, expected_type,
raise_type=lambda msg: TargetDefinitionException(self, msg))
def compute_invalidation_hash(self, fingerprint_strategy=None):
"""
:param FingerprintStrategy fingerprint_strategy: optional fingerprint strategy to use to compute
the fingerprint of a target
:return: a fingerprint representing this target (no dependencies)
:rtype: string
"""
fingerprint_strategy = fingerprint_strategy or DefaultFingerprintStrategy()
return fingerprint_strategy.fingerprint_target(self)
def invalidation_hash(self, fingerprint_strategy=None):
fingerprint_strategy = fingerprint_strategy or DefaultFingerprintStrategy()
if fingerprint_strategy not in self._cached_fingerprint_map:
self._cached_fingerprint_map[fingerprint_strategy] = self.compute_invalidation_hash(fingerprint_strategy)
return self._cached_fingerprint_map[fingerprint_strategy]
def mark_extra_invalidation_hash_dirty(self):
pass
def mark_invalidation_hash_dirty(self):
self._cached_fingerprint_map = {}
self._cached_transitive_fingerprint_map = {}
self.mark_extra_invalidation_hash_dirty()
def transitive_invalidation_hash(self, fingerprint_strategy=None):
"""
:param FingerprintStrategy fingerprint_strategy: optional fingerprint strategy to use to compute
the fingerprint of a target
:return: A fingerprint representing this target and all of its dependencies.
The return value can be `None`, indicating that this target and all of its transitive dependencies
did not contribute to the fingerprint, according to the provided FingerprintStrategy.
:rtype: string
"""
fingerprint_strategy = fingerprint_strategy or DefaultFingerprintStrategy()
if fingerprint_strategy not in self._cached_transitive_fingerprint_map:
hasher = sha1()
def dep_hash_iter():
for dep in self.dependencies:
dep_hash = dep.transitive_invalidation_hash(fingerprint_strategy)
if dep_hash is not None:
yield dep_hash
dep_hashes = sorted(list(dep_hash_iter()))
for dep_hash in dep_hashes:
hasher.update(dep_hash)
target_hash = self.invalidation_hash(fingerprint_strategy)
if target_hash is None and not dep_hashes:
return None
dependencies_hash = hasher.hexdigest()[:12]
combined_hash = '{target_hash}.{deps_hash}'.format(target_hash=target_hash,
deps_hash=dependencies_hash)
self._cached_transitive_fingerprint_map[fingerprint_strategy] = combined_hash
return self._cached_transitive_fingerprint_map[fingerprint_strategy]
def mark_transitive_invalidation_hash_dirty(self):
self._cached_transitive_fingerprint_map = {}
self.mark_extra_transitive_invalidation_hash_dirty()
def mark_extra_transitive_invalidation_hash_dirty(self):
pass
def inject_dependency(self, dependency_address):
self._build_graph.inject_dependency(dependent=self.address, dependency=dependency_address)
def invalidate_dependee(dependee):
dependee.mark_transitive_invalidation_hash_dirty()
self._build_graph.walk_transitive_dependee_graph([self.address], work=invalidate_dependee)
def has_sources(self, extension=''):
"""
:param string extension: suffix of filenames to test for
:return: True if the target contains sources that match the optional extension suffix
:rtype: bool
"""
sources_field = self.payload.get_field('sources')
if sources_field:
return sources_field.has_sources(extension)
else:
return False
def sources_relative_to_buildroot(self):
if self.has_sources():
return self.payload.sources.relative_to_buildroot()
else:
return []
def sources_relative_to_source_root(self):
if self.has_sources():
abs_source_root = os.path.join(get_buildroot(), self.target_base)
for source in self.sources_relative_to_buildroot():
abs_source = os.path.join(get_buildroot(), source)
yield os.path.relpath(abs_source, abs_source_root)
@property
def derived_from(self):
"""Returns the target this target was derived from.
If this target was not derived from another, returns itself.
"""
return self._build_graph.get_derived_from(self.address)
@property
def derived_from_chain(self):
"""Returns all targets that this target was derived from.
If this target was not derived from another, returns an empty sequence.
"""
cur = self
while cur.derived_from is not cur:
cur = cur.derived_from
yield cur
@property
def concrete_derived_from(self):
"""Returns the concrete target this target was (directly or indirectly) derived from.
The returned target is guaranteed to not have been derived from any other target, and is thus
guaranteed to be a 'real' target from a BUILD file, not a programmatically injected target.
"""
return self._build_graph.get_concrete_derived_from(self.address)
@property
def traversable_specs(self):
"""
:return: specs referenced by this target to be injected into the build graph
:rtype: list of strings
"""
return []
@property
def traversable_dependency_specs(self):
"""
:return: specs representing dependencies of this target that will be injected to the build
graph and linked in the graph as dependencies of this target
:rtype: list of strings
"""
# To support DeferredSourcesField
for name, payload_field in self.payload.fields:
if isinstance(payload_field, DeferredSourcesField) and payload_field.address:
yield payload_field.address.spec
@property
def dependencies(self):
"""
:return: targets that this target depends on
:rtype: list of Target
"""
return [self._build_graph.get_target(dep_address)
for dep_address in self._build_graph.dependencies_of(self.address)]
@property
def dependents(self):
"""
:return: targets that depend on this target
:rtype: list of Target
"""
return [self._build_graph.get_target(dep_address)
for dep_address in self._build_graph.dependents_of(self.address)]
@property
def is_synthetic(self):
"""
:return: True if this target did not originate from a BUILD file.
"""
return self.concrete_derived_from.address != self.address
@property
def is_original(self):
"""Returns ``True`` if this target is derived from no other."""
return self.derived_from == self
@property
def id(self):
"""A unique identifier for the Target.
The generated id is safe for use as a path name on unix systems.
"""
return self.address.path_safe_spec
@property
def identifier(self):
"""A unique identifier for the Target.
The generated id is safe for use as a path name on unix systems.
"""
return self.id
  def walk(self, work, predicate=None):
    """Walks this target's dependency graph in DFS preorder, visiting each node exactly
    once.
If a predicate is supplied it will be used to test each target before handing the target to
work and descending. Work can return targets in which case these will be added to the walk
candidate set if not already walked.
:param work: Callable that takes a :py:class:`pants.base.target.Target`
as its single argument.
:param predicate: Callable that takes a :py:class:`pants.base.target.Target`
as its single argument and returns True if the target should passed to ``work``.
"""
if not callable(work):
raise ValueError('work must be callable but was %s' % work)
if predicate and not callable(predicate):
raise ValueError('predicate must be callable but was %s' % predicate)
self._build_graph.walk_transitive_dependency_graph([self.address], work, predicate)
def closure(self):
"""Returns this target's transitive dependencies, in DFS preorder traversal."""
return self._build_graph.transitive_subgraph_of_addresses([self.address])
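  # Example (illustrative; `target` is any Target instance and `specs` an empty list):
  #   target.walk(work=lambda t: specs.append(t.address.spec),
  #               predicate=lambda t: t.has_sources('.java'))
  #   all_deps = target.closure()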
@manual.builddict()
@deprecated('0.0.30', hint_message='Use the description parameter of target() instead')
def with_description(self, description):
"""Set a human-readable description of this target.
:param description: Descriptive string"""
self.description = description
return self
# TODO(Eric Ayers) As of 2/5/2015 this call is DEPRECATED and should be removed soon
def add_labels(self, *label):
self.labels.update(label)
# TODO(Eric Ayers) As of 2/5/2015 this call is DEPRECATED and should be removed soon
def remove_label(self, label):
self.labels.remove(label)
# TODO(Eric Ayers) As of 2/5/2015 this call is DEPRECATED and should be removed soon
def has_label(self, label):
return label in self.labels
def __lt__(self, other):
return self.address < other.address
def __eq__(self, other):
return isinstance(other, Target) and self.address == other.address
def __hash__(self):
return hash(self.address)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
addr = self.address if hasattr(self, 'address') else 'address not yet set'
return "%s(%s)" % (type(self).__name__, addr)
def create_sources_field(self, sources, sources_rel_path, address=None, build_graph=None):
"""Factory method to create a SourcesField appropriate for the type of the sources object.
Note that this method is called before the call to Target.__init__ so don't expect fields to
be populated!
:return: a payload field object representing the sources parameter
:rtype: SourcesField
"""
if isinstance(sources, Addresses):
# Currently, this is only created by the result of from_target() which takes a single argument
if len(sources.addresses) != 1:
raise self.WrongNumberOfAddresses(
"Expected a single address to from_target() as argument to {spec}"
.format(spec=address.spec))
referenced_address = SyntheticAddress.parse(sources.addresses[0],
relative_to=sources.rel_path)
return DeferredSourcesField(ref_address=referenced_address)
return SourcesField(sources=sources, sources_rel_path=sources_rel_path)
| apache-2.0 | -5,264,794,649,847,011,000 | 36.754941 | 111 | 0.706397 | false | 4.097812 | false | false | false |
USC-ACTLab/pyCreate2 | pyCreate2/visualization/virtual_create.py | 1 | 2464 | """
Module to control a virtual create
"""
from ..vrep import vrep as vrep
from enum import Enum
class VirtualCreate:
"""
Class to control a virtual create in V-REP.
"""
def __init__(self, client_id):
"""Constructor.
Args:
client_id (integer): V-REP client id.
"""
self._clientID = client_id
# query objects
rc, self._obj = vrep.simxGetObjectHandle(self._clientID, "create_estimate", vrep.simx_opmode_oneshot_wait)
# Use custom GUI
_, self._uiHandle = vrep.simxGetUIHandle(self._clientID, "UI", vrep.simx_opmode_oneshot_wait)
vrep.simxGetUIEventButton(self._clientID, self._uiHandle, vrep.simx_opmode_streaming)
def set_pose(self, position, yaw):
vrep.simxSetObjectPosition(self._clientID, self._obj, -1, position,
vrep.simx_opmode_oneshot_wait)
vrep.simxSetObjectOrientation(self._clientID, self._obj, -1, (0, 0, yaw),
vrep.simx_opmode_oneshot_wait)
def set_point_cloud(self, data):
signal = vrep.simxPackFloats(data)
vrep.simxWriteStringStream(self._clientID, "pointCloud", signal, vrep.simx_opmode_oneshot)
class Button(Enum):
MoveForward = 3
TurnLeft = 4
TurnRight = 5
Sense = 6
def get_last_button(self):
self.enable_buttons()
err, button_id, aux = vrep.simxGetUIEventButton(self._clientID, self._uiHandle, vrep.simx_opmode_buffer)
if err == vrep.simx_return_ok and button_id != -1:
self.disable_buttons()
vrep.simxGetUIEventButton(self._clientID, self._uiHandle, vrep.simx_opmode_streaming)
return self.Button(button_id)
return None
def disable_buttons(self):
for i in range(3, 7):
_, prop = vrep.simxGetUIButtonProperty(self._clientID, self._uiHandle, i, vrep.simx_opmode_oneshot)
prop &= ~vrep.sim_buttonproperty_enabled
vrep.simxSetUIButtonProperty(self._clientID, self._uiHandle, i, prop, vrep.simx_opmode_oneshot)
def enable_buttons(self):
for i in range(3, 7):
_, prop = vrep.simxGetUIButtonProperty(self._clientID, self._uiHandle, i, vrep.simx_opmode_oneshot)
# print(prop)
prop |= vrep.sim_buttonproperty_enabled
vrep.simxSetUIButtonProperty(self._clientID, self._uiHandle, i, prop, vrep.simx_opmode_oneshot)
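# Example usage (illustrative; assumes a V-REP scene containing the
# "create_estimate" model is running and `client_id` was obtained from
# vrep.simxStart):
#   vc = VirtualCreate(client_id)
#   vc.set_pose((0.5, 0.5, 0.05), yaw=1.57)
#   button = vc.get_last_button()
#   if button is VirtualCreate.Button.Sense:
#       vc.set_point_cloud(points)  # `points`: flat list of x, y, z floats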
| mit | 540,666,363,558,763,500 | 37.5 | 114 | 0.622565 | false | 3.311828 | false | false | false |
lord63/wangyi_music_top100 | pyspider/app.py | 1 | 1273 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from os import path
import json
from flask import Flask, g, render_template
from peewee import Model, SqliteDatabase, CharField, FloatField
app = Flask(__name__)
# TODO: override on_result(self, result) method to manage the result yourself.
database_path = path.join(path.abspath(path.dirname(__file__)), 'result.db')
database = SqliteDatabase(database_path)
class BaseModel(Model):
class Meta:
database = database
class Resultdb_top100_version_4(BaseModel):
taskid = CharField(primary_key=True)
result = CharField()
updatetime = FloatField()
url = CharField()
@app.before_request
def before_request():
g.db = database
g.db.connect()
@app.after_request
def after_request(response):
g.db.close()
return response
@app.route('/')
@app.route('/sortby/<sorted_key>')
def index(sorted_key='played'):
top100 = []
for record in Resultdb_top100_version_4.select():
top100.append(json.loads(record.result))
top100 = sorted(top100, key=lambda t: t[sorted_key], reverse=True)[:100]
return render_template('index.html', top100=top100)
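# Example (illustrative): inspect the stored results outside of Flask
#   database.connect()
#   for record in Resultdb_top100_version_4.select().limit(3):
#       print(json.loads(record.result))
#   database.close()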
if __name__ == '__main__':
app.run(debug=True, port=5001)
| mit | 3,199,717,885,062,091,000 | 22.145455 | 78 | 0.685782 | false | 3.385638 | false | false | false |
wolverineav/neutron | neutron/agent/l3/agent.py | 3 | 30869 | # Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import eventlet
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import timeutils
from neutron._i18n import _, _LE, _LI, _LW
from neutron.agent.common import utils as common_utils
from neutron.agent.l3 import dvr
from neutron.agent.l3 import dvr_edge_ha_router
from neutron.agent.l3 import dvr_edge_router as dvr_router
from neutron.agent.l3 import dvr_local_router as dvr_local_router
from neutron.agent.l3 import ha
from neutron.agent.l3 import ha_router
from neutron.agent.l3 import legacy_router
from neutron.agent.l3 import namespace_manager
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_processing_queue as queue
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import pd
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron import context as n_context
from neutron import manager
try:
from neutron_fwaas.services.firewall.agents.l3reference \
import firewall_l3_agent
except Exception:
# TODO(dougw) - REMOVE THIS FROM NEUTRON; during l3_agent refactor only
from neutron.services.firewall.agents.l3reference import firewall_l3_agent
LOG = logging.getLogger(__name__)
# TODO(Carl) Following constants retained to increase SNR during refactoring
NS_PREFIX = namespaces.NS_PREFIX
INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX
# Number of routers to fetch from server at a time on resync.
# Needed to reduce load on server side and to speed up resync on agent side.
SYNC_ROUTERS_MAX_CHUNK_SIZE = 256
SYNC_ROUTERS_MIN_CHUNK_SIZE = 32
class L3PluginApi(object):
"""Agent side of the l3 agent RPC API.
API version history:
1.0 - Initial version.
1.1 - Floating IP operational status updates
1.2 - DVR support: new L3 plugin methods added.
- get_ports_by_subnet
- get_agent_gateway_port
Needed by the agent when operating in DVR/DVR_SNAT mode
1.3 - Get the list of activated services
        1.4 - Added L3 HA update_router_state. This method was reworked
              into update_ha_routers_states
1.5 - Added update_ha_routers_states
1.6 - Added process_prefix_update
1.7 - DVR support: new L3 plugin methods added.
- delete_agent_gateway_port
1.8 - Added address scope information
1.9 - Added get_router_ids
"""
def __init__(self, topic, host):
self.host = host
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def get_routers(self, context, router_ids=None):
"""Make a remote process call to retrieve the sync data for routers."""
cctxt = self.client.prepare()
return cctxt.call(context, 'sync_routers', host=self.host,
router_ids=router_ids)
def get_router_ids(self, context):
"""Make a remote process call to retrieve scheduled routers ids."""
cctxt = self.client.prepare(version='1.9')
return cctxt.call(context, 'get_router_ids', host=self.host)
def get_external_network_id(self, context):
"""Make a remote process call to retrieve the external network id.
@raise oslo_messaging.RemoteError: with TooManyExternalNetworks as
exc_type if there are more than one
external network
"""
cctxt = self.client.prepare()
return cctxt.call(context, 'get_external_network_id', host=self.host)
    def update_floatingip_statuses(self, context, router_id, fip_statuses):
        """Call the plugin to update floating IPs' operational status."""
cctxt = self.client.prepare(version='1.1')
return cctxt.call(context, 'update_floatingip_statuses',
router_id=router_id, fip_statuses=fip_statuses)
def get_ports_by_subnet(self, context, subnet_id):
"""Retrieve ports by subnet id."""
cctxt = self.client.prepare(version='1.2')
return cctxt.call(context, 'get_ports_by_subnet', host=self.host,
subnet_id=subnet_id)
def get_agent_gateway_port(self, context, fip_net):
"""Get or create an agent_gateway_port."""
cctxt = self.client.prepare(version='1.2')
return cctxt.call(context, 'get_agent_gateway_port',
network_id=fip_net, host=self.host)
def get_service_plugin_list(self, context):
"""Make a call to get the list of activated services."""
cctxt = self.client.prepare(version='1.3')
return cctxt.call(context, 'get_service_plugin_list')
def update_ha_routers_states(self, context, states):
"""Update HA routers states."""
cctxt = self.client.prepare(version='1.5')
return cctxt.call(context, 'update_ha_routers_states',
host=self.host, states=states)
def process_prefix_update(self, context, prefix_update):
"""Process prefix update whenever prefixes get changed."""
cctxt = self.client.prepare(version='1.6')
return cctxt.call(context, 'process_prefix_update',
subnets=prefix_update)
def delete_agent_gateway_port(self, context, fip_net):
"""Delete Floatingip_agent_gateway_port."""
cctxt = self.client.prepare(version='1.7')
return cctxt.call(context, 'delete_agent_gateway_port',
host=self.host, network_id=fip_net)
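# Example (illustrative; exercises only the RPC methods defined above):
#   plugin_rpc = L3PluginApi(topics.L3PLUGIN, cfg.CONF.host)
#   ctx = n_context.get_admin_context_without_session()
#   router_ids = plugin_rpc.get_router_ids(ctx)
#   routers = plugin_rpc.get_routers(ctx, router_ids=router_ids)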
class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
ha.AgentMixin,
dvr.AgentMixin,
manager.Manager):
"""Manager for L3NatAgent
API version history:
1.0 initial Version
1.1 changed the type of the routers parameter
to the routers_updated method.
It was previously a list of routers in dict format.
It is now a list of router IDs only.
Per rpc versioning rules, it is backwards compatible.
1.2 - DVR support: new L3 agent methods added.
- add_arp_entry
- del_arp_entry
1.3 - fipnamespace_delete_on_ext_net - to delete fipnamespace
after the external network is removed
Needed by the L3 service when dealing with DVR
"""
target = oslo_messaging.Target(version='1.3')
def __init__(self, host, conf=None):
if conf:
self.conf = conf
else:
self.conf = cfg.CONF
self.router_info = {}
self._check_config_params()
self.process_monitor = external_process.ProcessMonitor(
config=self.conf,
resource_type='router')
self.driver = common_utils.load_interface_driver(self.conf)
self.context = n_context.get_admin_context_without_session()
self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
self.fullsync = True
self.sync_routers_chunk_size = SYNC_ROUTERS_MAX_CHUNK_SIZE
# Get the list of service plugins from Neutron Server
# This is the first place where we contact neutron-server on startup
# so retry in case its not ready to respond.
retry_count = 5
while True:
retry_count = retry_count - 1
try:
self.neutron_service_plugins = (
self.plugin_rpc.get_service_plugin_list(self.context))
except oslo_messaging.RemoteError as e:
with excutils.save_and_reraise_exception() as ctx:
ctx.reraise = False
LOG.warning(_LW('l3-agent cannot check service plugins '
'enabled at the neutron server when '
'startup due to RPC error. It happens '
'when the server does not support this '
'RPC API. If the error is '
'UnsupportedVersion you can ignore this '
'warning. Detail message: %s'), e)
self.neutron_service_plugins = None
except oslo_messaging.MessagingTimeout as e:
with excutils.save_and_reraise_exception() as ctx:
if retry_count > 0:
ctx.reraise = False
LOG.warning(_LW('l3-agent cannot check service '
'plugins enabled on the neutron '
'server. Retrying. '
'Detail message: %s'), e)
continue
break
self.metadata_driver = None
if self.conf.enable_metadata_proxy:
self.metadata_driver = metadata_driver.MetadataDriver(self)
self.namespaces_manager = namespace_manager.NamespaceManager(
self.conf,
self.driver,
self.metadata_driver)
self._queue = queue.RouterProcessingQueue()
super(L3NATAgent, self).__init__(conf=self.conf)
self.target_ex_net_id = None
self.use_ipv6 = ipv6_utils.is_enabled()
self.pd = pd.PrefixDelegation(self.context, self.process_monitor,
self.driver,
self.plugin_rpc.process_prefix_update,
self.create_pd_router_update,
self.conf)
def _check_config_params(self):
"""Check items in configuration files.
Check for required and invalid configuration items.
The actual values are not verified for correctness.
"""
if not self.conf.interface_driver:
msg = _LE('An interface driver must be specified')
LOG.error(msg)
raise SystemExit(1)
if self.conf.ipv6_gateway:
# ipv6_gateway configured. Check for valid v6 link-local address.
try:
                msg = _LE("%s used in config as ipv6_gateway is not a valid "
                          "IPv6 link-local address.")
ip_addr = netaddr.IPAddress(self.conf.ipv6_gateway)
if ip_addr.version != 6 or not ip_addr.is_link_local():
LOG.error(msg, self.conf.ipv6_gateway)
raise SystemExit(1)
except netaddr.AddrFormatError:
LOG.error(msg, self.conf.ipv6_gateway)
raise SystemExit(1)
def _fetch_external_net_id(self, force=False):
"""Find UUID of single external network for this agent."""
if self.conf.gateway_external_network_id:
return self.conf.gateway_external_network_id
# L3 agent doesn't use external_network_bridge to handle external
# networks, so bridge_mappings with provider networks will be used
# and the L3 agent is able to handle any external networks.
if not self.conf.external_network_bridge:
return
if not force and self.target_ex_net_id:
return self.target_ex_net_id
try:
self.target_ex_net_id = self.plugin_rpc.get_external_network_id(
self.context)
return self.target_ex_net_id
except oslo_messaging.RemoteError as e:
with excutils.save_and_reraise_exception() as ctx:
if e.exc_type == 'TooManyExternalNetworks':
ctx.reraise = False
msg = _(
"The 'gateway_external_network_id' option must be "
"configured for this agent as Neutron has more than "
"one external network.")
raise Exception(msg)
def _create_router(self, router_id, router):
args = []
kwargs = {
'router_id': router_id,
'router': router,
'use_ipv6': self.use_ipv6,
'agent_conf': self.conf,
'interface_driver': self.driver,
}
if router.get('distributed'):
kwargs['agent'] = self
kwargs['host'] = self.host
if router.get('distributed') and router.get('ha'):
if self.conf.agent_mode == l3_constants.L3_AGENT_MODE_DVR_SNAT:
kwargs['state_change_callback'] = self.enqueue_state_change
return dvr_edge_ha_router.DvrEdgeHaRouter(*args, **kwargs)
if router.get('distributed'):
if self.conf.agent_mode == l3_constants.L3_AGENT_MODE_DVR_SNAT:
return dvr_router.DvrEdgeRouter(*args, **kwargs)
else:
return dvr_local_router.DvrLocalRouter(*args, **kwargs)
if router.get('ha'):
kwargs['state_change_callback'] = self.enqueue_state_change
return ha_router.HaRouter(*args, **kwargs)
return legacy_router.LegacyRouter(*args, **kwargs)
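    # Editorial summary of the dispatch above (added note, not upstream code):
    # distributed + ha in dvr_snat mode -> DvrEdgeHaRouter; distributed in
    # dvr_snat mode -> DvrEdgeRouter, otherwise -> DvrLocalRouter; ha alone ->
    # HaRouter; plain routers -> LegacyRouter.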
def _router_added(self, router_id, router):
ri = self._create_router(router_id, router)
registry.notify(resources.ROUTER, events.BEFORE_CREATE,
self, router=ri)
self.router_info[router_id] = ri
ri.initialize(self.process_monitor)
# TODO(Carl) This is a hook in to fwaas. It should be cleaned up.
self.process_router_add(ri)
def _safe_router_removed(self, router_id):
"""Try to delete a router and return True if successful."""
try:
self._router_removed(router_id)
except Exception:
LOG.exception(_LE('Error while deleting router %s'), router_id)
return False
else:
return True
def _router_removed(self, router_id):
ri = self.router_info.get(router_id)
if ri is None:
LOG.warning(_LW("Info for router %s was not found. "
"Performing router cleanup"), router_id)
self.namespaces_manager.ensure_router_cleanup(router_id)
return
registry.notify(resources.ROUTER, events.BEFORE_DELETE,
self, router=ri)
ri.delete(self)
del self.router_info[router_id]
registry.notify(resources.ROUTER, events.AFTER_DELETE, self, router=ri)
def router_deleted(self, context, router_id):
"""Deal with router deletion RPC message."""
LOG.debug('Got router deleted notification for %s', router_id)
update = queue.RouterUpdate(router_id,
queue.PRIORITY_RPC,
action=queue.DELETE_ROUTER)
self._queue.add(update)
def routers_updated(self, context, routers):
"""Deal with routers modification and creation RPC message."""
LOG.debug('Got routers updated notification :%s', routers)
if routers:
# This is needed for backward compatibility
if isinstance(routers[0], dict):
routers = [router['id'] for router in routers]
for id in routers:
update = queue.RouterUpdate(id, queue.PRIORITY_RPC)
self._queue.add(update)
def router_removed_from_agent(self, context, payload):
LOG.debug('Got router removed from agent :%r', payload)
router_id = payload['router_id']
update = queue.RouterUpdate(router_id,
queue.PRIORITY_RPC,
action=queue.DELETE_ROUTER)
self._queue.add(update)
def router_added_to_agent(self, context, payload):
LOG.debug('Got router added to agent :%r', payload)
self.routers_updated(context, payload)
def _process_router_if_compatible(self, router):
if (self.conf.external_network_bridge and
not ip_lib.device_exists(self.conf.external_network_bridge)):
LOG.error(_LE("The external network bridge '%s' does not exist"),
self.conf.external_network_bridge)
return
if self.conf.router_id and router['id'] != self.conf.router_id:
raise n_exc.RouterNotCompatibleWithAgent(router_id=router['id'])
# Either ex_net_id or handle_internal_only_routers must be set
ex_net_id = (router['external_gateway_info'] or {}).get('network_id')
if not ex_net_id and not self.conf.handle_internal_only_routers:
raise n_exc.RouterNotCompatibleWithAgent(router_id=router['id'])
# If target_ex_net_id and ex_net_id are set they must be equal
target_ex_net_id = self._fetch_external_net_id()
if (target_ex_net_id and ex_net_id and ex_net_id != target_ex_net_id):
# Double check that our single external_net_id has not changed
# by forcing a check by RPC.
if ex_net_id != self._fetch_external_net_id(force=True):
raise n_exc.RouterNotCompatibleWithAgent(
router_id=router['id'])
if router['id'] not in self.router_info:
self._process_added_router(router)
else:
self._process_updated_router(router)
def _process_added_router(self, router):
self._router_added(router['id'], router)
ri = self.router_info[router['id']]
ri.router = router
ri.process(self)
registry.notify(resources.ROUTER, events.AFTER_CREATE, self, router=ri)
def _process_updated_router(self, router):
ri = self.router_info[router['id']]
ri.router = router
registry.notify(resources.ROUTER, events.BEFORE_UPDATE,
self, router=ri)
ri.process(self)
registry.notify(resources.ROUTER, events.AFTER_UPDATE, self, router=ri)
def _resync_router(self, router_update,
priority=queue.PRIORITY_SYNC_ROUTERS_TASK):
router_update.timestamp = timeutils.utcnow()
router_update.priority = priority
router_update.router = None # Force the agent to resync the router
self._queue.add(router_update)
def _process_router_update(self):
for rp, update in self._queue.each_update_to_next_router():
LOG.debug("Starting router update for %s, action %s, priority %s",
update.id, update.action, update.priority)
if update.action == queue.PD_UPDATE:
self.pd.process_prefix_update()
LOG.debug("Finished a router update for %s", update.id)
continue
router = update.router
if update.action != queue.DELETE_ROUTER and not router:
try:
update.timestamp = timeutils.utcnow()
routers = self.plugin_rpc.get_routers(self.context,
[update.id])
except Exception:
msg = _LE("Failed to fetch router information for '%s'")
LOG.exception(msg, update.id)
self._resync_router(update)
continue
if routers:
router = routers[0]
if not router:
removed = self._safe_router_removed(update.id)
if not removed:
self._resync_router(update)
else:
# need to update timestamp of removed router in case
# there are older events for the same router in the
# processing queue (like events from fullsync) in order to
# prevent deleted router re-creation
rp.fetched_and_processed(update.timestamp)
LOG.debug("Finished a router update for %s", update.id)
continue
try:
self._process_router_if_compatible(router)
except n_exc.RouterNotCompatibleWithAgent as e:
LOG.exception(e.msg)
# Was the router previously handled by this agent?
if router['id'] in self.router_info:
LOG.error(_LE("Removing incompatible router '%s'"),
router['id'])
self._safe_router_removed(router['id'])
except Exception:
msg = _LE("Failed to process compatible router '%s'")
LOG.exception(msg, update.id)
self._resync_router(update)
continue
LOG.debug("Finished a router update for %s", update.id)
rp.fetched_and_processed(update.timestamp)
def _process_routers_loop(self):
LOG.debug("Starting _process_routers_loop")
pool = eventlet.GreenPool(size=8)
while True:
pool.spawn_n(self._process_router_update)
# NOTE(kevinbenton): this is set to 1 second because the actual interval
# is controlled by a FixedIntervalLoopingCall in neutron/service.py that
# is responsible for task execution.
@periodic_task.periodic_task(spacing=1, run_immediately=True)
def periodic_sync_routers_task(self, context):
self.process_services_sync(context)
if not self.fullsync:
return
LOG.debug("Starting fullsync periodic_sync_routers_task")
# self.fullsync is True at this point. If an exception -- caught or
# uncaught -- prevents setting it to False below then the next call
# to periodic_sync_routers_task will re-enter this code and try again.
# Context manager self.namespaces_manager captures a picture of
# namespaces *before* fetch_and_sync_all_routers fetches the full list
# of routers from the database. This is important to correctly
# identify stale ones.
try:
with self.namespaces_manager as ns_manager:
self.fetch_and_sync_all_routers(context, ns_manager)
except n_exc.AbortSyncRouters:
self.fullsync = True
def fetch_and_sync_all_routers(self, context, ns_manager):
prev_router_ids = set(self.router_info)
curr_router_ids = set()
timestamp = timeutils.utcnow()
try:
router_ids = ([self.conf.router_id] if self.conf.router_id else
self.plugin_rpc.get_router_ids(context))
# fetch routers by chunks to reduce the load on server and to
# start router processing earlier
for i in range(0, len(router_ids), self.sync_routers_chunk_size):
routers = self.plugin_rpc.get_routers(
context, router_ids[i:i + self.sync_routers_chunk_size])
LOG.debug('Processing :%r', routers)
for r in routers:
curr_router_ids.add(r['id'])
ns_manager.keep_router(r['id'])
if r.get('distributed'):
# need to keep fip namespaces as well
ext_net_id = (r['external_gateway_info'] or {}).get(
'network_id')
if ext_net_id:
ns_manager.keep_ext_net(ext_net_id)
update = queue.RouterUpdate(
r['id'],
queue.PRIORITY_SYNC_ROUTERS_TASK,
router=r,
timestamp=timestamp)
self._queue.add(update)
except oslo_messaging.MessagingTimeout:
if self.sync_routers_chunk_size > SYNC_ROUTERS_MIN_CHUNK_SIZE:
self.sync_routers_chunk_size = max(
self.sync_routers_chunk_size / 2,
SYNC_ROUTERS_MIN_CHUNK_SIZE)
LOG.error(_LE('Server failed to return info for routers in '
'required time, decreasing chunk size to: %s'),
self.sync_routers_chunk_size)
else:
LOG.error(_LE('Server failed to return info for routers in '
'required time even with min chunk size: %s. '
'It might be under very high load or '
'just inoperable'),
self.sync_routers_chunk_size)
raise
except oslo_messaging.MessagingException:
LOG.exception(_LE("Failed synchronizing routers due to RPC error"))
raise n_exc.AbortSyncRouters()
self.fullsync = False
LOG.debug("periodic_sync_routers_task successfully completed")
# adjust chunk size after successful sync
if self.sync_routers_chunk_size < SYNC_ROUTERS_MAX_CHUNK_SIZE:
self.sync_routers_chunk_size = min(
self.sync_routers_chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE,
SYNC_ROUTERS_MAX_CHUNK_SIZE)
# Delete routers that have disappeared since the last sync
for router_id in prev_router_ids - curr_router_ids:
ns_manager.keep_router(router_id)
update = queue.RouterUpdate(router_id,
queue.PRIORITY_SYNC_ROUTERS_TASK,
timestamp=timestamp,
action=queue.DELETE_ROUTER)
self._queue.add(update)
def after_start(self):
# Note: the FWaaS' vArmourL3NATAgent is a subclass of L3NATAgent. It
# calls this method here. So Removing this after_start() would break
# vArmourL3NATAgent. We need to find out whether vArmourL3NATAgent
# can have L3NATAgentWithStateReport as its base class instead of
# L3NATAgent.
eventlet.spawn_n(self._process_routers_loop)
LOG.info(_LI("L3 agent started"))
def create_pd_router_update(self):
router_id = None
update = queue.RouterUpdate(router_id,
queue.PRIORITY_PD_UPDATE,
timestamp=timeutils.utcnow(),
action=queue.PD_UPDATE)
self._queue.add(update)
class L3NATAgentWithStateReport(L3NATAgent):
def __init__(self, host, conf=None):
super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
self.agent_state = {
'binary': 'neutron-l3-agent',
'host': host,
'availability_zone': self.conf.AGENT.availability_zone,
'topic': topics.L3_AGENT,
'configurations': {
'agent_mode': self.conf.agent_mode,
'router_id': self.conf.router_id,
'handle_internal_only_routers':
self.conf.handle_internal_only_routers,
'external_network_bridge': self.conf.external_network_bridge,
'gateway_external_network_id':
self.conf.gateway_external_network_id,
'interface_driver': self.conf.interface_driver,
'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats},
'start_flag': True,
'agent_type': l3_constants.AGENT_TYPE_L3}
report_interval = self.conf.AGENT.report_interval
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
num_ex_gw_ports = 0
num_interfaces = 0
num_floating_ips = 0
router_infos = self.router_info.values()
num_routers = len(router_infos)
for ri in router_infos:
ex_gw_port = ri.get_ex_gw_port()
if ex_gw_port:
num_ex_gw_ports += 1
num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY,
[]))
num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY,
[]))
configurations = self.agent_state['configurations']
configurations['routers'] = num_routers
configurations['ex_gw_ports'] = num_ex_gw_ports
configurations['interfaces'] = num_interfaces
configurations['floating_ips'] = num_floating_ips
try:
agent_status = self.state_rpc.report_state(self.context,
self.agent_state,
True)
if agent_status == l3_constants.AGENT_REVIVED:
LOG.info(_LI('Agent has just been revived. '
'Doing a full sync.'))
self.fullsync = True
self.agent_state.pop('start_flag', None)
except AttributeError:
# This means the server does not support report_state
LOG.warning(_LW("Neutron server does not support state report. "
"State report for this agent will be disabled."))
self.heartbeat.stop()
return
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def after_start(self):
eventlet.spawn_n(self._process_routers_loop)
LOG.info(_LI("L3 agent started"))
# Do the report state before we do the first full sync.
self._report_state()
self.pd.after_start()
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.fullsync = True
LOG.info(_LI("agent_updated by server side %s!"), payload)
| apache-2.0 | -1,998,541,116,222,786,800 | 42.848011 | 79 | 0.584405 | false | 4.265441 | true | false | false |
pradeepbp/stocker2 | watchlist.py | 1 | 2048 | #!/usr/bin/python
'''
watchlist.py
Copyright (C) 2011 Pradeep Balan Pillai
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
'''
import gtk
import pickle
class Watchlist:
def __init__(self):
self.tickers = []
# Load the tickers from pickled list of stocks
def load_tickers(self):
ticker_file = open('ticker_list', 'rb')
if(ticker_file.readlines() == []): # Check whether the file contains data
ticker_file.close() # else add data before initiating
            self.add_stocks()
ticker_file = open('ticker_list','rb')
self.tickers = pickle.load(ticker_file)
else:
ticker_file = open('ticker_list','rb')
self.tickers = pickle.load(ticker_file)
ticker_file.close()
    # Add stocks to watchlist. Argument should be a dictionary of display_name:stock_code pairs
def add_stocks(self, stocks = {}):
pickled_stocks = {}
f = open('watch_list', 'rb') # Load existing watchlist add new stocks and write
try:
pickled_stocks = pickle.load(f)
f.close()
for key in stocks.keys():
pickled_stocks[key] = stocks[key]
f = open('watch_list', 'wb')
pickle.dump(pickled_stocks, f)
f.close()
except EOFError:
f.close()
return 0
def delete_stocks(self, stocks = {}):
pass
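# Hedged usage sketch (not part of the original module). It assumes the pickle
# files 'ticker_list' and 'watch_list' referenced above already exist in the
# working directory:
#
#     wl = Watchlist()
#     wl.load_tickers()                          # fills wl.tickers from 'ticker_list'
#     wl.add_stocks({'Display Name': 'TICKER'})  # merges entries into 'watch_list'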
| gpl-3.0 | 7,806,833,505,427,336,000 | 32.57377 | 92 | 0.615234 | false | 4.087824 | false | false | false |
nuxeh/morph | morphlib/morphologyfinder.py | 1 | 1834 | # Copyright (C) 2013-2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# =*= License: GPL-2 =*=
import cliapp
import morphlib
class MorphologyFinder(object):
'''Abstract away finding morphologies in a git repository.
This class provides an abstraction layer between a git repository
and the morphologies contained in it.
'''
def __init__(self, gitdir, ref=None):
self.gitdir = gitdir
self.ref = ref
def read_morphology(self, filename):
'''Return the un-parsed text of a morphology.
For the given morphology name, locate and return the contents
of the morphology as a string.
Parsing of this morphology into a form useful for manipulating
is handled by the MorphologyLoader class.
'''
return self.gitdir.read_file(filename, self.ref)
def list_morphologies(self):
'''Return the filenames of all morphologies in the (repo, ref).
Finds all morphologies in the git directory at the specified
ref.
'''
def is_morphology_path(path):
return path.endswith('.morph')
return (path
for path in self.gitdir.list_files(self.ref)
if is_morphology_path(path))
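# Hedged usage sketch (not part of the original module). It assumes `gitdir` is
# a morphlib git directory object exposing read_file() and list_files(), as the
# methods above require:
#
#     finder = MorphologyFinder(gitdir, ref='master')
#     for path in finder.list_morphologies():
#         text = finder.read_morphology(path)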
| gpl-2.0 | 6,994,628,587,820,302,000 | 29.065574 | 73 | 0.673391 | false | 4.121348 | false | false | false |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/unknown_tlv/__init__.py | 1 | 12582 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class unknown_tlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/router-information/tlvs/tlv/unknown-tlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: An unknown TLV within the context. Unknown TLVs are
defined to be the set of TLVs that are not modelled
within the OpenConfig model, or are unknown to the
local system such that it cannot decode their value.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "unknown-tlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"router-information",
"tlvs",
"tlv",
"unknown-tlv",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/unknown_tlv/state (container)
YANG Description: Contents of an unknown TLV within the LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/unknown_tlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Contents of an unknown TLV within the LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
from . import state
class unknown_tlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/router-information/tlvs/tlv/unknown-tlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: An unknown TLV within the context. Unknown TLVs are
defined to be the set of TLVs that are not modelled
within the OpenConfig model, or are unknown to the
local system such that it cannot decode their value.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "unknown-tlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"router-information",
"tlvs",
"tlv",
"unknown-tlv",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/unknown_tlv/state (container)
YANG Description: Contents of an unknown TLV within the LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/unknown_tlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Contents of an unknown TLV within the LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
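# Hedged usage sketch (not part of the generated module). pyangbind containers
# are normally reached through their generated parent classes, but the class can
# also be instantiated directly for inspection:
#
#     tlv = unknown_tlv()
#     print(tlv._path())   # YANG path components of this container
#     print(tlv.state)     # read-only 'state' sub-container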
| apache-2.0 | 3,881,322,362,448,355,000 | 36.783784 | 375 | 0.574313 | false | 4.289806 | true | false | false |
tobias47n9e/social-core | social_core/backends/line.py | 3 | 3130 | # vim:fileencoding=utf-8
import requests
import json
from .oauth import BaseOAuth2
from ..exceptions import AuthFailed
from ..utils import handle_http_errors
class LineOAuth2(BaseOAuth2):
name = 'line'
AUTHORIZATION_URL = 'https://access.line.me/dialog/oauth/weblogin'
ACCESS_TOKEN_URL = 'https://api.line.me/v1/oauth/accessToken'
BASE_API_URL = 'https://api.line.me'
USER_INFO_URL = BASE_API_URL + '/v1/profile'
ACCESS_TOKEN_METHOD = 'POST'
STATE_PARAMETER = True
REDIRECT_STATE = True
ID_KEY = 'mid'
EXTRA_DATA = [
('mid', 'id'),
('expire', 'expire'),
('refreshToken', 'refresh_token')
]
def auth_params(self, state=None):
client_id, client_secret = self.get_key_and_secret()
return {
'client_id': client_id,
'redirect_uri': self.get_redirect_uri(),
'response_type': self.RESPONSE_TYPE
}
def process_error(self, data):
error_code = data.get('errorCode') or \
data.get('statusCode') or \
data.get('error')
error_message = data.get('errorMessage') or \
data.get('statusMessage') or \
                        data.get('error_description')
if error_code is not None or error_message is not None:
raise AuthFailed(self, error_message or error_code)
@handle_http_errors
def auth_complete(self, *args, **kwargs):
"""Completes login process, must return user instance"""
client_id, client_secret = self.get_key_and_secret()
code = self.data.get('code')
self.process_error(self.data)
try:
response = self.request_access_token(
self.access_token_url(),
method=self.ACCESS_TOKEN_METHOD,
params={
'requestToken': code,
'channelSecret': client_secret
}
)
self.process_error(response)
return self.do_auth(response['accessToken'], response=response,
*args, **kwargs)
except requests.HTTPError as err:
self.process_error(json.loads(err.response.content))
def get_user_details(self, response):
response.update({
'fullname': response.get('displayName'),
'picture_url': response.get('pictureUrl')
})
return response
def get_user_id(self, details, response):
"""
Return a unique ID for the current user, by default from
server response.
"""
return response.get(self.ID_KEY)
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
try:
response = self.get_json(
self.USER_INFO_URL,
headers={
"Authorization": "Bearer {}".format(access_token)
}
)
self.process_error(response)
return response
except requests.HTTPError as err:
self.process_error(err.response.json())
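# Hedged configuration sketch (not part of the original file). social-core
# derives setting names from the backend's `name` attribute, so 'line' maps to
# SOCIAL_AUTH_LINE_*; the values below are placeholders:
#
#     AUTHENTICATION_BACKENDS = ('social_core.backends.line.LineOAuth2', ...)
#     SOCIAL_AUTH_LINE_KEY = '<channel id>'
#     SOCIAL_AUTH_LINE_SECRET = '<channel secret>'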
| bsd-3-clause | 5,554,109,290,867,153,000 | 32.297872 | 75 | 0.554952 | false | 4.075521 | false | false | false |
mcgonagle/ansible_f5 | library/_bigip_node.py | 1 | 15441 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# Copyright (c) 2013 Matt Hite <[email protected]>
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: _bigip_node
short_description: Manages F5 BIG-IP LTM nodes
deprecated: Deprecated in 2.5. Use the C(bigip_node) module instead.
description:
- "Manages F5 BIG-IP LTM nodes via iControl SOAP API"
version_added: "1.4"
author:
- Matt Hite (@mhite)
- Tim Rupp (@caphrim007)
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
requirements:
- bigsuds
options:
state:
description:
- Pool member state.
required: True
default: present
choices: ['present', 'absent']
session_state:
description:
- Set new session availability status for node.
version_added: "1.9"
choices: ['enabled', 'disabled']
monitor_state:
description:
- Set monitor availability status for node.
version_added: "1.9"
choices: ['enabled', 'disabled']
partition:
description:
- Partition.
default: Common
name:
description:
- Node name.
monitor_type:
description:
- Monitor rule type when monitors > 1.
version_added: "2.2"
choices: ['and_list', 'm_of_n']
quorum:
description:
- Monitor quorum value when monitor_type is m_of_n.
version_added: "2.2"
monitors:
description:
- Monitor template name list. Always use the full path to the monitor.
version_added: "2.2"
host:
description:
- Node IP. Required when state=present and node does not exist. Error when
C(state) is C(absent).
required: True
aliases: ['address', 'ip']
description:
description:
- Node description.
extends_documentation_fragment: f5
'''
EXAMPLES = r'''
- name: Add node
bigip_node:
server: lb.mydomain.com
user: admin
password: secret
state: present
partition: Common
host: 10.20.30.40
name: 10.20.30.40
delegate_to: localhost
# Note that the BIG-IP automatically names the node using the
# IP address specified in previous play's host parameter.
# Future plays referencing this node no longer use the host
# parameter but instead use the name parameter.
# Alternatively, you could have specified a name with the
# name parameter when state=present.
- name: Add node with a single 'ping' monitor
bigip_node:
server: lb.mydomain.com
user: admin
password: secret
state: present
partition: Common
host: 10.20.30.40
name: mytestserver
monitors:
- /Common/icmp
delegate_to: localhost
- name: Modify node description
bigip_node:
server: lb.mydomain.com
user: admin
password: secret
state: present
partition: Common
name: 10.20.30.40
description: Our best server yet
delegate_to: localhost
- name: Delete node
bigip_node:
server: lb.mydomain.com
user: admin
password: secret
state: absent
partition: Common
name: 10.20.30.40
delegate_to: localhost
# The BIG-IP GUI doesn't map directly to the API calls for "Node ->
# General Properties -> State". The following states map to API monitor
# and session states.
#
# Enabled (all traffic allowed):
# monitor_state=enabled, session_state=enabled
# Disabled (only persistent or active connections allowed):
# monitor_state=enabled, session_state=disabled
# Forced offline (only active connections allowed):
# monitor_state=disabled, session_state=disabled
#
# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down
- name: Force node offline
bigip_node:
server: lb.mydomain.com
user: admin
password: mysecret
state: present
session_state: disabled
monitor_state: disabled
partition: Common
name: 10.20.30.40
delegate_to: localhost
'''
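# Hedged illustration (not part of the upstream EXAMPLES): when more than one
# monitor is listed, main() below requires monitor_type, and quorum for m_of_n.
#
#     - name: Add node with two monitors, at least one of which must pass
#       bigip_node:
#         server: lb.mydomain.com
#         user: admin
#         password: secret
#         state: present
#         partition: Common
#         host: 10.20.30.40
#         name: 10.20.30.40
#         monitor_type: m_of_n
#         quorum: 1
#         monitors:
#           - /Common/icmp
#           - /Common/tcp
#       delegate_to: localhost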
def node_exists(api, address):
# hack to determine if node exists
result = False
try:
api.LocalLB.NodeAddressV2.get_object_status(nodes=[address])
result = True
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def create_node_address(api, address, name):
try:
api.LocalLB.NodeAddressV2.create(
nodes=[name],
addresses=[address],
limits=[0]
)
result = True
desc = ""
except bigsuds.OperationFailed as e:
if "already exists" in str(e):
result = False
desc = "referenced name or IP already in use"
else:
# genuine exception
raise
return (result, desc)
def get_node_address(api, name):
return api.LocalLB.NodeAddressV2.get_address(nodes=[name])[0]
def delete_node_address(api, address):
try:
api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
result = True
desc = ""
except bigsuds.OperationFailed as e:
if "is referenced by a member of pool" in str(e):
result = False
desc = "node referenced by pool"
else:
# genuine exception
raise
return (result, desc)
def set_node_description(api, name, description):
api.LocalLB.NodeAddressV2.set_description(nodes=[name],
descriptions=[description])
def get_node_description(api, name):
return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0]
def set_node_session_enabled_state(api, name, session_state):
session_state = "STATE_%s" % session_state.strip().upper()
api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name],
states=[session_state])
def get_node_session_status(api, name):
result = api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0]
result = result.split("SESSION_STATUS_")[-1].lower()
return result
def set_node_monitor_state(api, name, monitor_state):
monitor_state = "STATE_%s" % monitor_state.strip().upper()
api.LocalLB.NodeAddressV2.set_monitor_state(nodes=[name],
states=[monitor_state])
def get_node_monitor_status(api, name):
result = api.LocalLB.NodeAddressV2.get_monitor_status(nodes=[name])[0]
result = result.split("MONITOR_STATUS_")[-1].lower()
return result
def get_monitors(api, name):
result = api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=[name])[0]
monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower()
quorum = result['quorum']
monitor_templates = result['monitor_templates']
return (monitor_type, quorum, monitor_templates)
def set_monitors(api, name, monitor_type, quorum, monitor_templates):
monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper()
monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates}
api.LocalLB.NodeAddressV2.set_monitor_rule(nodes=[name],
monitor_rules=[monitor_rule])
def main():
monitor_type_choices = ['and_list', 'm_of_n']
argument_spec = f5_argument_spec()
meta_args = dict(
session_state=dict(type='str', choices=['enabled', 'disabled']),
monitor_state=dict(type='str', choices=['enabled', 'disabled']),
name=dict(type='str', required=True),
host=dict(type='str', aliases=['address', 'ip']),
description=dict(type='str'),
monitor_type=dict(type='str', choices=monitor_type_choices),
quorum=dict(type='int'),
monitors=dict(type='list')
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
state = module.params['state']
partition = module.params['partition']
validate_certs = module.params['validate_certs']
session_state = module.params['session_state']
monitor_state = module.params['monitor_state']
host = module.params['host']
name = module.params['name']
address = fq_name(partition, name)
description = module.params['description']
monitor_type = module.params['monitor_type']
if monitor_type:
monitor_type = monitor_type.lower()
quorum = module.params['quorum']
monitors = module.params['monitors']
if monitors:
monitors = []
for monitor in module.params['monitors']:
monitors.append(fq_name(partition, monitor))
# sanity check user supplied values
if state == 'absent' and host is not None:
module.fail_json(msg="host parameter invalid when state=absent")
if monitors:
if len(monitors) == 1:
# set default required values for single monitor
quorum = 0
monitor_type = 'single'
elif len(monitors) > 1:
if not monitor_type:
module.fail_json(msg="monitor_type required for monitors > 1")
if monitor_type == 'm_of_n' and not quorum:
module.fail_json(msg="quorum value required for monitor_type m_of_n")
if monitor_type != 'm_of_n':
quorum = 0
elif monitor_type:
# no monitors specified but monitor_type exists
module.fail_json(msg="monitor_type require monitors parameter")
elif quorum is not None:
# no monitors specified but quorum exists
module.fail_json(msg="quorum requires monitors parameter")
try:
api = bigip_api(server, user, password, validate_certs, port=server_port)
result = {'changed': False} # default
if state == 'absent':
if node_exists(api, address):
if not module.check_mode:
deleted, desc = delete_node_address(api, address)
if not deleted:
module.fail_json(msg="unable to delete: %s" % desc)
else:
result = {'changed': True}
else:
# check-mode return value
result = {'changed': True}
elif state == 'present':
if not node_exists(api, address):
if host is None:
module.fail_json(msg="host parameter required when "
"state=present and node does not exist")
if not module.check_mode:
created, desc = create_node_address(api, address=host, name=address)
if not created:
module.fail_json(msg="unable to create: %s" % desc)
else:
result = {'changed': True}
if session_state is not None:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
if monitor_state is not None:
set_node_monitor_state(api, address, monitor_state)
result = {'changed': True}
if description is not None:
set_node_description(api, address, description)
result = {'changed': True}
if monitors:
set_monitors(api, address, monitor_type, quorum, monitors)
else:
# check-mode return value
result = {'changed': True}
else:
# node exists -- potentially modify attributes
if host is not None:
if get_node_address(api, address) != host:
module.fail_json(msg="Changing the node address is "
"not supported by the API; "
"delete and recreate the node.")
if session_state is not None:
session_status = get_node_session_status(api, address)
if session_state == 'enabled' and \
session_status == 'forced_disabled':
if not module.check_mode:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
elif session_state == 'disabled' and \
                        session_status != 'forced_disabled':
if not module.check_mode:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
if monitor_state is not None:
monitor_status = get_node_monitor_status(api, address)
if monitor_state == 'enabled' and \
monitor_status == 'forced_down':
if not module.check_mode:
set_node_monitor_state(api, address,
monitor_state)
result = {'changed': True}
elif monitor_state == 'disabled' and \
monitor_status != 'forced_down':
if not module.check_mode:
set_node_monitor_state(api, address,
monitor_state)
result = {'changed': True}
if description is not None:
if get_node_description(api, address) != description:
if not module.check_mode:
set_node_description(api, address, description)
result = {'changed': True}
if monitors:
t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, address)
if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)):
if not module.check_mode:
set_monitors(api, address, monitor_type, quorum, monitors)
result = {'changed': True}
except Exception as e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
| apache-2.0 | 6,062,553,131,121,641,000 | 34.578341 | 166 | 0.574704 | false | 4.2177 | false | false | false |
ttitto/python | Basics/ExceptionsPackagesFiles/ExceptionsPackagesFiles/AveragePriceFromFileByGroups.py | 1 | 1840 | import sys
from io import TextIOWrapper
def get_categorized_prices_from_file(filepath :str) -> dict:
prices = {}
try:
with open(filepath, 'r') as f:
assert isinstance(f, TextIOWrapper)
for line_number, line_content in enumerate(f):
try:
if line_content and len(line_content.rstrip('\n')) > 0:
splitted_line = line_content.rstrip('\n').split(',')
category = splitted_line[-2]
price_float = float(splitted_line[-1])
if category in prices:
prices[category].append(price_float)
else:
prices[category] = [price_float]
except ValueError:
                    print('Price on row {row} not convertible to float. This price is not included in the result'.format(row = line_number + 1))
except IOError:
print("Failed to open data file.")
return prices
def average_prices_from_file(prices :dict) -> dict:
average_prices = {}
for key in prices:
average_prices[key] = sum(prices[key]) / float(len(prices[key]))
return average_prices
def print_categorized_average_prices(average_prices :dict):
for key in average_prices:
print('{} - average price: {:.2f}'.format(key, average_prices[key]))
def main():
prices = get_categorized_prices_from_file('./data/catalog_sample.csv')
average_prices = average_prices_from_file(prices)
print_categorized_average_prices(average_prices)
print('\n')
prices = get_categorized_prices_from_file('./data/catalog_full.csv')
average_price = average_prices_from_file(prices)
print_categorized_average_prices(average_price)
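# Expected input format, inferred from the parsing in
# get_categorized_prices_from_file(): each non-empty row ends with a category
# and a price. The rows below are illustrative only and are not shipped with
# this script:
#
#     1001,Acme anvil,Hardware,19.99
#     1002,Acme rope,Outdoor,7.50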
if __name__ == "__main__":
    sys.exit(int(main() or 0))
| mit | -3,506,475,599,382,948,400 | 38.170213 | 140 | 0.588043 | false | 3.982684 | false | false | false
elfi-dev/elfi | elfi/methods/density_ratio_estimation.py | 1 | 7670 | """This module contains methods for density ratio estimation."""
import logging
from functools import partial
import numpy as np
logger = logging.getLogger(__name__)
def calculate_densratio_basis_sigma(sigma_1, sigma_2):
"""Heuristic way to choose a basis sigma for density ratio estimation.
Parameters
----------
sigma_1 : float
Standard deviation related to population 1
sigma_2 : float
Standard deviation related to population 2
Returns
-------
float
        Basis function scale parameter that often works well in practice.
"""
sigma = sigma_1 * sigma_2 / np.sqrt(np.abs(sigma_1 ** 2 - sigma_2 ** 2))
return sigma
class DensityRatioEstimation:
"""A density ratio estimation class."""
def __init__(self,
n=100,
epsilon=0.1,
max_iter=500,
abs_tol=0.01,
conv_check_interval=20,
fold=5,
optimize=False):
"""Construct the density ratio estimation algorithm object.
Parameters
----------
n : int
Number of RBF basis functions.
epsilon : float
Parameter determining speed of gradient descent.
max_iter : int
Maximum number of iterations used in gradient descent optimization of the weights.
abs_tol : float
Absolute tolerance value for determining convergence of optimization of the weights.
conv_check_interval : int
Integer defining the interval of convergence checks in gradient descent.
fold : int
Number of folds in likelihood cross validation used to optimize basis scale-params.
optimize : boolean
Boolean indicating whether or not to optimize RBF scale.
"""
self.n = n
self.epsilon = epsilon
self.max_iter = max_iter
self.abs_tol = abs_tol
self.fold = fold
self.sigma = None
self.conv_check_interval = conv_check_interval
self.optimize = optimize
def fit(self,
x,
y,
weights_x=None,
weights_y=None,
sigma=None):
"""Fit the density ratio estimation object.
Parameters
----------
x : array
Sample from the nominator distribution.
y : sample
Sample from the denominator distribution.
weights_x : array
Vector of non-negative nominator sample weights, must be able to normalize.
weights_y : array
Vector of non-negative denominator sample weights, must be able to normalize.
sigma : float or list
List of RBF kernel scales, fit selected at initial call.
"""
self.x_len = x.shape[0]
self.y_len = y.shape[0]
x = x.reshape(self.x_len, -1)
y = y.reshape(self.y_len, -1)
self.x = x
if self.x_len < self.n:
raise ValueError("Number of RBFs ({}) can't be larger "
"than number of samples ({}).".format(self.n, self.x_len))
self.theta = x[:self.n, :]
if weights_x is None:
weights_x = np.ones(self.x_len)
if weights_y is None:
weights_y = np.ones(self.y_len)
self.weights_x = weights_x / np.sum(weights_x)
self.weights_y = weights_y / np.sum(weights_y)
self.x0 = np.average(x, axis=0, weights=weights_x)
if isinstance(sigma, float):
self.sigma = sigma
self.optimize = False
if self.optimize:
if isinstance(sigma, list):
scores_tuple = zip(*[self._KLIEP_lcv(x, y, sigma_i)
for sigma_i in sigma])
self.sigma = sigma[np.argmax(scores_tuple)]
else:
raise ValueError("To optimize RBF scale, "
"you need to provide a list of candidate scales.")
if self.sigma is None:
raise ValueError("RBF width (sigma) has to provided in first call.")
A = self._compute_A(x, self.sigma)
b, b_normalized = self._compute_b(y, self.sigma)
alpha = self._KLIEP(A, b, b_normalized, weights_x, self.sigma)
self.w = partial(self._weighted_basis_sum, sigma=self.sigma, alpha=alpha)
def _gaussian_basis(self, x, x0, sigma):
"""N-D RBF basis-function with equal scale-parameter for every dim."""
return np.exp(-0.5 * np.sum((x - x0) ** 2) / sigma / sigma)
def _weighted_basis_sum(self, x, sigma, alpha):
"""Weighted sum of gaussian basis functions evaluated at x."""
return np.dot(np.array([[self._gaussian_basis(j, i, sigma) for j in self.theta]
for i in np.atleast_2d(x)]), alpha)
def _compute_A(self, x, sigma):
A = np.array([[self._gaussian_basis(i, j, sigma) for j in self.theta] for i in x])
return A
def _compute_b(self, y, sigma):
b = np.sum(np.array(
[[self._gaussian_basis(i, y[j, :], sigma) * self.weights_y[j]
for j in np.arange(self.y_len)]
for i in self.theta]), axis=1)
b_normalized = b / np.dot(b.T, b)
return b, b_normalized
def _KLIEP_lcv(self, x, y, sigma):
"""Compute KLIEP scores for fold-folds."""
A = self._compute_A(x, sigma)
b, b_normalized = self._compute_b(y, sigma)
non_null = np.any(A > 1e-64, axis=1)
non_null_length = sum(non_null)
if non_null_length == 0:
return np.Inf
A_full = A[non_null, :]
x_full = x[non_null, :]
weights_x_full = self.weights_x[non_null]
fold_indices = np.array_split(np.arange(non_null_length), self.fold)
score = np.zeros(self.fold)
for i_fold, fold_index in enumerate(fold_indices):
fold_index_minus = np.setdiff1d(np.arange(non_null_length), fold_index)
alpha = self._KLIEP(A=A_full[fold_index_minus, :], b=b, b_normalized=b_normalized,
weights_x=weights_x_full[fold_index_minus], sigma=sigma)
score[i_fold] = np.average(
np.log(self._weighted_basis_sum(x_full[fold_index, :], sigma, alpha)),
weights=weights_x_full[fold_index])
return [np.mean(score)]
def _KLIEP(self, A, b, b_normalized, weights_x, sigma):
"""Kullback-Leibler Importance Estimation Procedure using gradient descent."""
alpha = 1 / self.n * np.ones(self.n)
target_fun_prev = self._weighted_basis_sum(x=self.x, sigma=sigma, alpha=alpha)
abs_diff = 0.0
non_null = np.any(A > 1e-64, axis=1)
A_full = A[non_null, :]
weights_x_full = weights_x[non_null]
for i in np.arange(self.max_iter):
dAdalpha = np.matmul(A_full.T, (weights_x_full / (np.matmul(A_full, alpha))))
alpha += self.epsilon * dAdalpha
alpha = np.maximum(0, alpha + (1 - np.dot(b.T, alpha)) * b_normalized)
alpha = alpha / np.dot(b.T, alpha)
if np.remainder(i, self.conv_check_interval) == 0:
target_fun = self._weighted_basis_sum(x=self.x, sigma=sigma, alpha=alpha)
abs_diff = np.linalg.norm(target_fun - target_fun_prev)
if abs_diff < self.abs_tol:
break
target_fun_prev = target_fun
return alpha
def max_ratio(self):
"""Find the maximum of the density ratio at numerator sample."""
max_value = np.max(self.w(self.x))
return max_value
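# Hedged usage sketch (not part of the module); the sample data and scales below
# are illustrative assumptions:
#
#     x = np.random.normal(0.0, 1.0, size=(200, 1))       # nominator sample
#     y = np.random.normal(0.5, 1.5, size=(200, 1))       # denominator sample
#     basis_sigma = calculate_densratio_basis_sigma(1.0, 1.5)
#     densratio = DensityRatioEstimation(n=50)
#     densratio.fit(x, y, sigma=basis_sigma)
#     ratios = densratio.w(x)         # estimated density ratio at the nominator sample
#     bound = densratio.max_ratio()   # maximum of the ratio over the nominator sample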
| bsd-3-clause | -3,806,967,867,252,121,000 | 36.05314 | 96 | 0.560626 | false | 3.802677 | false | false | false |
Esri/raster-functions | functions/Reference.py | 1 | 21163 | import numpy as np
"""
This class serves as a quick reference for all methods and attributes associated with a python raster function.
Feel free to use this template a starting point for your implementation or as a cheat-sheet.
"""
class Reference():
"""Class name defaults to module name unless specified in the Python Adapter function's property page.
"""
def __init__(self):
"""Initialize your class attributes here.
"""
self.name = "Reference Function" # a short name for the function. Usually named "<something> Function".
self.description = "Story of the function..." # a detailed description of what this function does.
def getParameterInfo(self):
"""This method returns information on each parameter to your function as a list of dictionaries.
This method must be defined.
Args:
None
Returns:
A list of dictionaries where each entry in the list corresponds to an input parameter--and describes the parameter.
These are the recognized attributes of a parameter:
. name: The keyword associated with this parameter that enables dictionary lookup in other methods
. dataType: The data type of the value held by this parameter.
Allowed values: {'numeric', 'string', 'raster', 'rasters', 'boolean'}
. value: The default value associated with this parameter.
. required: Indicates whether this parameter is required or optional. Allowed values: {True, False}.
. displayName: A friendly name that represents this parameter in Python Adapter function's property page and other UI components
. domain: Indicates the set of allowed values for this parameter.
If specified, the property page shows a drop-down list pre-populated with these values.
This attribute is applicable only to string parameters (dataType='string').
. description: Details on this parameter that's displayed as tooltip in Python Adapter function's property page.
"""
return [
{
'name': 'raster',
'dataType': 'raster',
'value': None,
'required': True,
'displayName': "Input Raster",
'description': "The story of this raster...",
},
{
'name': 'processing_parameter',
'dataType': 'numeric',
'value': "<default value>",
'required': False,
'displayName': "Friendly Name",
'description': "The story of this parameter...",
},
# ... add dictionaries here for additional parameters
]
def getConfiguration(self, **scalars):
"""This method can manage how the output raster is pre-constructed gets.
This method, if defined, controls aspects of parent dataset based on all scalar (non-raster) user inputs.
It's invoked after .getParameterInfo() but before .updateRasterInfo().
Args:
            Use scalars['x'] to obtain the user-specified value of the scalar whose 'name' attribute is
'x' in the .getParameterInfo().
Returns:
A dictionary describing the configuration. These are the recognized configuration attributes:
. extractBands: Tuple(ints) containing indexes of bands of the input raster that need to be extracted.
The first band has index 0.
If unspecified, all bands of the input raster are available in .updatePixels()
. compositeRasters: Boolean indicating whether all input rasters are composited as a single multi-band raster.
Defaults to False. If set to True, a raster by the name 'compositeraster' is available
in .updateRasterInfo() and .updatePixels().
. inheritProperties: Bitwise-OR'd integer that indicates the set of input raster properties that are inherited
by the output raster. If unspecified, all properties are inherited.
These are the recognized values:
. 1: Pixel type
. 2: NoData
. 4: Dimensions (spatial reference, extent, and cell-size)
. 8: Resampling type
. invalidateProperties: Bitwise-OR'd integer that indicates the set of properties of the parent dataset that needs
to be invalidated. If unspecified, no property gets invalidated.
These are the recognized values:
. 1: XForm stored by the function raster dataset.
. 2: Statistics stored by the function raster dataset.
. 4: Histogram stored by the function raster dataset.
. 8: The key properties stored by the function raster dataset.
. padding: The number of extra pixels needed on each side of input pixel blocks.
. inputMask: Boolean indicating whether NoData mask arrays associated with all input rasters are needed
by this function for proper construction of output pixels and mask.
If set to True, the input masks are made available in the pixelBlocks keyword
argument in .updatePixels(). For improved performance, input masks are not made available if
attribute is unspecified.
"""
return {
'extractBands': (0, 2), # we only need the first (red) and third (blue) band.
'compositeRasters': False,
'inheritProperties': 2 | 4 | 8, # inherit everything but the pixel type (1)
'invalidateProperties': 2 | 4 | 8, # invalidate these aspects because we are modifying pixel values and updating key properties.
'padding': 0, # No padding needed. Return input pixel block as is.
'inputMask': False # Don't need mask in .updatePixels. Simply use inherited NoData.
}
def updateRasterInfo(self, **kwargs):
"""This method can update the output raster's information.
This method, if defined, gets called after .getConfiguration().
It's invoked each time a function raster dataset containing this python function is initialized.
Args:
kwargs contains all user-specified scalar values and information associated with all input rasters.
Use kwargs['x'] to obtain the user-specified value of the scalar whose 'name' attribute is 'x' in the .getParameterInfo().
If 'x' represents a raster, kwargs['x_info'] will be a dictionary representing the the information associated with the raster.
Access aspects of a particular raster's information like this: kwargs['<rasterName>_info']['<propertyName>']
where <rasterName> corresponds to a raster parameter where 'rasterName' is the value of the 'name' attribute of the parameter.
and <propertyName> is an aspect of the raster information.
If <rasterName> represents a parameter of type rasters (dataType='rasters'), then
kwargs['<rasterName>_info'] is a tuple of raster info dictionaries.
kwargs['output_info'] is always available and populated with values based on the first raster parameter and .getConfiguration().
These are the properties associated with a raster information:
. bandCount: Integer representing the number of bands in the raster.
. pixelType: String representation of pixel type of the raster. These are the allowed values:
{'t1', 't2', 't4', 'i1', 'i2', 'i4', 'u1', 'u2', 'u4', 'f4', 'f8'}
cf: http://docs.scipy.org/doc/numpy/reference/arrays.interface.html
. noData: ndarray(<bandCount> x <dtype>): An array of one value per raster band representing NoData.
. cellSize: Tuple(2 x floats) representing cell-size in the x- and y-direction.
. nativeExtent: Tuple(4 x floats) representing XMin, YMin, XMax, YMax values of the native image coordinates.
. nativeSpatialReference: Int representing the EPSG code of the native image coordinate system.
. geodataXform: XML-string representation of the associated XForm between native image and map coordinate systems.
. extent: Tuple(4 x floats) representing XMin, YMin, XMax, YMax values of the map coordinates.
. spatialReference: Int representing the EPSG code of the raster's map coordinate system.
. colormap: Tuple(ndarray(int32), 3 x ndarray(uint8)) A tuple of four arrays where the first array contains 32-bit integers
corresponding to pixel values in the indexed raster. The subsequent three arrays contain unsigned 8-bit integers
corresponding to the Red, Green, and Blue components of the mapped color. The sizes of all arrays
must match and correspond to the number of colors in the RGB image.
. rasterAttributeTable: Tuple(String, Tuple(Strings)): A tuple of a string representing the path of the attribute table,
and another tuple representing field names.
Use the information in this tuple with arcpy.da.TableToNumPyArray() to access the values.
. levelOfDetails: Int: The number of level of details in the input raster.
. origin: Tuple(Floats): Tuple of (x,y) coordinate corresponding to the origin.
. bandSelection: Boolean
. histogram: Tuple(numpy.ndarrays): Tuple where each entry is an array of histogram values of a band.
. statistics: Tuple(dicts): Tuple of statistics values.
Each entry in the tuple is a dictionary containing the following attributes of band statistics:
. minimum: Float. Approximate lowest value.
. maximum: Float. Approximate highest value.
. mean: Float. Approximate average value.
. standardDeviation: Float. Approximate measure of spread of values about the mean.
. skipFactorX: Int. Number of horizontal pixels between samples when calculating statistics.
. skipFactorY: Int. Number of vertical pixels between samples when calculating statistics.
Returns:
A dictionary containing output raster info.
This method can update the values of the dictionary in kwargs['output_info'] depending on the kind of
operation in .updatePixels()
Note:
. The tuple in cellSize and maximumCellSize attributes can be used to construct an arcpy.Point object.
. The tuple in extent, nativeExtent and origin attributes can be used to construct an arcpy.Extent object.
. The epsg code in nativeSpatialReference and spatialReference attributes can be used to construct an
arcpy.SpatialReference() object.
"""
kwargs['output_info']['bandCount'] = 1 # output is a single band raster
kwargs['output_info']['pixelType'] = 'f4' # ... with floating-point pixel values.
kwargs['output_info']['statistics'] = () # invalidate any statistics
kwargs['output_info']['histogram'] = () # invalidate any histogram
return kwargs
def updatePixels(self, tlc, shape, props, **pixelBlocks):
"""This method can provide output pixels based on pixel blocks associated with all input rasters.
A python raster function that doesn't actively modify output pixel values doesn't need to define this method.
Args:
. tlc: Tuple(2 x floats) representing the coordinates of the top-left corner of the pixel request.
. shape: Tuple(ints) representing the shape of ndarray that defines the output pixel block.
For a single-band pixel block, the tuple contains two ints (rows, columns).
For multi-band output raster, the tuple defines a three-dimensional array (bands, rows, columns).
The shape associated with the outgoing pixel block and mask must match this argument's value.
. props: A dictionary containing properties that define the output raster from which
a pixel block--of dimension and location is defined by the 'shape' and 'tlc' arguments--is being requested.
These are the available attributes in this dictionary:
. extent: Tuple(4 x floats) representing XMin, YMin, XMax, YMax values of the output
raster's map coordinates.
. pixelType: String representation of pixel type of the raster. These are the allowed values:
{'t1', 't2', 't4', 'i1', 'i2', 'i4', 'u1', 'u2', 'u4', 'f4', 'f8'}
cf: http://docs.scipy.org/doc/numpy/reference/arrays.interface.html
. spatialReference: Int representing the EPSG code of the output raster's map coordinate system.
. cellSize: Tuple(2 x floats) representing cell-size in the x- and y-direction.
. width: Number of columns of pixels in the output raster.
. height: Number of rows of pixels in the output raster.
. noData: TODO.
. pixelBlocks: Keyword argument containing pixels and mask associated with each input raster.
For a raster parameter with dataType='raster' and name='x', pixelBlocks['x_pixels'] and
pixelBlocks['x_mask'] are numpy.ndarrays of pixel and mask values for that input raster.
For a parameter of type rasters (dataType='rasters'), these are tuples of ndarrays--one entry per raster.
The arrays are three-dimensional for multiband rasters.
Note:
. The pixelBlocks dictionary does not contain any scalars parameters.
Returns:
A dictionary with a numpy array containing pixel values in the 'output_pixels' key and,
optionally, an array representing the mask in the 'output_mask' key. The shape of both arrays
must match the 'shape' argument.
"""
if not 'raster_pixels' in pixelBlocks:
raise Exception("No input raster was provided.")
raise Exception("{0}".format(shape))
if len(shape) != 3 or shape[1] < 2:
raise Exception("Input raster must have at least two bands.")
inputBlock = pixelBlocks['raster_pixels'] # get pixels of an raster
red = np.array(inputBlock[0], 'f4') # assuming red's the first band
blue = np.array(inputBlock[1], 'f4') # assuming blue's the second band... per extractBands in .getConfiguration()
outBlock = (red + blue) / 2.0 # this is just an example. nothing complicated here.
pixelBlocks['output_pixels'] = outBlock.astype(props['pixelType'])
return pixelBlocks
def updateKeyMetadata(self, names, bandIndex, **keyMetadata):
"""This method can update dataset-level or band-level key metadata.
When a request for a dataset's key metadata is made, this method (if present) allows the python raster function
to invalidate or overwrite specific requests.
Args:
. names: A tuple containing names of the properties being requested. An empty tuple
indicates that all properties are being requested.
. bandIndex: A zero-based integer representing the raster band for which key metadata is being requested.
bandIndex == -1 indicates that the request is for dataset-level key properties.
. keyMetadata: Keyword argument containing all currently known metadata (or a subset as defined by the names tuple).
Returns:
The updated keyMetadata dictionary.
"""
if bandIndex == -1: # dataset-level properties
keyMetadata['datatype'] = 'Processed' # outgoing dataset is now 'Processed'
elif bandIndex == 0: # properties for the first band
keyMetadata['wavelengthmin'] = None # reset inapplicable band-specific key metadata
keyMetadata['wavelengthmax'] = None
keyMetadata['bandname'] = 'Red_and_Blue' # ... or something meaningful
return keyMetadata
def isLicensed(self, **productInfo):
"""This method, if defined, indicates whether this python raster function is licensed to execute.
This method is invoked soon after the function object is constructed. It enables the python
raster function to halt execution--given information about the parent product and the context of execution.
It also allows the function to, optionally, indicate the expected product-level and the extension that
must be available before execution can proceed.
Args:
The productInfo keyword argument describes the current execution environment.
It contains the following attributes:
. productName: String representing the name of the product {'Desktop', 'Server', 'Engine', ...}
. version: The version string associated with the product
. path: String conntaining the installation path of the product.
. major: An integer representing the major version number of the product.
. minor: A floating-point number representing the minor version number of the product.
. build: An integer represening the build number associated with the product.
. spNumber: An integer representing the service pack number, if applicable.
. spBuild: An integer representing the service pack build, if applicable.
Returns:
A dictionary containing an attribute that indicates whether licensing checks specific to this
python raster function has passed--and, optional attributes that control additional licensing checks
enforced by the Python Adapter:
. okToRun: [Required] Boolean indicating whether it's OK to proceed with the use of this
raster function object. This attribute must be present and, specifically,
set to False for execution to halt. Otherwise, it's assumed to be True (and, that it's OK to proceed).
. message: [Optional] String representing the message to be displayed to the user or logged
when okToRun is False.
. productLevel: [Optional] String representing the product license-level expected from the parent application.
Allowed values include {'Basic', 'Standard', 'Advanced'}.
. extension: [Optional] String representing the name of the extension that must be available before
the Python Adapter is allowed to use this raster function. The set of recognized extension names
are enumerated here: http://resources.arcgis.com/en/help/main/10.2/index.html#//002z0000000z000000.
"""
major = productInfo.get('major', 0)
minor = productInfo.get('minor', 0.0)
build = productInfo.get('build', 0)
return {
'okToRun': major >= 10 and minor >= 3.0 and build >= 4276,
'message': "The python raster function is only compatible with ArcGIS 10.3 build 4276",
'productLevel': 'Standard',
'extension': 'Image'
}
| apache-2.0 | 6,065,817,760,460,987,000 | 65.613419 | 150 | 0.597978 | false | 5.24876 | true | false | false |
time-river/wander | practice/urllib/黑板客爬虫闯关/third.py | 1 | 4637 | '''
爬虫闯关第一关 http://www.heibanke.com/lesson/crawler_ex00/
提示技能: 模拟登陆、csrf-token
来自 http://www.zhihu.com/question/20899988 黑板客的回答
这里成功实现了模拟登陆,写一下思路:
1.提交登陆表单,查看POST方法文件的消息头、Cookie、参数
消息头中的请求头没啥有价值的内容,请求网址与登陆网址是一样的
Cookie倒是引起了注意。清除Cookie,直接打开登陆界面,服务器响应了一个同样名称的Cookie
表单参数里有这三个选项:
"csrfmiddlewaretoken" / "username" / "password"
"csrfmiddlewaretoken"怎么来的呢?查看登陆页面的HTML源码文件,发现<form>后有这么一行:
<input type='hidden' name='csrfmiddlewaretoken' value='3TwYYML662nMWaafvVDWg8pp6RVCAS1d' />
2.模拟登陆步骤:
a.opener.open(auth_url),得到Cookie与csrfmiddlewaretoken
b.构造请求体req,包含Cookie的headers、有csrfmiddlewaretoken/username/password的data
b.opener.open(req),得到Cookie
'''
from urllib import request
from urllib import parse
from urllib import error
from http import cookiejar
import re
class third:
def __init__(self):
self.username = "1234567"
self.password = "1234567890"
self.auth_url = "http://www.heibanke.com/accounts/login"
self.url = "http://www.heibanke.com/lesson/crawler_ex02/"
self.csrfmiddlewaretoken = ""
def __get_cookies(self, req):
cookies = cookiejar.CookieJar()
handler = request.HTTPCookieProcessor(cookies)
opener = request.build_opener(handler)
try:
with opener.open(req) as f:
if f.code == 200:
pattern = re.compile(r"<input.*?type='hidden'.*?name='csrfmiddlewaretoken'.*?value='(.*?)'.*>")
try:
self.csrfmiddlewaretoken = pattern.search(f.read().decode("utf-8")).group(1)
print("Achieved cookies and csrfmiddlewaretoken sucessfully")
except:
print("Achieved cookies sucessfully")
return cookies
else:
print("Lost cookies")
except error.URLError as e:
if hasattr(e, "reason"):
print ("We failed to reach a server. Please check your url and read the Reason")
print ("Reason: {}".format(e.reason))
elif hasattr(e, "code"):
print("The server couldn't fulfill the request.")
print("Error code: {}".format(e.code))
exit()
def __request(self, url, cookies=None):
form = {
"csrfmiddlewaretoken": self.csrfmiddlewaretoken,
"username": self.username,
"password": self.password
}
data = parse.urlencode(form).encode("utf-8")
headers = {}
header_cookie = ""
for cookie in cookies:
header_cookie = "{} {}={};".format(header_cookie, cookie.name, cookie.value)
headers["Cookie"] = header_cookie.strip(' ;')
req = request.Request(url, data, headers=headers)
return req
def __auth_cookies(self, pre_auth_cookies):
req = self.__request(self.auth_url, pre_auth_cookies)
cookies = self.__get_cookies(req)
return cookies
def guess_passwd(self, auth_cookies):
for i in range(31):
self.password = i
req = self.__request(self.url, auth_cookies)
print("正在猜测密码为{}".format(self.password))
try:
with request.urlopen(req) as f:
body = f.read().decode("utf-8")
if not "您输入的密码错误" in body:
print(body)
print("密码为{}".format(i))
break
except error.URLError as e:
if hasattr(e, "reason"):
print ("We failed to reach a server. Please check your url and read the Reason")
print ("Reason: {}".format(e.reason))
elif hasattr(e, "code"):
print("The server couldn't fulfill the request.")
print("Error code: {}".format(e.code))
return
def start(self):
pre_auth_cookies = self.__get_cookies(self.auth_url)
auth_cookies = self.__auth_cookies(pre_auth_cookies)
self.guess_passwd(auth_cookies)
spider = third()
spider.start()
| mit | -7,444,610,969,289,944,000 | 38.820755 | 115 | 0.562189 | false | 3.307994 | false | false | false |
TariqAHassan/BioVida | biovida/images/_interface_support/dicom_data_to_dict.py | 1 | 3569 | # coding: utf-8
"""
Convert DICOM Data into a Python Dictionary
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Also see: https://github.com/darcymason/pydicom/issues/319.
import os
from biovida.support_tools.support_tools import cln, dicom
def _extract_numeric(value):
"""
:param value:
:return:
"""
return float("".join((i for i in value if i.isdigit() or i == '.')))
def parse_age(value):
"""
:param value:
:return:
"""
if not isinstance(value, str):
raise TypeError('`value` must be a string.')
elif len(value) > 4:
return value
if 'y' in value.lower():
return _extract_numeric(value)
elif 'm' in value.lower():
return _extract_numeric(value) / 12.0
else:
return value
def parse_string_to_tuple(value):
"""
:param value:
:type value: ``str``
:return:
"""
braces = [['[', ']'], ['(', ')']]
for (left, right) in braces:
if left in value and right in value:
value_split = value.replace(left, "").replace(right, "").split(",")
value_split_cln = list(filter(None, map(cln, value_split)))
if len(value_split_cln) == 0:
return None
try:
to_return = tuple(map(_extract_numeric, value_split_cln))
except:
to_return = tuple(value_split_cln)
return to_return[0] if len(to_return) == 1 else to_return
else:
raise ValueError("Cannot convert `value` to a tuple.")
def dicom_value_parse(key, value):
"""
Try to convert ``value`` to a numeric or tuple of numerics.
:param key:
:param value:
:return:
"""
value = cln(str(value).replace("\'", "").replace("\"", ""))
if not len(value) or value.lower() == 'none':
return None
if key.lower().endswith(' age') or key == 'PatientAge':
try:
return parse_age(value)
except:
return value
else:
try:
return int(value)
except:
try:
return float(value)
except:
try:
return parse_string_to_tuple(value)
except:
return value
def dicom_object_dict_gen(dicom_object):
"""
:param dicom_object:
:type dicom_object: ``dicom.FileDataset``
:return:
"""
d = dict()
for k in dicom_object.__dir__():
if not k.startswith("__") and k != 'PixelData':
try:
value = dicom_object.data_element(k).value
if type(value).__name__ != 'Sequence':
d[k] = dicom_value_parse(key=k, value=value)
except:
pass
return d
def dicom_to_dict(dicom_file):
"""
Convert the metadata associated with ``dicom_file`` into a python dictionary
:param dicom_file: a path to a dicom file or the yield of ``dicom.read_file(FILE_PATH)``.
:type dicom_file: ``FileDataset`` or ``str``
:return: a dictionary with the dicom meta data.
:rtype: ``dict``
"""
if isinstance(dicom_file, str):
if not os.path.isfile(dicom_file):
raise FileNotFoundError("Could not locate '{0}'.".format(dicom_file))
dicom_object = dicom.read_file(dicom_file)
elif type(dicom_file).__name__ == 'FileDataset':
dicom_object = dicom_file
else:
raise TypeError("`dicom_file` must be of type `dicom.FileDataset` or a string.")
return dicom_object_dict_gen(dicom_object)
| bsd-3-clause | -6,127,188,851,917,151,000 | 25.437037 | 93 | 0.538526 | false | 3.702282 | false | false | false |
kazeevn/Canecycle-of-Caerbannog | canecycle/tests/cache_test.py | 1 | 2595 | import os.path
from unittest import TestCase
from tempfile import NamedTemporaryFile
from itertools import izip, imap
import numpy as np
from canecycle.reader import from_shad_lsml
from canecycle.cache import CacheWriter, CacheReader
class TestReader(TestCase):
test_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"train_except.txt")
def test_cache_write_and_read(self):
cache_file = './testing.cache'
hash_size = 2**20
reader = from_shad_lsml(self.test_file, hash_size)
reader.restart(0)
cache_writer = CacheWriter(60, hash_size)
cache_writer.open(cache_file)
for item in reader:
cache_writer.write_item(item)
cache_writer.close()
reader.restart(3)
cache_reader = CacheReader(cache_file)
cache_reader.restart(3)
self.assertEqual(hash_size, cache_reader.get_features_count())
for read_item, cached_item in izip(reader, cache_reader):
self.assertEqual(read_item.label, cached_item.label)
self.assertEqual(read_item.weight, cached_item.weight)
np.testing.assert_array_equal(
read_item.data, cached_item.data)
np.testing.assert_array_equal(
read_item.indices, cached_item.indices)
reader.restart(-3)
cache_reader.restart(-3)
for read_item, cached_item in izip(reader, cache_reader):
self.assertEqual(read_item.label, cached_item.label)
self.assertEqual(read_item.weight, cached_item.weight)
np.testing.assert_array_equal(
read_item.data, cached_item.data)
np.testing.assert_array_equal(
read_item.indices, cached_item.indices)
reader.close()
cache_reader.restart(-4)
self.assertEqual(sum(imap(lambda item: 1, cache_reader)), 250)
cache_reader.restart(-2)
self.assertEqual(sum(imap(lambda item: 1, cache_reader)), 500)
cache_reader.restart(-100)
self.assertEqual(sum(imap(lambda item: 1, cache_reader)), 10)
self.assertEqual(sum(imap(lambda item: 1, cache_reader)), 0)
cache_reader.restart(4)
self.assertEqual(sum(imap(lambda item: 1, cache_reader)), 750)
cache_reader.restart(2)
self.assertEqual(sum(imap(lambda item: 1, cache_reader)), 500)
cache_reader.restart(100)
self.assertEqual(sum(imap(lambda item: 1, cache_reader)), 990)
self.assertEqual(sum(imap(lambda item: 1, cache_reader)), 0)
| gpl-3.0 | -91,430,640,367,480,530 | 37.731343 | 73 | 0.630058 | false | 3.75 | true | false | false |
geobricks/pgeorest | pgeorest/rest/browse_trmm2.py | 1 | 2691 | import json
from flask import Blueprint
from flask import Response
from flask.ext.cors import cross_origin
# from pgeo.config.settings import read_config_file_json
from pgeo.error.custom_exceptions import PGeoException
from pgeo.dataproviders import trmm2 as t
browse_trmm2 = Blueprint('browse_trmm2', __name__)
# conf = read_config_file_json('trmm2', 'data_providers')
@browse_trmm2.route('/')
@cross_origin(origins='*')
def list_years_service():
try:
out = t.list_years()
return Response(json.dumps(out), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
@browse_trmm2.route('/<year>')
@browse_trmm2.route('/<year>/')
@cross_origin(origins='*')
def list_months_service(year):
try:
out = t.list_months(year)
return Response(json.dumps(out), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
@browse_trmm2.route('/<year>/<month>')
@browse_trmm2.route('/<year>/<month>/')
@cross_origin(origins='*')
def list_days_service(year, month):
try:
out = t.list_days(year, month)
return Response(json.dumps(out), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
@browse_trmm2.route('/<year>/<month>/<day>')
@browse_trmm2.route('/<year>/<month>/<day>/')
@cross_origin(origins='*')
def list_layers_service(year, month, day):
try:
out = t.list_layers(year, month, day)
return Response(json.dumps(out), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
@browse_trmm2.route('/<year>/<month>/<from_day>/<to_day>')
@browse_trmm2.route('/<year>/<month>/<from_day>/<to_day>/')
@cross_origin(origins='*')
def list_layers_subset_service(year, month, from_day, to_day):
try:
out = t.list_layers_subset(year, month, from_day, to_day)
return Response(json.dumps(out), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
@browse_trmm2.route('/layers/<year>/<month>')
@browse_trmm2.route('/layers/<year>/<month>/')
@cross_origin(origins='*')
def list_layers_month_subset_service(year, month):
try:
out = t.list_layers_month_subset(year, month)
return Response(json.dumps(out), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code()) | gpl-2.0 | -503,901,827,237,316,000 | 34.421053 | 88 | 0.674842 | false | 3.136364 | false | false | false |
sergiusens/snapcraft | tests/unit/sources/test_mercurial.py | 2 | 8603 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import shutil
from unittest import mock
from testtools.matchers import Equals
from snapcraft.internal import sources
from tests import unit
from tests.subprocess_utils import call, call_with_output
# LP: #1733584
class TestMercurial(unit.sources.SourceTestCase): # type: ignore
def setUp(self):
super().setUp()
patcher = mock.patch("snapcraft.sources.Mercurial._get_source_details")
self.mock_get_source_details = patcher.start()
self.mock_get_source_details.return_value = ""
self.addCleanup(patcher.stop)
def test_pull(self):
hg = sources.Mercurial("hg://my-source", "source_dir")
hg.pull()
self.mock_run.assert_called_once_with(
["hg", "clone", "hg://my-source", "source_dir"]
)
def test_pull_branch(self):
hg = sources.Mercurial(
"hg://my-source", "source_dir", source_branch="my-branch"
)
hg.pull()
self.mock_run.assert_called_once_with(
["hg", "clone", "-u", "my-branch", "hg://my-source", "source_dir"]
)
def test_pull_tag(self):
hg = sources.Mercurial("hg://my-source", "source_dir", source_tag="tag")
hg.pull()
self.mock_run.assert_called_once_with(
["hg", "clone", "-u", "tag", "hg://my-source", "source_dir"]
)
def test_pull_commit(self):
hg = sources.Mercurial("hg://my-source", "source_dir", source_commit="2")
hg.pull()
self.mock_run.assert_called_once_with(
["hg", "clone", "-u", "2", "hg://my-source", "source_dir"]
)
def test_pull_existing(self):
self.mock_path_exists.return_value = True
hg = sources.Mercurial("hg://my-source", "source_dir")
hg.pull()
self.mock_run.assert_called_once_with(["hg", "pull", "hg://my-source"])
def test_pull_existing_with_tag(self):
self.mock_path_exists.return_value = True
hg = sources.Mercurial("hg://my-source", "source_dir", source_tag="tag")
hg.pull()
self.mock_run.assert_called_once_with(
["hg", "pull", "-r", "tag", "hg://my-source"]
)
def test_pull_existing_with_commit(self):
self.mock_path_exists.return_value = True
hg = sources.Mercurial("hg://my-source", "source_dir", source_commit="2")
hg.pull()
self.mock_run.assert_called_once_with(
["hg", "pull", "-r", "2", "hg://my-source"]
)
def test_pull_existing_with_branch(self):
self.mock_path_exists.return_value = True
hg = sources.Mercurial(
"hg://my-source", "source_dir", source_branch="my-branch"
)
hg.pull()
self.mock_run.assert_called_once_with(
["hg", "pull", "-b", "my-branch", "hg://my-source"]
)
def test_init_with_source_branch_and_tag_raises_exception(self):
raised = self.assertRaises(
sources.errors.SnapcraftSourceIncompatibleOptionsError,
sources.Mercurial,
"hg://mysource",
"source_dir",
source_tag="tag",
source_branch="branch",
)
self.assertThat(raised.source_type, Equals("mercurial"))
self.assertThat(raised.options, Equals(["source-tag", "source-branch"]))
def test_init_with_source_commit_and_tag_raises_exception(self):
raised = self.assertRaises(
sources.errors.SnapcraftSourceIncompatibleOptionsError,
sources.Mercurial,
"hg://mysource",
"source_dir",
source_commit="2",
source_tag="tag",
)
self.assertThat(raised.source_type, Equals("mercurial"))
self.assertThat(raised.options, Equals(["source-tag", "source-commit"]))
def test_init_with_source_commit_and_branch_raises_exception(self):
raised = self.assertRaises(
sources.errors.SnapcraftSourceIncompatibleOptionsError,
sources.Mercurial,
"hg://mysource",
"source_dir",
source_commit="2",
source_branch="branch",
)
self.assertThat(raised.source_type, Equals("mercurial"))
self.assertThat(raised.options, Equals(["source-branch", "source-commit"]))
def test_init_with_source_depth_raises_exception(self):
raised = self.assertRaises(
sources.errors.SnapcraftSourceInvalidOptionError,
sources.Mercurial,
"hg://mysource",
"source_dir",
source_depth=2,
)
self.assertThat(raised.source_type, Equals("mercurial"))
self.assertThat(raised.option, Equals("source-depth"))
def test_source_checksum_raises_exception(self):
raised = self.assertRaises(
sources.errors.SnapcraftSourceInvalidOptionError,
sources.Mercurial,
"hg://mysource",
"source_dir",
source_checksum="md5/d9210476aac5f367b14e513bdefdee08",
)
self.assertThat(raised.source_type, Equals("mercurial"))
self.assertThat(raised.option, Equals("source-checksum"))
def test_has_source_handler_entry(self):
self.assertTrue(sources._source_handler["mercurial"] is sources.Mercurial)
class MercurialBaseTestCase(unit.TestCase):
def rm_dir(self, dir):
if os.path.exists(dir):
shutil.rmtree(dir)
def clean_dir(self, dir):
self.rm_dir(dir)
os.mkdir(dir)
self.addCleanup(self.rm_dir, dir)
def clone_repo(self, repo, tree):
self.clean_dir(tree)
call(["hg", "clone", repo, tree])
os.chdir(tree)
def add_file(self, filename, body, message):
with open(filename, "w") as fp:
fp.write(body)
call(["hg", "add", filename])
call(["hg", "commit", "-am", message])
def check_file_contents(self, path, expected):
body = None
with open(path) as fp:
body = fp.read()
self.assertThat(body, Equals(expected))
class MercurialDetailsTestCase(MercurialBaseTestCase):
def setUp(self):
super().setUp()
self.working_tree = "hg-test"
self.source_dir = "hg-checkout"
self.clean_dir(self.working_tree)
self.clean_dir(self.source_dir)
os.chdir(self.working_tree)
call(["hg", "init"])
with open("testing", "w") as fp:
fp.write("testing")
call(["hg", "add", "testing"])
call(["hg", "commit", "-m", "testing", "-u", "Test User <[email protected]>"])
call(["hg", "tag", "-u", "test", "test-tag"])
self.expected_commit = call_with_output(["hg", "id"]).split()[0]
self.expected_branch = call_with_output(["hg", "branch"])
self.expected_tag = "test-tag"
os.chdir("..")
self.hg = sources.Mercurial(self.working_tree, self.source_dir, silent=True)
self.hg.pull()
self.source_details = self.hg._get_source_details()
def test_hg_details_commit(self):
self.assertThat(
self.source_details["source-commit"], Equals(self.expected_commit)
)
def test_hg_details_branch(self):
self.clean_dir(self.source_dir)
self.hg = sources.Mercurial(
self.working_tree, self.source_dir, silent=True, source_branch="default"
)
self.hg.pull()
self.source_details = self.hg._get_source_details()
self.assertThat(
self.source_details["source-branch"], Equals(self.expected_branch)
)
def test_hg_details_tag(self):
self.clean_dir(self.source_dir)
self.hg = sources.Mercurial(
self.working_tree, self.source_dir, silent=True, source_tag="test-tag"
)
self.hg.pull()
self.source_details = self.hg._get_source_details()
self.assertThat(self.source_details["source-tag"], Equals(self.expected_tag))
| gpl-3.0 | 4,370,385,673,678,980,000 | 32.474708 | 85 | 0.598628 | false | 3.659294 | true | false | false |
srgblnch/LinacDS | LinacData.py | 1 | 222153 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
__author__ = "Lothar Krause and Sergi Blanch-Torne"
__maintainer__ = "Sergi Blanch-Torne"
__copyright__ = "Copyright 2015, CELLS / ALBA Synchrotron"
__license__ = "GPLv3+"
"""Device Server to control the Alba's Linac manufactured by Thales."""
__all__ = ["LinacData", "LinacDataClass", "main"]
__docformat__ = 'restructuredtext'
import PyTango
from PyTango import AttrQuality
import sys
# Add additional import
# PROTECTED REGION ID(LinacData.additionnal_import) ---
from copy import copy
from ctypes import c_uint16, c_uint8, c_float, c_int16
import fcntl
import json  # FIXME: temporary, to dump dictionary on the relations collection
from numpy import uint16, uint8, float32, int16
import pprint
import psutil
import Queue
import socket
import struct
import time
import tcpblock
import threading
import traceback
from types import StringType
from constants import *
from LinacAttrs import (LinacException, CommandExc, AttrExc,
binaryByte, hex_dump)
from LinacAttrs import (EnumerationAttr, PLCAttr, InternalAttr, MeaningAttr,
AutoStopAttr, AutoStopParameter, HistoryAttr,
GroupAttr, LogicAttr)
from LinacAttrs.LinacFeatures import CircularBuffer, HistoryBuffer, EventCtr
class release:
author = 'Lothar Krause &'\
' Sergi Blanch-Torne <[email protected]>'
hexversion = (((MAJOR_VERSION << 8) | MINOR_VERSION) << 8) | BUILD_VERSION
__str__ = lambda self: hex(hexversion)
if False:
TYPE_MAP = {PyTango.DevUChar: c_uint8,
PyTango.DevShort: c_int16,
PyTango.DevFloat: c_float,
PyTango.DevDouble: c_float,
}
else:
TYPE_MAP = {PyTango.DevUChar: ('B', 1),
PyTango.DevShort: ('h', 2),
PyTango.DevFloat: ('f', 4),
PyTango.DevDouble: ('f', 4),
# the PLCs only use floats of 4 bytes
}
def john(sls):
'''used to encode the messages shown for each state code
'''
if type(sls) == dict:
return '\n'+''.join('%d:%s\n' % (t, sls[t]) for t in sls.keys())
else:
return '\n'+''.join('%d:%s\n' % t for t in enumerate(sls))
def latin1(x):
return x.decode('utf-8').replace(u'\u2070', u'\u00b0').\
replace(u'\u03bc', u'\u00b5').encode('latin1')
class AttrList(object):
'''Manages dynamic attributes and contains methods for conveniently adding
attributes to a running TANGO device.
'''
def __init__(self, device):
super(AttrList, self).__init__()
self.impl = device
self._db20_size = self.impl.ReadSize-self.impl.WriteSize
self._db22_size = self.impl.WriteSize
self.alist = list()
self.locals_ = {}
self._relations = {}
self._buider = None
self._fileParsed = threading.Event()
self._fileParsed.clear()
self.globals_ = globals()
self.globals_.update({
'DEVICE': self.impl,
'LIST': self,
'Attr': self.add_AttrAddr,
'AttrAddr': self.add_AttrAddr,
'AttrBit': self.add_AttrAddrBit,
'GrpBit': self.add_AttrGrpBit,
'AttrLogic': self.add_AttrLogic,
'AttrRampeable': self.add_AttrRampeable,
#'AttrLock_ST': self.add_AttrLock_ST,
#'AttrLocking': self.add_AttrLocking,
#'AttrHeartBeat': self.add_AttrHeartBeat,
'AttrPLC': self.add_AttrPLC,
'AttrEnumeration': self.add_AttrEnumeration,
# 'john' : john,
})
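        # Note: the attributes file parsed by parse_file() below is plain
        # python executed with the builder names registered above available.
        # A hypothetical snippet of such a file (attribute names and
        # addresses here are only illustrative) could look like:
        #     Attr('HVPS_V', PyTango.DevFloat, read_addr=100, write_addr=50,
        #          label='High voltage PS voltage', unit='kV', events={})
        #     AttrBit('HVPS_ONC', read_addr=120, read_bit=2,
        #             write_addr=60, write_bit=2, events={})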
def add_Attr(self, name, T, rfun=None, wfun=None, label=None, desc=None,
minValue=None, maxValue=None, unit=None, format=None,
memorized=False, logLevel=None, xdim=0):
if wfun:
if xdim == 0:
attr = PyTango.Attr(name, T, PyTango.READ_WRITE)
else:
self.impl.error_stream("Not supported write attribute in "
"SPECTRUMs. %s will be readonly."
% (name))
attr = PyTango.SpectrumAttr(name, T, PyTango.READ_WRITE, xdim)
else:
if xdim == 0:
attr = PyTango.Attr(name, T, PyTango.READ)
else:
attr = PyTango.SpectrumAttr(name, T, PyTango.READ, xdim)
if logLevel is not None:
self.impl._getAttrStruct(name).logLevel = logLevel
aprop = PyTango.UserDefaultAttrProp()
if unit is not None:
aprop.set_unit(latin1(unit))
if minValue is not None:
aprop.set_min_value(str(minValue))
if maxValue is not None:
aprop.set_max_value(str(maxValue))
if format is not None:
attrStruct = self.impl._getAttrStruct(name)
attrStruct['format'] = str(format)
aprop.set_format(latin1(format))
if desc is not None:
aprop.set_description(latin1(desc))
if label is not None:
aprop.set_label(latin1(label))
if memorized:
attr.set_memorized()
attr.set_memorized_init(True)
self.impl.info_stream("Making %s memorized (%s,%s)"
% (name, attr.get_memorized(),
attr.get_memorized_init()))
attr.set_default_properties(aprop)
rfun = AttrExc(rfun)
try:
if wfun:
wfun = AttrExc(wfun)
except Exception as e:
self.impl.error_stream("Attribute %s build exception: %s"
% (name, e))
self.impl.add_attribute(attr, r_meth=rfun, w_meth=wfun)
if name in self.impl._plcAttrs and \
EVENTS in self.impl._plcAttrs[name]:
self.impl.set_change_event(name, True, False)
elif name in self.impl._internalAttrs and \
EVENTS in self.impl._internalAttrs[name]:
self.impl.set_change_event(name, True, False)
self.alist.append(attr)
return attr
def __mapTypes(self, attrType):
# ugly hack needed for SOLEILs archiving system
if attrType == PyTango.DevFloat:
return PyTango.DevDouble
elif attrType == PyTango.DevUChar:
return PyTango.DevShort
else:
return attrType
def add_AttrAddr(self, name, T, read_addr=None, write_addr=None,
meanings=None, qualities=None, events=None,
formula=None, label=None, desc=None,
readback=None, setpoint=None, switch=None,
IamChecker=None, minValue=None, maxValue=None,
*args, **kwargs):
        '''This method is the most general builder of dynamic attributes: it
            builds a RO attribute, or a RW one when a write address is
            provided. There are other optional parameters to configure some
            special characteristics.
            With the meanings parameter, a secondary attribute is created
            next to the given one (with the suffix *_Status; they share
            other parameters like qualities and events). The numerical
            attribute can be used in formulas, alarms and any other
            machine-like system, but this secondary attribute is a DevString
            that concatenates the read value with a string specified in a
            dictionary inside this meanings parameter, in order to provide
            a human-readable message to understand that value.
            All the Tango attributes have characteristics known as qualities
            (like others such as format, units, and so on) used to provide a
            5-level state-like information. They can be 'invalid', 'valid',
            'changing', 'warning' or 'alarm'. With the dictionary provided to
            the qualities parameter, some ranges or some discrete values can
            be defined. The structure splits between these two situations:
            - Continuous ranges: mainly used for DevDoubles but also
              integers. As an example of the dictionary:
              - WARNING:{ABSOLUTE:{BELOW:15,ABOVE:80}}
              This will show VALID quality between 15 and 80, but WARNING
              if, in absolute terms, the read value goes outside these
              thresholds.
            - Discrete values: mainly used in the state-like attributes,
              where the quality is established by an equality. As an example:
              - ALARM:[0],
                WARNING:[1,2,3,5,6,7],
                CHANGING:[4]
              Suppose a discrete attribute with values between 0 and 8. Then
              when the value is 8 it will be VALID, between 0 and 7 it will
              be WARNING, with the exception of 4 that will show CHANGING.
            The next parameter is events, and with it the behaviour of the
            attribute to emit events is configured. Simply by passing a
            dictionary (even an empty one like {}) the attribute will be
            configured to emit events. In this simplest case events will be
            emitted if the value has changed from the last reading. But for
            DevDouble a key THRESHOLD is used to indicate that read changes
            below it will not produce events (like below the format
            representation). For such a thing a circular buffer is used that
            collects readings, and its mean is compared with a new reading
            to decide whether an event has to be emitted or not.
            Another parameter is the formula. This is mainly used with the
            DevBooleans but it's possible for any other. It's again a
            dictionary with two possible keys 'read' and/or 'write', and
            their items shall be strings evaluated at run time that
            'transform' a reading.
            The set of arguments about readback, setpoint and switch are
            there to store defined relations between attributes. That is, to
            allow the setpoint (which has read and write addresses) to know
            if there is another read-only attribute that measures what the
            setpoint sets. Also this readback may need to know about the
            setpoint and whether the element is switched on or off.
            In the attribute description, one key argument (default None) is
            'IamChecker'. If it contains a list of valid read values, it is
            added to the tcpblock reader to decide whether a received block
            has a valid structure or not.
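            Illustrative sketch of a call (the attribute name, addresses and
            numbers below are made up; the real definitions live in the
            attributes file parsed by AttrList.parse_file(), where this
            builder is exposed as 'Attr'/'AttrAddr'):
                Attr('HVPS_V', PyTango.DevFloat,
                     read_addr=100, write_addr=50,
                     label='High voltage PS voltage', unit='kV',
                     qualities={WARNING: {ABSOLUTE: {BELOW: 15, ABOVE: 80}}},
                     events={THRESHOLD: 0.005})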
'''
self.__traceAttrAddr(name, T, readAddr=read_addr, writeAddr=write_addr)
tango_T = self.__mapTypes(T)
try:
read_addr = self.__check_addresses_and_block_sizes(
name, read_addr, write_addr)
except IndexError:
return
self._prepareAttribute(name, T, readAddr=read_addr,
writeAddr=write_addr, formula=formula,
readback=readback, setpoint=setpoint,
switch=switch, label=label, description=desc,
minValue=minValue, maxValue=maxValue,
*args, **kwargs)
rfun = self.__getAttrMethod('read', name)
if write_addr is not None:
wfun = self.__getAttrMethod('write', name)
else:
wfun = None
# TODO: they are not necessary right now
#if readback is not None:
# self.append2relations(name, READBACK, readback)
#if setpoint is not None:
# self.append2relations(name, SETPOINT, setpoint)
#if switch is not None:
# self.append2relations(name, SWITCH, switch)
self._prepareEvents(name, events)
if IamChecker is not None:
try:
self.impl.setChecker(read_addr, IamChecker)
except Exception as e:
self.impl.error_stream("%s cannot be added in the checker set "
"due to:\n%s" % (name, e))
if meanings is not None:
return self._prepareAttrWithMeaning(name, tango_T, meanings,
qualities, rfun, wfun,
**kwargs)
elif qualities is not None:
return self._prepareAttrWithQualities(name, tango_T, qualities,
rfun, wfun, label=label,
**kwargs)
else:
return self.add_Attr(name, tango_T, rfun, wfun, minValue=minValue,
maxValue=maxValue, **kwargs)
def add_AttrAddrBit(self, name, read_addr=None, read_bit=0,
write_addr=None, write_bit=None, meanings=None,
qualities=None, events=None, isRst=False,
activeRst_t=None, formula=None, switchDescriptor=None,
readback=None, setpoint=None, logLevel=None,
label=None, desc=None, minValue=None, maxValue=None,
*args, **kwargs):
        '''This method is a builder of a boolean dynamic attribute, either RO
            or RW. There are many optional parameters.
            With the meanings argument, besides the DevBoolean a DevString
            attribute will also be generated (suffixed *_Status) with the
            same event and qualities configuration, if any, and it will have
            a human readable message from the concatenation of the value and
            its meaning.
There are also boolean attributes with a reset feature, those are
attributes that can be triggered and after some short period of time
they are automatically set back. The time with this reset active
can be generic (and uses ACTIVE_RESET_T from the constants) or can
be specified for a particular attribute using the activeRst_t.
Another feature implemented for this type of attributes is the
formula. That requires a dictionary with keys:
            + 'read' | 'write': they contain a string to be evaluated when
              the value changes, acting like a filter or avoiding an action
              based on some condition.
For example, this is used to avoid to power up klystrons if there
is an interlock, or to switch of the led when an interlock occurs.
{'read':'VALUE and '\
'self._plcAttrs[\'HVPS_ST\'][\'read_value\'] == 9 and '\
'self._plcAttrs[\'Pulse_ST\'][\'read_value\'] == 8',
'write':'VALUE and '\
'self._plcAttrs[\'HVPS_ST\'][\'read_value\'] == 8 and '\
'self._plcAttrs[\'Pulse_ST\'][\'read_value\'] == 7'
},
            The latest feature implemented relates to the rampeable
            attributes; this is a secondary configuration for the
            AttrRampeable DevDouble attributes, but in this case the purpose
            is to manage ramping on the booleans that power on and off
            those elements.
The ramp itself shall be defined in the DevDouble attribute, the
switch attribute only needs to know where to send this when state
changes.
The switchDescriptor is a dictionary with keys:
+ ATTR2RAMP: the name of the numerical attribute involved with the
state transition.
+ WHENON | WHENOFF: keys to differentiate action interval between
the two possible state changes.
- FROM: initial value of the state change ramp
- TO: final value of the state change ramp
              About those two last keys, either one or both can be present.
            + AUTOSTOP: in case it also has the autostop feature, this is
              used to identify the buffer to clean on the transition from
              off to on.
            The set of arguments about readback, setpoint and switch are
            there to store defined relations between attributes. That is, to
            allow the setpoint (which has read and write addresses) to know
            if there is another read-only attribute that measures what the
            setpoint sets. Also this readback may need to know about the
            setpoint and whether the element is switched on or off.
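            Illustrative sketch of a call (the name, addresses and bits are
            hypothetical; real definitions live in the attributes file,
            where this builder is exposed as 'AttrBit'):
                AttrBit('HVPS_ONC',
                        read_addr=120, read_bit=2,
                        write_addr=60, write_bit=2,
                        events={})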
'''
self.__traceAttrAddr(name, PyTango.DevBoolean, readAddr=read_addr,
readBit=read_bit, writeAddr=write_addr,
writeBit=write_bit)
try:
read_addr = self.__check_addresses_and_block_sizes(
name, read_addr, write_addr)
except IndexError:
return
self._prepareAttribute(name, PyTango.DevBoolean, readAddr=read_addr,
readBit=read_bit, writeAddr=write_addr,
writeBit=write_bit, formula=formula,
readback=readback, setpoint=setpoint,
label=label, description=desc,
minValue=minValue, maxValue=maxValue,
*args, **kwargs)
rfun = self.__getAttrMethod('read', name, isBit=True)
if write_addr is not None:
wfun = self.__getAttrMethod('write', name, isBit=True)
if write_bit is None:
write_bit = read_bit
else:
wfun = None
if isRst:
self.impl._plcAttrs[name][ISRESET] = True
self.impl._plcAttrs[name][RESETTIME] = None
if activeRst_t is not None:
self.impl._plcAttrs[name][RESETACTIVE] = activeRst_t
if type(switchDescriptor) == dict:
self.impl._plcAttrs[name][SWITCHDESCRIPTOR] = switchDescriptor
self.impl._plcAttrs[name][SWITCHDEST] = None
            # in the construction of the AutoStopAttr() the current switch
            # may not be built yet. So now they must be linked together.
if AUTOSTOP in switchDescriptor:
autostopAttrName = switchDescriptor[AUTOSTOP]
if autostopAttrName in self.impl._internalAttrs:
autostopper = self.impl._internalAttrs[autostopAttrName]
if autostopper.switch == name:
autostopper.setSwitchAttr(self.impl._plcAttrs[name])
self._prepareEvents(name, events)
if logLevel is not None:
self.impl._getAttrStruct(name).logLevel = logLevel
if meanings is not None:
return self._prepareAttrWithMeaning(name, PyTango.DevBoolean,
meanings, qualities, rfun,
wfun, historyBuffer=None,
**kwargs)
else:
return self.add_Attr(name, PyTango.DevBoolean, rfun, wfun,
minValue=minValue, maxValue=maxValue,
**kwargs)
def add_AttrGrpBit(self, name, attrGroup=None, meanings=None, qualities=None,
events=None, **kwargs):
        '''A special type of attribute where, given a set of bits by the pair
           [reg,bit], this attribute can operate all of them as one.
           That is, the read value is True if _all_ are true, and
           the write value is applied to _all_ of them
           (almost) at the same time.
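           Illustrative sketch of a call (the name and group content are
           hypothetical; it assumes attrGroup takes the list of names of the
           already built member attributes):
               GrpBit('ka_interlocks',
                      attrGroup=['ka1_interlock_st', 'ka2_interlock_st'],
                      events={})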
'''
self.__traceAttrAddr(name, PyTango.DevBoolean, internal=True)
attrObj = GroupAttr(name=name, device=self.impl, group=attrGroup)
self.impl._internalAttrs[name] = attrObj
rfun = attrObj.read_attr
wfun = attrObj.write_attr
toReturn = [self.add_Attr(name, PyTango.DevBoolean, rfun, wfun,
**kwargs)]
if qualities is not None:
attrObj.qualities = qualities
if meanings is not None:
meaningAttr = self._buildMeaningAttr(attrObj, meanings, rfun,
**kwargs)
toReturn.append(meaningAttr)
self._prepareEvents(name, events)
return tuple(toReturn)
def add_AttrLogic(self, name, logic, label, desc, events=None,
operator='and', inverted=False, **kwargs):
        '''Internal type of attribute made to evaluate a logical formula over
           other attributes owned by the device, giving a boolean result.
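           Illustrative sketch of a call (names and values are hypothetical;
           it assumes the logic dictionary maps attribute names to the list
           of values they must have for the result to be True):
               AttrLogic('ka1_ready',
                         logic={'ka1_hvps_st': [9], 'ka1_pulse_st': [8]},
                         label='Klystron 1 ready',
                         desc='True when HVPS and pulse states are ready',
                         operator='and',
                         events={})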
'''
self.__traceAttrAddr(name, PyTango.DevBoolean, internalRO=True)
self.impl.debug_stream("%s logic: %s" % (name, logic))
# self._prepareInternalAttribute(name, PyTango.DevBoolean, logic=logic,
# operator=operator, inverted=inverted)
attrObj = LogicAttr(name=name, device=self.impl,
valueType=PyTango.DevBoolean,logic=logic,
operator=operator, inverted=inverted)
self.impl._internalAttrs[name] = attrObj
rfun = self.__getAttrMethod('read', name, isLogical=True)
wfun = None # this kind can only be ReadOnly
for key in logic:
self.append2relations(name, LOGIC, key)
self._prepareEvents(name, events)
return self.add_Attr(name, PyTango.DevBoolean, rfun, wfun, label, **kwargs)
def add_AttrRampeable(self, name, T, read_addr, write_addr, label, unit,
rampsDescriptor, events=None, qualities=None,
readback=None, switch=None, desc=None, minValue=None,
maxValue=None, *args, **kwargs):
        '''Given 2 plc memory positions (for read and write), this method
            builds a RW attribute that looks like the other RWs but includes
            ramping features.
            - rampsDescriptor is a dictionary with two main keys:
              + ASCENDING | DESCENDING: Each of these keys contains a
                dictionary inside describing the behaviour of the ramp
                ('+' mandatory keys, '-' optional keys):
                + STEP: value added/subtracted on each step.
                + STEPTIME: seconds until the next step.
                - THRESHOLD: initial value from where ramping starts.
                - SWITCH: attribute to monitor in case it has switched off.
            Those keys will generate attributes called '$name_$key',
            memorised to allow the user to adapt the behaviour depending on
            configuration.
            About the threshold, it is a request from the user (for the
            klystron HV) to not apply the ramp between 0 and N and, once
            above, ramp it to the setpoint. The user also requested to only
            do this ramp in the increasing way; the decrease goes directly.
Example:
- rampsDescriptor = {ASCENDING:
{STEP:0.5,#kV
STEPTIME:1,#s
THRESHOLD:20,#kV
SWITCH:'HVPS_ONC'
}}
            Another request for the Filament voltage is a descending ramp
            with characteristics similar to the klystrons, but also: once a
            power off is commanded, delay it by doing a ramp to 0. This
            second request will be managed from the boolean that does this
            on/off transition, using the AttrAddrBit() builder together with
            a switchDescriptor dictionary.
Example:
- rampsDescriptor = {DESCENDING:
{STEP:1,#kV
STEPTIME:1,#s
THRESHOLD:-50,#kV
SWITCH:'GUN_HV_ONC'
},
ASCENDING:
{STEP:5,#kV
STEPTIME:0.5,#s
THRESHOLD:-90,#kV
SWITCH:'GUN_HV_ONC'
}}
            The set of arguments about readback, setpoint and switch are
            there to store defined relations between attributes. That is, to
            allow the setpoint (which has read and write addresses) to know
            if there is another read-only attribute that measures what the
            setpoint sets. Also this readback may need to know about the
            setpoint and whether the element is switched on or off.
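            Illustrative sketch of a call (the name, addresses and numbers
            are hypothetical; in the attributes file this builder is exposed
            as 'AttrRampeable'):
                AttrRampeable('GUN_HV_V', PyTango.DevFloat,
                              read_addr=200, write_addr=80,
                              label='Gun high voltage', unit='kV',
                              rampsDescriptor={ASCENDING:
                                               {STEP: 5,  # kV
                                                STEPTIME: 0.5,  # s
                                                THRESHOLD: -90,  # kV
                                                SWITCH: 'GUN_HV_ONC'}},
                              events={})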
'''
self.__traceAttrAddr(name, T, readAddr=read_addr, writeAddr=write_addr)
tango_T = self.__mapTypes(T)
self._prepareAttribute(name, T, readAddr=read_addr,
writeAddr=write_addr, readback=readback,
switch=switch, label=label, description=desc,
minValue=minValue, maxValue=maxValue,
*args, **kwargs)
rfun = self.__getAttrMethod('read', name)
wfun = self.__getAttrMethod('write', name, rampeable=True)
self._prepareEvents(name, events)
if qualities is not None:
rampeableAttr = self._prepareAttrWithQualities(name, tango_T,
qualities, rfun,
wfun, label=label,
**kwargs)
else:
rampeableAttr = self.add_Attr(name, tango_T, rfun, wfun, label,
minValue=minValue, maxValue=maxValue,
**kwargs)
        # up to here, it's no different from any other attribute
# Next is specific for rampeable attributes
rampAttributes = []
        # FIXME: temporarily disabled all the ramps
# TODO: review if the callback functionality can be usefull here
# self.impl._plcAttrs[name][RAMP] = rampsDescriptor
# self.impl._plcAttrs[name][RAMPDEST] = None
# for rampDirection in rampsDescriptor.keys():
# if not rampDirection in [ASCENDING,DESCENDING]:
# self.impl.error_stream("In attribute %s, the ramp direction "
# "%s has been not recognised."
# %(name,rampDirection))
# else:
# rampAttributes = []
# newAttr = self._buildInternalAttr4RampEnable(name,name)
# if newAttr != None:
# rampAttributes.append(newAttr)
# for subAttrName in rampsDescriptor[rampDirection].keys():
# if subAttrName in [STEP,STEPTIME,THRESHOLD]:
# if subAttrName == STEPTIME:
# subAttrUnit = 'seconds'
# else:
# subAttrUnit = unit
# defaultValue = rampsDescriptor[rampDirection]\
# [subAttrName]
# newAttr = self._buildInternalAttr4Ramping(\
# name+'_'+rampDirection, subAttrName,
# name+" "+rampDirection, subAttrUnit,
# defaultValue)
# if newAttr is not None:
# rampAttributes.append(newAttr)
rampAttributes.insert(0, rampeableAttr)
return tuple(rampAttributes)
def add_AttrPLC(self, heart, lockst, read_lockingAddr, read_lockingBit,
write_lockingAddr, write_lockingBit):
heartbeat = self.add_AttrHeartBeat(heart)
lockState, lockStatus = self.add_AttrLock_ST(lockst)
locking = self.add_AttrLocking(read_lockingAddr, read_lockingBit,
write_lockingAddr, write_lockingBit)
return (heartbeat, lockState, lockStatus, locking)
def add_AttrLock_ST(self, read_addr):
COMM_STATUS = {0: 'unlocked', 1: 'local', 2: 'remote'}
COMM_QUALITIES = {ALARM: [0], WARNING: [2]}
plc_name = self.impl.get_name().split('/')[-1]
desc = 'lock status %s' % plc_name
# This attr was a number but for the user what shows the ----
# information is an string
self.impl.lock_ST = read_addr
self.impl.setChecker(self.impl.lock_ST, ['\x00', '\x01', '\x02'])
LockAttrs = self.add_AttrAddr('Lock_ST', PyTango.DevUChar, read_addr,
label=desc, desc=desc+john(COMM_STATUS),
meanings=COMM_STATUS,
qualities=COMM_QUALITIES, events={})
        # This UChar is to know what to read from the plc; the AttrAddr,
        # because it has an enumerate, will set this attr as a string
self.impl.set_change_event('Lock_ST', True, False)
self.impl.set_change_event('Lock_Status', True, False)
return LockAttrs
def add_AttrLocking(self, read_addr, read_bit, write_addr, write_bit):
desc = 'True when attempting to obtain write lock'
new_attr = self.add_AttrAddrBit('Locking', read_addr, read_bit,
write_addr, write_bit, desc=desc,
events={})
locking_attr = self.impl.get_device_attr().get_attr_by_name('Locking')
self.impl.Locking = locking_attr
locking_attr.set_write_value(False)
self.impl.locking_raddr = read_addr
self.impl.locking_rbit = read_bit
        # TODO: adding this checker, it works worse
        # if hasattr(self.impl,'read_db') and self.impl.read_db is not None:
# self.impl.setChecker(self.impl.locking_raddr, ['\x00', '\x01'])
self.impl.locking_waddr = write_addr
self.impl.locking_wbit = write_bit
        # TODO: adding this checker, it works worse
# if hasattr(self.impl,'read_db') and self.impl.read_db is not None:
# self.impl.setChecker(self.impl.locking_waddr, ['\x00','\x01'])
self.impl.set_change_event('Locking', True, False)
return new_attr
def add_AttrHeartBeat(self, read_addr, read_bit=0):
self.impl.heartbeat_addr = read_addr
desc = 'cadence bit going from True to False when PLC is okay'
attr = self.add_AttrAddrBit('HeartBeat', read_addr, read_bit,
desc=desc, events={})
self.impl.set_change_event('HeartBeat', True, False)
return attr
def add_AttrEnumeration(self, name, prefix=None, suffixes=None,
*args, **kwargs):
self.impl.info_stream("Building a Enumeration attribute set for %s"
% name)
if prefix is not None:
            # With the klystrons the user likes to see the number in the
            # label, but we don't want it in the attribute name because it
            # would make a difference between those two equal devices.
try:
plcN = int(self.impl.get_name().split('plc')[-1])
except:
plcN = 0
if plcN in [4, 5]:
label = "%s%d_%s" % (prefix, plcN-3, name)
name = "%s_%s" % (prefix, name)
else:
label = "%s_%s" % (prefix, name)
name = "%s_%s" % (prefix, name)
# FIXME: but this is something "ad hoc"
else:
label = "%s" % (name)
if suffixes is None:
suffixes = {'options': [PyTango.DevString, 'read_write'],
'active': [PyTango.DevString, 'read_write'],
'numeric': [PyTango.DevUShort, 'read_only'],
'meaning': [PyTango.DevString, 'read_only']}
attrs = []
try:
enumObj = EnumerationAttr(name, valueType=None)
for suffix in suffixes.keys():
try:
attrType = suffixes[suffix][0]
rfun = enumObj.read_attr
if suffixes[suffix][1] == 'read_write':
wfun = enumObj.write_attr
else:
wfun = None
attr = self.add_Attr(name+'_'+suffix, attrType,
label="%s %s" % (label, suffix),
rfun=rfun, wfun=wfun, **kwargs)
# FIXME: setup events in the self.add_Attr(...)
self.impl.set_change_event(name+'_'+suffix, True, False)
attrs.append(attr)
except Exception as e:
self.impl.debug_stream("In %s enumeration, exception "
"with %s: %s" % (name, suffix, e))
self.impl._internalAttrs[name] = enumObj
enumObj.device = self.impl
except Exception as e:
self.impl.error_stream("Fatal exception building %s: %s"
% (name, e))
traceback.print_exc()
# No need to configure device memorised attributes because the
# _LinacAttr subclasses already have this feature nested in the
# implementation.
return tuple(attrs)
def remove_all(self):
for attr in self.alist:
try:
self.impl.remove_attribute(attr.get_name())
except ValueError as exc:
self.impl.debug_stream(attr.get_name()+': '+str(exc))
def build(self, fname):
if self._buider is not None:
if not isinstance(self._buider, threading.Thread):
msg = "AttrList builder is not a Thread! (%s)" \
% (type(self._buider))
self.impl.error_stream("Ups! This should never happen: %s"
% (msg))
raise AssertionError(msg)
elif self._buider.isAlive():
msg = "AttrList build while it is building"
self.impl.error_stream("Ups! This should never happen: %s"
% (msg))
return
else:
self._buider = None
self._buider = threading.Thread(name="FileParser",
target=self.parse_file, args=(fname,))
self.impl.info_stream("Launch a thread to build the dynamic attrs")
self._buider.start()
def parse_file(self, fname):
t0 = time.time()
self._fileParsed.clear()
msg = "%30s\t%10s\t%5s\t%6s\t%6s"\
% ("'attrName'", "'Type'", "'RO/RW'", "'read'", "'write'")
self.impl.info_stream(msg)
try:
execfile(fname, self.globals_, self.locals_)
except IOError as io:
self.impl.error_stream("AttrList.parse_file IOError: %s\n%s"
% (e, traceback.format_exc()))
raise LinacException(io)
except Exception as e:
self.impl.error_stream("AttrList.parse_file Exception: %s\n%s"
% (e, traceback.format_exc()))
self.impl.debug_stream('Parse attrFile done.')
        # Here, I can be sure that all the objects are built, so any
        # non-existing object reports a configuration mistake in the
        # parsed file.
for origName in self._relations:
try:
origObj = self.impl._getAttrStruct(origName)
for tag in self._relations[origName]:
for destName in self._relations[origName][tag]:
try:
destObj = self.impl._getAttrStruct(destName)
origObj.addReportTo(destObj)
self.impl.debug_stream("Linking %s with %s (%s)"
% (origName, destName, tag))
origObj.reporter.report()
except Exception as e:
self.impl.error_stream("Exception managing the "
"relation between %s and "
"%s: %s" % (origName,
destName, e))
except Exception as e:
self.impl.error_stream("Exception managing %s relations: %s"
% (origName, e))
traceback.print_exc()
self.impl.applyCheckers()
self._fileParsed.set()
tf = time.time()
self.impl.info_stream("file parsed in %6.3f seconds" % (tf-t0))
def parse(self, text):
exec text in self.globals_, self.locals_
# # internal auxiliar methods ---
def __getAttrMethod(self, operation, attrName, isBit=False,
rampeable=False, internal=False, isGroup=False,
isLogical=False):
        # if a specific method exists, use it
attrStruct = self.impl._getAttrStruct(attrName)
return getattr(attrStruct, "%s_attr" % (operation))
# if hasattr(self.impl, "%s_%s" % (operation, attrName)):
# return getattr(self.impl, "%s_%s" % (operation, attrName))
# # or use the generic method for its type
# elif isBit:
# return getattr(self.impl, "%s_attr_bit" % (operation))
# elif operation == 'write' and rampeable:
# # no sense with read operation
# # FIXME: temporally disabled all the ramps
# # return getattr(self.impl,"write_attr_with_ramp")
# return getattr(self.impl, "write_attr")
# elif isGroup:
# return getattr(self.impl, '%s_attrGrpBit' % (operation))
# elif internal:
# return getattr(self.impl, "%s_internal_attr" % (operation))
# elif isLogical:
# return getattr(self.impl, "%s_logical_attr" % (operation))
# else:
# return getattr(self.impl, "%s_attr" % (operation))
def __traceAttrAddr(self, attrName, attrType, readAddr=None, readBit=None,
writeAddr=None, writeBit=None, internal=False,
internalRO=False):
# first column is the attrName
msg = "%30s\t" % ("'%s'" % attrName)
# second, its type
msg += "%10s\t" % ("'%s'" % attrType)
# Then, if it's read only or read/write
if writeAddr is not None or internal:
msg += " 'RW'\t"
else:
msg += "'RO' \t"
if readAddr is not None:
if readBit is not None:
read = "'%s.%s'" % (readAddr, readBit)
else:
read = "'%s'" % (readAddr)
msg += "%6s\t" % (read)
if writeAddr is not None:
if writeBit is not None:
write = "'%s.%s'" % (writeAddr, writeBit)
else:
write = "'%s'" % (writeAddr)
msg += "%6s\t" % (write)
self.impl.info_stream(msg)
# # prepare attribute structures ---
def _prepareAttribute(self, attrName, attrType, readAddr, readBit=None,
writeAddr=None, writeBit=None, formula=None,
readback=None, setpoint=None, switch=None,
label=None, description=None,
minValue=None, maxValue=None, **kwargs):
        '''Constructor of an item in the dictionary of attributes related
           with PLC memory locations. Each item has at least a read address
           and a type; booleans also need a read bit. Writable attributes
           have the equivalent write address and, for booleans, the write
           bit (current PLCs use the same bit for read and write, but a
           different one is supported).
           A formula can also be introduced, distinguishing between a read
           case and a write case; both do not need to coexist.
           The readback, setpoint and switch arguments store defined
           relations between attributes. That is, they allow a setpoint
           (which has read and write addresses) to know whether there is
           another read-only attribute that measures what the setpoint
           sets. Likewise, the readback may want to know about the setpoint
           and whether the element is switched on or off.
        '''
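        # Minimal sketch of the expected use (attribute names, addresses and
        # limits here are hypothetical, for illustration only):
        #
        #   self._prepareAttribute('HVPS_Voltage_ST', attrType,
        #                          readAddr=84, writeAddr=40,
        #                          setpoint='HVPS_Voltage_SP',
        #                          minValue=0, maxValue=40)
        #
        # Boolean attributes would additionally pass readBit (and, if it
        # differs from readBit, writeBit).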
if readAddr is None and writeAddr is not None:
readAddr = self._db20_size + writeAddr
attrObj = PLCAttr(name=attrName, device=self.impl, valueType=attrType,
readAddr=readAddr, readBit=readBit,
writeAddr=writeAddr, writeBit=writeBit,
formula=formula,
readback=readback, setpoint=setpoint, switch=switch,
label=label, description=description,
minValue=minValue, maxValue=maxValue)
self.impl._plcAttrs[attrName] = attrObj
self._insert2reverseDictionary(attrName, attrType, readAddr, readBit,
writeAddr, writeBit)
# if readBit is not None:
# if readAddr not in self.impl._addrDct:
# self.impl._addrDct[readAddr] = {}
# self.impl._addrDct[readAddr][readBit] = []
# self.impl._addrDct[readAddr][readBit].append(attrName)
# if writeAddr is not None:
# self.impl._addrDct[readAddr][readBit].append(writeAddr)
# if writeBit is not None:
# self.impl._addrDct[readAddr][readBit].append(writeBit)
# else:
# self.impl._addrDct[readAddr][readBit].append(readBit)
# else:
# if readAddr not in self.impl._addrDct:
# self.impl._addrDct[readAddr] = []
# self.impl._addrDct[readAddr].append(attrName)
# self.impl._addrDct[readAddr].append(attrType)
# if writeAddr is not None:
# self.impl._addrDct[readAddr].append(writeAddr)
# else:
# self.impl.error_stream("The address %s has been found in the "
# "reverse dictionary" % (readAddr))
def _insert2reverseDictionary(self, name, valueType, readAddr, readBit,
writeAddr, writeBit):
'''
        Hackish helper to allow dumping a valid write block when the PLC
        provides invalid values in the write datablock.
:param name:
:param valueType:
:param readAddr:
:param readBit:
:param writeAddr:
:param writeBit:
:return:
'''
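        # Sketch of the structure filled in below (attribute name, addresses
        # and bits are hypothetical): for a boolean mapped at read 12.3 and
        # write 4.3,
        #   dct['readBlock'][12][3] -> {'name': 'GUN_HV_ONC',
        #                               'type': valueType,
        #                               'writeAddr': 4, 'writeBit': 3}
        # and the mirrored entry dct['writeBlock'][4][3] points back with
        # 'readAddr' and 'readBit'.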
dct = self.impl._addrDct
if 'readBlock' not in dct:
dct['readBlock'] = {}
rDct = dct['readBlock']
if 'writeBlock' not in dct:
dct['writeBlock'] = {}
wDct = dct['writeBlock']
if readBit is not None: # boolean
if readAddr not in rDct:
rDct[readAddr] = {}
if readBit in rDct[readAddr]:
self.impl.warn_stream(
"{0} override readAddr {1} readBit {2}: {3}"
"".format(name, readAddr, readBit,
rDct[readAddr][readBit]['name']))
rDct[readAddr][readBit] = {}
rDct[readAddr][readBit]['name'] = name
rDct[readAddr][readBit]['type'] = valueType
if writeAddr is not None:
rDct[readAddr][readBit]['writeAddr'] = writeAddr
if writeBit is None:
writeBit = readBit
rDct[readAddr][readBit]['writeBit'] = writeBit
if writeAddr not in wDct:
wDct[writeAddr] = {}
wDct[writeAddr][writeBit] = {}
wDct[writeAddr][writeBit]['name'] = name
wDct[writeAddr][writeBit]['type'] = valueType
wDct[writeAddr][writeBit]['readAddr'] = readAddr
wDct[writeAddr][writeBit]['readBit'] = readBit
else: # Byte, Word or Float
if readAddr in rDct:
self.impl.warn_stream(
"{0} override readAddr {1}: {2}"
"".format(name, readAddr, rDct[readAddr]['name']))
rDct[readAddr] = {}
rDct[readAddr]['name'] = name
rDct[readAddr]['type'] = valueType
if writeAddr is not None:
rDct[readAddr]['writeAddr'] = writeAddr
if writeAddr in wDct:
self.impl.warn_stream(
"{0} override writeAddr {1}:{2}"
"".format(name, writeAddr, wDct[writeAddr]['name']))
wDct[writeAddr] = {}
wDct[writeAddr]['name'] = name
wDct[writeAddr]['type'] = valueType
wDct[writeAddr]['readAddr'] = readAddr
def _prepareInternalAttribute(self, attrName, attrType, memorized=False,
isWritable=False, defaultValue=None,
logic=None, operator=None, inverted=None):
attrObj = InternalAttr(name=attrName, device=self.impl,
valueType=attrType, memorized=memorized,
isWritable=isWritable,
defaultValue=defaultValue, logic=logic,
operator=operator, inverted=inverted)
self.impl._internalAttrs[attrName] = attrObj
def _prepareEvents(self, attrName, eventConfig):
if eventConfig is not None:
attrStruct = self.impl._getAttrStruct(attrName)
attrStruct[EVENTS] = eventConfig
attrStruct[LASTEVENTQUALITY] = PyTango.AttrQuality.ATTR_VALID
attrStruct[EVENTTIME] = None
def _prepareAttrWithMeaning(self, attrName, attrType, meanings, qualities,
rfun, wfun, historyBuffer=None, **kwargs):
        '''There are some short integers where the number doesn't mean anything
           by itself. The PLC register description shows a relation between
           the possible numbers and their meanings.
           These attributes are split in two:
           - one with only the number (machine readable: archiver, plots)
           - another string with the number and its meaning (human readable)
           The historyBuffer parameter has been developed to introduce
           interlock tracking (using a secondary attribute called *_History).
           That is, starting from a set of non-interlock values, when the
           attribute reads something different from them, it starts collecting
           those new values in order to provide a timeline of the interlock
           activity. Once the interlock is cleaned (the read value is back in
           the list of non-interlock values), this buffer is cleaned as well.
        '''
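        # Hypothetical example of the split: for an attribute 'KA_ST' with
        # meanings such as {0: 'undefined', 1: 'off', 2: 'ready'}, the
        # numeric attribute would report e.g. 2 while the companion
        # 'KA_Status' string attribute would report '2:ready' (names and
        # values are illustrative only).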
# first, build the same than has been archived
attrState = self.add_Attr(attrName, attrType, rfun, wfun, **kwargs)
# then prepare the human readable attribute as a feature
attrStruct = self.impl._plcAttrs[attrName]
attrStruct.qualities = qualities
attrTuple = self._buildMeaningAttr(attrStruct, meanings, rfun,
**kwargs)
toReturn = (attrState,)
toReturn += (attrTuple,)
return toReturn
def _buildMeaningAttr(self, attrObj, meanings, rfun, historyBuffer=None,
**kwargs):
if attrObj.name.endswith('_ST'):
name = attrObj.name.replace('_ST', '_Status')
else:
name = "%s_Status" % (attrObj.name)
attrObj.meanings = meanings
self.impl._plcAttrs[name] = attrObj._meaningsObj
self.impl._plcAttrs[name].alias = name
self.impl._plcAttrs[name].meanings = meanings
self.impl._plcAttrs[name].qualities = attrObj.qualities
meaningAttr = self.add_Attr(name, PyTango.DevString, rfun, wfun=None,
**kwargs)
toReturn = (meaningAttr)
if historyBuffer is not None:
attrHistoryName = "%s_History" % (attrStruct._meaningsalias)
attrStruct.history = historyBuffer
historyStruct = attrStruct._historyObj
historyStruct.history = historyBuffer
historyStruct.alias = attrHistoryName
attrStruct.read_value = HistoryBuffer(
cleaners=historyBuffer[BASESET], maxlen=HISTORYLENGTH,
owner=attrStruct)
xdim = attrStruct.read_value.maxSize()
self.impl._plcAttrs[attrHistoryName] = historyStruct
attrHistory = self.add_Attr(attrHistoryName, PyTango.DevString,
rfun=historyStruct.read_attr,
xdim=xdim, **kwargs)
toReturn += (attrHistory,)
return toReturn
def _prepareAttrWithQualities(self, attrName, attrType, qualities,
rfun, wfun, label=None, unit=None,
autoStop=None, **kwargs):
        '''Attributes with a qualities definition, but without meanings for
           their possible values, are specifically built to have a
           CircularBuffer as the read element. This is made to collect a small
           record of the previous values, needed for the RELATIVE condition
           (mainly used with the CHANGING quality). Without a bit of memory
           of the past it is not possible to know what has happened.
           This kind of attribute has another possible keyword named
           'autoStop'. It was initially made for the eGun HV
           leakage current, to stop it when this leak is too persistent in
           time (adjustable using an extra attribute). Apart from that, the
           user has a feature to disable it.
           TODO: feature 'too far' from a setpoint value.
        '''
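        # Sketch of the expected descriptors (the keys follow the constants
        # used elsewhere in this file; the threshold numbers and the switch
        # name are hypothetical):
        #   qualities = {WARNING: {ABSOLUTE: {BELOW: 2.0}},
        #                CHANGING: {RELATIVE: 0.1}}
        #   autoStop = {SWITCHDESCRIPTOR: 'GUN_HV_ONC', BELOW: 0.1,
        #               INTEGRATIONTIME: 1.0}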
self.impl._plcAttrs[attrName][READVALUE] = \
CircularBuffer([], owner=self.impl._plcAttrs[attrName])
self.impl._plcAttrs[attrName][QUALITIES] = qualities
toReturn = (self.add_Attr(attrName, attrType, rfun, wfun, label=label,
unit=unit, **kwargs),)
if autoStop is not None:
# FIXME: shall it be in the AttrWithQualities? Or more generic?
toReturn += self._buildAutoStopAttributes(attrName, label,
attrType, autoStop,
**kwargs)
return toReturn
# # Builders for subattributes ---
def _buildAutoStopAttributes(self, baseName, baseLabel, attrType,
autoStopDesc, logLevel, **kwargs):
        # TODO: review if the callback between attributes can be useful here
attrs = []
autostopperName = "%s_%s" % (baseName, AUTOSTOP)
autostopperLabel = "%s %s" % (baseLabel, AUTOSTOP)
autostopSwitch = autoStopDesc.get(SWITCHDESCRIPTOR, None)
if autostopSwitch in self.impl._plcAttrs:
autostopSwitch = self.impl._plcAttrs[autostopSwitch]
            # depending on the build process, the switch object may not be
            # built yet. That's why the name (as string) is stored.
            # Later, when the switch (AttrAddrBit) is built, this assignment
            # will be completed.
autostopper = AutoStopAttr(name=autostopperName,
valueType=attrType,
device=self.impl,
plcAttr=self.impl._plcAttrs[baseName],
below=autoStopDesc.get(BELOW, None),
above=autoStopDesc.get(ABOVE, None),
switchAttr=autostopSwitch,
integr_t=autoStopDesc.get(INTEGRATIONTIME,
None),
events={})
self.impl._internalAttrs[autostopperName] = autostopper
spectrumAttr = self.add_Attr(autostopperName, PyTango.DevDouble,
rfun=autostopper.read_attr, xdim=1000,
label=autostopperLabel)
attrs.append(spectrumAttr)
enableAttr = self._buildAutoStopperAttr(autostopperName,
autostopperLabel, ENABLE,
autostopper._enable,
PyTango.DevBoolean,
memorised=True, writable=True)
attrs.append(enableAttr)
for condition in [BELOW, ABOVE]:
if condition in autoStopDesc:
condAttr = self._buildAutoStopConditionAttr(condition,
autostopperName,
autostopperLabel,
autostopper)
attrs.append(condAttr)
integrAttr = self._buildAutoStopperAttr(autostopperName,
autostopperLabel,
INTEGRATIONTIME,
autostopper._integr_t,
PyTango.DevDouble,
memorised=True, writable=True)
meanAttr = self._buildAutoStopperAttr(autostopperName,
autostopperLabel, MEAN,
autostopper._mean,
PyTango.DevDouble)
attrs.append(meanAttr)
stdAttr = self._buildAutoStopperAttr(autostopperName,
autostopperLabel, STD,
autostopper._std,
PyTango.DevDouble)
attrs.append(stdAttr)
triggeredAttr = self._buildAutoStopperAttr(autostopperName,
autostopperLabel, TRIGGERED,
autostopper._triggered,
PyTango.DevBoolean)
attrs.append(triggeredAttr)
if logLevel is not None:
autostopper.logLevel = logLevel
# it is only necessary to set it in one of them (here is the main
# one), but can be any because they share their logLevel.
return tuple(attrs)
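    # Illustrative summary (assuming the AUTOSTOP, ENABLE, BELOW, ABOVE,
    # INTEGRATIONTIME, MEAN, STD and TRIGGERED constants map to strings of
    # similar names): for a hypothetical base attribute 'GUN_HV_I', the
    # builder above creates the spectrum 'GUN_HV_I_AutoStop' plus scalar
    # helpers for Enable, the Below/Above thresholds, IntegrationTime,
    # Mean, Std and Triggered, all sharing the same AutoStopAttr object.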
def _buildAutoStopperAttr(self, baseName, baseLabel, suffix,
autostopperComponent, dataType, memorised=False,
writable=False):
attrName = "%s_%s" % (baseName, suffix)
attrLabel = "%s %s" % (baseLabel, suffix)
autostopperComponent.alias = attrName
if memorised:
autostopperComponent.setMemorised()
rfun = autostopperComponent.read_attr
if writable:
wfun = autostopperComponent.write_attr
else:
wfun = None
self.impl._internalAttrs[attrName] = autostopperComponent
return self.add_Attr(attrName, dataType,
rfun=rfun, wfun=wfun,
label=attrLabel)
def _buildAutoStopConditionAttr(self, condition, baseName, baseLabel,
autostopper):
conditionName = "%s_%s_Threshold" % (baseName, condition)
conditionLabel = "%s %s Threshold" % (baseName, condition)
conditioner = getattr(autostopper, '_%s' % (condition.lower()))
conditioner.alias = conditionName
conditioner.setMemorised()
self.impl._internalAttrs[conditionName] = conditioner
return self.add_Attr(conditionName, PyTango.DevDouble,
rfun=conditioner.read_attr,
wfun=conditioner.write_attr,
label=conditionLabel)
def append2relations(self, origin, tag, dependency):
self.impl.debug_stream("%s depends on %s (%s)"
% (origin, dependency, tag))
if dependency not in self._relations:
self._relations[dependency] = {}
if tag not in self._relations[dependency]:
self._relations[dependency][tag] = []
self._relations[dependency][tag].append(origin)
def __check_addresses_and_block_sizes(self, name, read_addr, write_addr):
if read_addr is None and write_addr is not None:
read_addr = self._db20_size+write_addr
            self.impl.debug_stream(
                "{0} defines the read_addr {1} relative to the db20 size {2} "
                "and the write_addr {3}".format(name, read_addr,
                                                self._db20_size, write_addr))
if read_addr > self.impl.ReadSize:
self.impl.warn_stream(
"{0} defines a read_addr {1} out of the size of the "
"db20+db22 {2}: it will not be build"
"".format(name, read_addr, self.impl.ReadSize))
raise IndexError("Out of the DB20")
if write_addr is not None and write_addr > self._db22_size:
self.impl.warn_stream(
"{0} defines a write_addr {1} out of the size of the db22 {2}: "
"it will not be build".format(name, write_addr,
self._db22_size))
raise IndexError("Out of the DB22")
return read_addr
def get_ip(iface='eth0'):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockfd = sock.fileno()
SIOCGIFADDR = 0x8915
ifreq = struct.pack('16sH14s', iface, socket.AF_INET, '\x00'*14)
try:
res = fcntl.ioctl(sockfd, SIOCGIFADDR, ifreq)
except:
return None
ip = struct.unpack('16sH2x4s8x', res)[2]
return socket.inet_ntoa(ip)
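# Usage sketch (the returned address is hypothetical):
#   get_ip('eth0') -> '10.0.7.21', or None if the interface or the ioctl
#   call is not available.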
# PROTECTED REGION END --- LinacData.additionnal_import
# # Device States Description
# # INIT : The device is being initialised.
# # ON : PLC communication normal
# # ALARM : Transient issue
# # FAULT : Unrecoverable issue
# # UNKNOWN : No connection with the PLC, no state information
class LinacData(PyTango.Device_4Impl):
# --------- Add you global variables here --------------------------
# PROTECTED REGION ID(LinacData.global_variables) ---
ReadSize = None
WriteSize = None
BindAddress = None # deprecated
LocalAddress = None
RemoteAddress = None
IpAddress = None # deprecated
PlcAddress = None
Port = None
LocalPort = None
RemotePort = None
# assigned by addAttrLocking
locking_raddr = None
locking_rbit = None
locking_waddr = None
locking_wbit = None
lock_ST = None
Locking = None
is_lockedByTango = None
heartbeat_addr = None
AttrFile = None
_plcAttrs = {}
_internalAttrs = {}
_addrDct = {}
disconnect_t = 0
read_db = None
dataBlockSemaphore = threading.Semaphore()
_important_logs = []
# #ramping auxiliars
# _rampThreads = {}
# _switchThreads = {}
# #hackish to reemit events
# _sayAgainThread = None
# _sayAgainQueue = None
# FIXME: remove the expert attributes! ---
# special event emition trace
#_traceAttrs = []
#_tracedAttrsHistory = {}
#_historySize = 100
_traceTooClose = []
_prevMemDump = None
_prevLockSt = None
def debug_stream(self, msg):
super(LinacData, self).debug_stream(
"[%s] %s" % (threading.current_thread().getName(), msg))
def info_stream(self, msg):
super(LinacData, self).info_stream(
"[%s] %s" % (threading.current_thread().getName(), msg))
def warn_stream(self, msg):
super(LinacData, self).warn_stream(
"[%s] %s" % (threading.current_thread().getName(), msg))
def error_stream(self, msg):
super(LinacData, self).error_stream(
"[%s] %s" % (threading.current_thread().getName(), msg))
####
# PLC connectivity area ---
def connect(self):
        '''This method is used to build the object that maintains the
        communications with the assigned PLC.
        '''
if self.read_db is not None:
return
self.info_stream('connecting...')
self.set_status('connecting...')
try:
self.read_db = tcpblock.open_datablock(self.PlcAddress,
self.Port,
self.ReadSize,
self.WriteSize,
self.BindAddress,
self.info_stream,
self.debug_stream,
self.warn_stream,
self.error_stream,
self.lock_ST)
self.info_stream("build the tcpblock, socket %d"
% (self.read_db.sock.fileno()))
self.write_db = self.read_db
self.info_stream('connected')
self.set_state(PyTango.DevState.ON)
self.set_status('connected')
self.applyCheckers()
return True
except Exception as e:
self.error_stream('connection failed exception: %s'
% (traceback.format_exc()))
self.set_state(PyTango.DevState.FAULT)
self.set_status(traceback.format_exc())
return False
def disconnect(self):
'''This method closes the connection to the assigned PLC.
'''
self.info_stream('disconnecting...')
self.set_status('disconnecting...')
# self._plcUpdatePeriod = PLC_MAX_UPDATE_PERIOD
self._setPlcUpdatePeriod(PLC_MAX_UPDATE_PERIOD)
try:
if self.is_connected():
tcpblock.close_datablock(self.read_db, self.warn_stream)
self.read_db = None
if self.get_state() == PyTango.DevState.ON:
self.set_state(PyTango.DevState.OFF)
self.set_status('not connected')
return True
except:
return False
def reconnect(self):
'''
'''
if time.time() - self.last_update_time > self.ReconnectWait:
self.connect()
def is_connected(self):
        '''Checks if the object that interfaces the communication with
        the PLC is properly built and available.
        '''
return self.read_db is not None and self.read_db.sock is not None
def has_data_available(self):
        '''Check if there is some usable data given by the PLC.
        '''
return self.is_connected() and \
len(self.read_db.buf) == self.ReadSize
def setChecker(self, addr, values):
if not hasattr(self, '_checks'):
self.debug_stream("Initialise checks dict")
self._checks = {}
if isinstance(addr, int) and isinstance(values, list):
self.debug_stream("Adding a checker for address %d "
"with values %s" % (addr, values))
self._checks[addr] = values
return True
return False
def applyCheckers(self):
if hasattr(self, '_checks') and isinstance(self._checks, dict) and \
hasattr(self, 'read_db') and isinstance(self.read_db,
tcpblock.Datablock):
try:
had = len(self.read_db._checks.keys())
for addr in self._checks:
                    if addr not in self.read_db._checks:
self.debug_stream(
"\tfor addr %d insert %s"
% (addr, self._checks[addr]))
self.read_db.setChecker(addr, self._checks[addr])
else:
lst = self.read_db.getChecker(addr)
for value in self._checks[addr]:
if value not in lst:
self.debug_stream(
"\tin addr %d append %s"
% (addr, self._checks[addr]))
                                self.read_db._checks[addr].append(value)
now = len(self.read_db._checks.keys())
if had != now:
self.debug_stream("From %d to %d checkers" % (had, now))
except Exception as e:
self.error_stream("Incomplete applyCheckers: %s" % (e))
def forceWriteAttrs(self):
        '''There are certain situations, like a PLC shutdown, that result
        in bad DB20 values being received. Then the writable values cannot
        be written, because the datablock only changes one register at a
        time while many others still hold bad values, so it is rejected by
        the PLC. Due to this we force the construction of a complete write
        datablock to be sent once for all.
        '''
if not hasattr(self, 'write_db') or self.write_db is None:
return
wDct = self._addrDct['writeBlock']
self.info_stream("Force to reconstruct the write data block")
self.dataBlockSemaphore.acquire()
self.attr_forceWriteDB_read = "%s\n" % (time.strftime(
"%Y/%m/%d %H:%M:%S", time.localtime()))
wblock_was = self.write_db.buf[self.write_db.write_start:]
try:
for wAddr in wDct:
if 'readAddr' in wDct[wAddr]: # Uchars, Shorts, floats
name = wDct[wAddr]['name']
rAddr = wDct[wAddr]['readAddr']
T, size = TYPE_MAP[wDct[wAddr]['type']]
rValue = self.read_db.get(rAddr, T, size)
wValue = self.write_db.get(
wAddr+self.write_db.write_start, T, size)
msg = "%s = (%s, %s) [%s -> %s, %s, %s]" \
% (name, rValue, wValue, rAddr, wAddr, T, size)
self.attr_forceWriteDB_read += "%s\n" % msg
self.info_stream(msg)
self.write_db.write(wAddr, rValue, (T, size),
dry=True)
else: # booleans
was = byte = self.write_db.b(
wAddr+self.write_db.write_start)
msg = "booleans %d" % (wAddr)
self.attr_forceWriteDB_read += "%s\n" % msg
self.info_stream(msg)
for wBit in wDct[wAddr]:
name = wDct[wAddr][wBit]['name']
rAddr = wDct[wAddr][wBit]['readAddr']
rBit = wDct[wAddr][wBit]['readBit']
rValue = self.read_db.bit(rAddr, rBit)
wValue = self.write_db.bit(
wAddr+self.write_db.write_start, rBit)
msg = "\t%s = (%s, %s) [%s.%s -> %s.%s]" \
% (name, rValue, wValue, rAddr, rBit,
wAddr, wBit)
self.attr_forceWriteDB_read += "%s\n" % msg
self.info_stream(msg)
if rValue is True:
byte = byte | (int(1) << wBit)
else:
byte = byte & ((0xFF) ^ (1 << wBit))
msg = "%d = %s -> %s" \
% (wAddr, binaryByte(was), binaryByte(byte))
self.attr_forceWriteDB_read += "%s\n" % msg
self.info_stream(msg)
self.write_db.rewrite()
wblock_is = self.write_db.buf[self.write_db.write_start:]
i = 0
msg = "writeblock:\n%-11s\t%-11s\n" % ("was:","now:")
while i < len(wblock_was):
line = "%-11s\t%-11s\n" % (
' '.join("%02x" % x for x in wblock_was[i:i+4]),
' '.join("%02x" % x for x in wblock_is[i:i + 4]))
msg += line
i += 4
self.attr_forceWriteDB_read += "%s\n" % msg
self.info_stream(msg)
except Exception as e:
msg = "Could not complete the force Write\n%s" % (e)
self.attr_forceWriteDB_read += "%s\n" % msg
self.error_stream(msg)
self.dataBlockSemaphore.release()
# def _getWattrList(self):
# wAttrNames = []
# for attrName in self._plcAttrs.keys():
# attrStruct = self._getAttrStruct(attrName)
# if WRITEVALUE in attrStruct:
# wAttrNames.append(attrName)
# return wAttrNames
# def _forceWriteDB(self, attr2write):
# for attrName in attr2write:
# attrStruct = self._getAttrStruct(attrName)
# write_addr = attrStruct[WRITEADDR]
# write_value = attrStruct[READVALUE]
# if type(attrStruct[READVALUE]) in [CircularBuffer,
# HistoryBuffer]:
# write_value = attrStruct[READVALUE].value
# else:
# write_value = attrStruct[READVALUE]
# self.info_stream("Dry write of %s value %s"
# % (attrName, write_value))
# if WRITEBIT in attrStruct:
# read_addr = attrStruct[READADDR]
# write_bit = attrStruct[WRITEBIT]
# self.__writeBit(attrName, read_addr, write_addr, write_bit,
# write_value, dry=True)
# else:
# self.write_db.write(write_addr, write_value,
# attrStruct[TYPE], dry=True)
# self.write_db.rewrite()
# Done PLC connectivity area ---
####
# state/status manager methods ---
def set_state(self, newState, log=True):
'''Overload of the superclass method to add event
emission functionality.
'''
if self.get_state() != newState:
if log:
self.warn_stream("Change state from %s to %s"
% (self.get_state(), newState))
PyTango.Device_4Impl.set_state(self, newState)
self.push_change_event('State', newState)
self.set_status("")
# as this changes the state, clean non important
# messages in status
def set_status(self, newLine2status, important=False):
'''Overload of the superclass method to add the extra feature of
the persistent messages added to the status string.
'''
# self.debug_stream("In set_status()")
newStatus = "" # The device is in %s state.\n"%(self.get_state())
for importantMsg in self._important_logs:
if len(importantMsg) > 0:
newStatus = "%s%s\n" % (newStatus, importantMsg)
if len(newLine2status) > 0 and \
newLine2status not in self._important_logs:
newStatus = "%s%s\n" % (newStatus, newLine2status)
if important:
self._important_logs.append(newLine2status)
if len(newStatus) == 0:
newStatus = "The device is in %s state.\n" % (self.get_state())
oldStatus = self.get_status()
if newStatus != oldStatus:
PyTango.Device_4Impl.set_status(self, newStatus)
self.warn_stream("New status message: %s"
% (repr(self.get_status())))
self.push_change_event('Status', newStatus)
def clean_status(self):
        '''With the extra feature of the important logs, this method allows
        cleaning all those logs, much as a clean-interlocks method does.
        '''
self.debug_stream("In clean_status()")
self._important_logs = []
self.set_status("")
# done state/status manager methods ---
# def __doTraceAttr(self, attrName, tag):
# if attrName in self._traceAttrs:
# attrStruct = self._getAttrStruct(attrName)
# readValue = attrStruct[READVALUE]
# if WRITEVALUE in attrStruct:
# writeValue = attrStruct[WRITEVALUE]
# else:
# writeValue = float('NaN')
# quality = "%s" % attrStruct[LASTEVENTQUALITY]
# timestamp = time.ctime(attrStruct[READTIME])
# if attrName not in self._tracedAttrsHistory:
# self._tracedAttrsHistory[attrName] = []
# self._tracedAttrsHistory[attrName].append(
# [tag, readValue, writeValue, quality, timestamp])
# self.debug_stream("Traceing %s with %s tag: "
# "read = %s, write = %s (%s,%s)"
# % (attrName, tag, readValue, writeValue,
# quality, timestamp))
# while len(self._tracedAttrsHistory[attrName]) > \
# self._historySize:
# self._tracedAttrsHistory[attrName].pop(0)
####
# event methods ---
def fireEvent(self, attrEventStruct, timestamp=None):
        '''Method with the procedure to emit an event from one existing
        attribute. The minimal needs are the attribute name and the value
        to emit, but the quality and the timestamp can also be specified.
        '''
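        # Expected shapes of attrEventStruct (the name and values here are
        # illustrative): ('GUN_HV_V', 32.5) emits with ATTR_VALID quality,
        # while ('GUN_HV_V', 32.5, PyTango.AttrQuality.ATTR_WARNING) forces
        # the given quality.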
attrName = attrEventStruct[0]
if attrName not in ['lastUpdate', 'lastUpdateStatus']:
self.warn_stream("DEPRECATED: fireEvent(%s)" % attrName)
attrValue = attrEventStruct[1]
if timestamp is None:
timestamp = time.time()
if len(attrEventStruct) == 3: # the quality is specified
quality = attrEventStruct[2]
else:
quality = PyTango.AttrQuality.ATTR_VALID
# self.__doTraceAttr(attrName, "fireEvent(%s)" % attrValue)
if self.__isHistoryBuffer(attrName):
attrValue = self.__buildHistoryBufferString(attrName)
self.push_change_event(attrName, attrValue, timestamp, quality)
else:
self.push_change_event(attrName, attrValue, timestamp, quality)
attrStruct = self._getAttrStruct(attrName)
if attrStruct is not None and \
LASTEVENTQUALITY in attrStruct and \
not quality == attrStruct[LASTEVENTQUALITY]:
attrStruct[LASTEVENTQUALITY] = quality
if attrStruct is not None and EVENTTIME in attrStruct:
now = time.time()
attrStruct[EVENTTIME] = now
attrStruct[EVENTTIMESTR] = time.ctime(now)
def fireEventsList(self, eventsAttrList, timestamp=None, log=False):
        '''Given a set of pairs [attr, value] (with an optional third element
           for the quality), emit events for all of them with the same
           timestamp.
        '''
if log:
self.debug_stream("In fireEventsList(): %d events:\n%s"
% (len(eventsAttrList),
''.join("\t%s\n" % line
for line in eventsAttrList)))
if timestamp is None:
timestamp = time.time()
attrNames = []
for attrEvent in eventsAttrList:
try:
self.fireEvent(attrEvent, timestamp)
attrNames.append(attrEvent[0])
except Exception as e:
self.error_stream("In fireEventsList() Exception with "
"attribute %s: %s" % (attrEvent, e))
traceback.print_exc()
# done event methods ---
####
# Read Attr method for dynattrs ---
# def __applyReadValue(self, attrName, attrValue, timestamp=None):
# '''Hide the internal differences of the stored attribute struct
# and return the last value read from the PLC for a certain attr.
# '''
# self.warn_stream("DEPRECATED: __applyReadValue(%s)" % (attrName))
# attrStruct = self._getAttrStruct(attrName)
# if timestamp is None:
# timestamp = time.time()
# if not self.__filterAutoStopCollection(attrName):
# return
# if type(attrStruct[READVALUE]) in [CircularBuffer, HistoryBuffer]:
# attrStruct[READVALUE].append(attrValue)
# else:
# attrStruct[READVALUE] = attrValue
# attrStruct[READTIME] = timestamp
# # attrStruct[READTIMESTR] = time.ctime(timestamp)
# def __filterAutoStopCollection(self, attrName):
# '''This method is made to manage the collection of data on the
# integration buffer for attributes with the autostop feature.
# No data shall be collected when it is already off (and the
# autostop will not stop anything).
# '''
# self.warn_stream("DEPRECATED: __filterAutoStopCollection(%s)"
# % (attrName))
# attrStruct = self._getAttrStruct(attrName)
# if AUTOSTOP in attrStruct and \
# SWITCHDESCRIPTOR in attrStruct[AUTOSTOP]:
# switchName = attrStruct[AUTOSTOP][SWITCHDESCRIPTOR]
# switchStruct = self._getAttrStruct(switchName)
# if READVALUE in switchStruct and not switchStruct[READVALUE]:
# # do not collect data when the switch to stop
# # is already off
# self.debug_stream("The switch for %s the autostopper is "
# "off, no needed to collect values"
# % (attrName))
# # if there is data collected, do not clean it until a new
# # transition from off to on.
# return False
# return True
# def __applyWriteValue(self, attrName, attrValue):
# '''Hide the internal attribute struct representation and give an
# interface to set a value to be written.
# '''
# self.warn_stream("DEPRECATED: __applyWriteValue(%s)" % (attrName))
# attrStruct = self._getAttrStruct(attrName)
# if WRITEVALUE in attrStruct:
# attrStruct[WRITEVALUE] = attrValue
# def __buildAttrMeaning(self, attrName, attrValue):
# '''As some (state-like) attributes have a meaning, there is a
# status-like attribute that reports what the documentation
# assign to the enumeration.
# '''
# self.warn_stream("DEPRECATED: __buildAttrMeaning(%s)" % (attrName))
# attrStruct = self._getAttrStruct(attrName)
# meanings = attrStruct[MEANINGS]
# if attrValue in meanings:
# return "%d:%s" % (attrValue, meanings[attrValue])
# else:
# return "%d:unknown" % (attrValue)
# def __buildAttrQuality(self, attrName, attrValue):
# '''Resolve the quality the an specific value has for an attribute.
# '''
# self.warn_stream("DEPRECATED: __buildAttrQuality(%s)" % (attrName))
# attrStruct = self._getAttrStruct(attrName)
# if QUALITIES in attrStruct:
# qualities = attrStruct[QUALITIES]
# if self.__checkQuality(attrName, attrValue, ALARM):
# return PyTango.AttrQuality.ATTR_ALARM
# elif self.__checkQuality(attrName, attrValue, WARNING):
# return PyTango.AttrQuality.ATTR_WARNING
# elif self.__checkQuality(attrName, attrValue, CHANGING):
# return PyTango.AttrQuality.ATTR_CHANGING
# if self.attr_IsTooFarEnable_read and \
# SETPOINT in attrStruct:
# try:
# # This is to review if, not having the value changing
# # (previous if) the readback value is or not too far away
# # from the given setpoint.
# setpointAttrName = attrStruct[SETPOINT]
# try:
# readback = attrStruct[READVALUE].value
# except:
# return PyTango.AttrQuality.ATTR_INVALID
# setpoint = \
# self._getAttrStruct(setpointAttrName)[READVALUE].value
# if setpoint is not None:
# if self.__tooFar(attrName, setpoint, readback):
# if attrName in self._traceTooClose:
# self.warn_stream("Found %s readback (%6.3f) "
# "too far from setpoint "
# "(%6.3f)" % (attrName,
# readback,
# setpoint))
# return PyTango.AttrQuality.ATTR_WARNING
# if attrName in self._traceTooClose:
# self.info_stream("Found %s readback (%6.3f) "
# "close enought to the setpoint "
# "(%6.3f)" % (attrName, readback,
# setpoint))
# except Exception as e:
# self.warn_stream("Error comparing readback with "
# "setpoint: %s" % (e))
# traceback.print_exc()
# return PyTango.AttrQuality.ATTR_INVALID
# return PyTango.AttrQuality.ATTR_VALID
# def __tooFar(self, attrName, setpoint, readback):
# '''
# Definition of 'too far': when the readback and the setpoint
# differ more than a certain percentage, the quality of the
# readback attribute is warning.
# But this doesn't apply when the setpoint is too close to 0.
#
# Definition of 'too far': there are two different definitions
# - When the setpoint is "close to 0" the warning quality alert
# will be raised if the readback has a difference bigger than
# 0.1 (plus minus).
# - If the setpoint is not that close to 0, the warning alert
# will be raised when their difference is above the 10%.
# It has been used a multiplicative notation but it can be
# made also with additive notation using a multiplication
# factor.
# '''
# self.warn_stream("DEPRECATED: __tooFar(%s)" % (attrName))
# if (-CLOSE_ZERO < setpoint < CLOSE_ZERO) or readback == 0:
# diff = abs(setpoint - readback)
# if (diff > CLOSE_ZERO):
# return True
# else:
# diff = abs(setpoint / readback)
# # 10%
# if (1-REL_PERCENTAGE > diff or diff > 1+REL_PERCENTAGE):
# return True
# return False
# def __checkQuality(self, attrName, attrValue, qualityInQuery):
# '''Check if this attrName with the give attrValue is with in the
# threshold of the give quality
# '''
# self.warn_stream("DEPRECATED: __checkQuality(%s)" % (attrName))
# attrStruct = self._getAttrStruct(attrName)
# qualities = attrStruct[QUALITIES]
# if qualityInQuery in qualities:
# if type(qualities[qualityInQuery]) == dict:
# if self.__checkAbsoluteRange(qualities[qualityInQuery],
# attrValue):
# return True
# buffer = attrStruct[READVALUE]
# if self.__checkRelativeRange(qualities[qualityInQuery],
# buffer,
# attrValue):
# return True
# return False
# elif type(qualities[qualityInQuery]) == list:
# if attrValue in qualities[qualityInQuery]:
# return True
# return False
# def __checkAbsoluteRange(self, qualityDict, referenceValue):
# '''Check if the a value is with in any of the configured absolute
# ranges for the specific configuration with in an attribute.
# '''
# # self.warn_stream("DEPRECATED: __checkAbsoluteRango()")
# if ABSOLUTE in qualityDict:
# if ABOVE in qualityDict[ABSOLUTE]:
# above = qualityDict[ABSOLUTE][ABOVE]
# else:
# above = float('inf')
# if BELOW in qualityDict[ABSOLUTE]:
# below = qualityDict[ABSOLUTE][BELOW]
# else:
# below = float('-inf')
# if UNDER in qualityDict[ABSOLUTE] and \
# qualityDict[ABSOLUTE][UNDER]:
# if above < referenceValue < below:
# return True
# else:
# if not below <= referenceValue <= above:
# return True
# return False
# def __checkRelativeRange(self, qualityDict, buffer, referenceValue):
# '''Check if the a value is with in any of the configured relative
# ranges for the specific configuration with in an attribute.
# '''
# # self.warn_stream("DEPRECATED: __checkRelativeRange()")
# if RELATIVE in qualityDict and isintance(buffer, CircularBuffer):
# if buffer.std >= qualityDict[RELATIVE]:
# return True
# return False
def _getAttrStruct(self, attrName):
'''Given an attribute name, return the internal structure that
defines its behaviour.
'''
try:
return self._plcAttrs[
self.__getDctCaselessKey(attrName, self._plcAttrs)]
except ValueError as e:
pass # simply was not in the plcAttrs
try:
return self._internalAttrs[
self.__getDctCaselessKey(attrName, self._internalAttrs)]
except ValueError as e:
pass # simply was not in the internalAttrs
if attrName.count('_'):
mainName, suffix = attrName.rsplit('_', 1)
try:
return self._internalAttrs[
self.__getDctCaselessKey(mainName,
self._internalAttrs)]
except ValueError as e:
pass # simply was not in the internalAttrs
return None
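    # Lookup sketch (hypothetical names): 'gun_hv_onc' resolves caselessly
    # against _plcAttrs or _internalAttrs; a name like
    # 'GUN_HV_I_AutoStop_Mean' that is not found directly falls back to its
    # part before the last '_' within the internal attributes.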
def __getDctCaselessKey(self, key, dct):
position = [e.lower() for e in dct].index(key.lower())
return dct.keys()[position]
# def __solveFormula(self, attrName, VALUE, formula):
# '''Some attributes can have a formula to interpret or modify the
# value given from the PLC to the value reported by the device.
# '''
# self.warn_stream("DEPRECATED: __solveFormula(%s)" % (attrName))
# result = eval(formula)
# # self.debug_stream("%s formula eval(\"%s\") = %s" % (attrName,
# # formula,
# # result))
# return result
# def __setAttrValue(self, attr, attrName, attrType, attrValue,
# timestamp):
# '''
# '''
# self.warn_stream("DEPRECATED: __setAttrValue(%s)" % (attrName))
# attrStruct = self._getAttrStruct(attrName)
# self.__applyReadValue(attrName, attrValue, timestamp)
# if attrValue is None:
# attr.set_value_date_quality(0, timestamp,
# PyTango.AttrQuality.ATTR_INVALID)
# # if MEANINGS in attrStruct:
# # attrMeaning = self.__buildAttrMeaning(attrName, attrValue)
# # attrQuality = self.__buildAttrQuality(attrName, attrValue)
# # attr.set_value_date_quality(attrMeaning, timestamp,
# # attrQuality)
# elif QUALITIES in attrStruct:
# attrQuality = self.__buildAttrQuality(attrName, attrValue)
# attr.set_value_date_quality(attrValue, timestamp,
# attrQuality)
# else:
# attrQuality = PyTango.AttrQuality.ATTR_VALID
# attr.set_value_date_quality(attrValue, timestamp, attrQuality)
# if WRITEADDR in attrStruct:
# writeAddr = attrStruct[WRITEADDR]
# sp_addr = self.offset_sp + writeAddr
# if WRITEBIT in attrStruct:
# writeBit = attrStruct[WRITEBIT]
# writeValue = self.read_db.bit(sp_addr, writeBit)
# else:
# writeValue = self.read_db.get(sp_addr, *attrType)
# if FORMULA in attrStruct and \
# 'write' in attrStruct[FORMULA]:
# try:
# writeValue = self.\
# __solveFormula(attrName, writeValue,
# attrStruct[FORMULA]['write'])
# except Exception as e:
# self.error_stream("Cannot solve formula for the "
# "attribute %s: %s" % (attrName,
# e))
# # if attrStruct.formula is not None:
# # try:
# # writeValue = attrStruct.formula.writeHook(
# # writeValue)
# # except Exception as e:
# # self.error_stream("Cannot solve formula for the "
# # "attribute %s: %s" % (attrName,
# # e))
# if 'format' in attrStruct:
# try:
# format = attrStruct['format']
# if format.endswith("d"):
# writeValue = int(format % writeValue)
# else:
# writeValue = float(format % writeValue)
# except Exception as e:
# self.error_stream("Cannot format value for the "
# "attribute %s: %s"
# % (attrName, e))
# self.__applyWriteValue(attrName, writeValue)
# try:
# attr.set_write_value(writeValue)
# except PyTango.DevFailed as e:
# self.tainted = "%s/%s: failed to set point %s (%s)"\
# % (self.get_name(), attrName, writeValue, e)
# self.error_stream(self.tainted)
# elif WRITEVALUE in attrStruct:
# try:
# writeValue = attrStruct[WRITEVALUE]
# attr.set_write_value(writeValue)
# except PyTango.DevFailed:
# self.tainted = self.get_name() + '/'+attrName + \
# ': failed to set point '+str(writeValue)
# self.error_stream("On setAttrValue(%s,%s) tainted: %s"
# % (attrName, str(attrValue),
# self.tainted))
# except Exception as e:
# self.warn_stream("On setAttrValue(%s,%s) Exception: %s"
# % (attrName, str(attrValue), e))
# # self.__doTraceAttr(attrName, "__setAttrvalue")
# # Don't need to trace each time the attribute is read.
@AttrExc
def read_attr(self, attr):
'''
'''
if self.get_state() == PyTango.DevState.FAULT or \
not self.has_data_available():
return # raise AttributeError("Not available in fault state!")
name = attr.get_name()
attrStruct = self._getAttrStruct(name)
if any([isinstance(attrStruct, kls) for kls in [PLCAttr,
InternalAttr,
EnumerationAttr,
MeaningAttr,
HistoryAttr,
AutoStopAttr,
AutoStopParameter,
GroupAttr
]]):
attrStruct.read_attr(attr)
return
# self.warn_stream("DEPRECATED read_attr for %s" % (name))
# attrType = attrStruct[TYPE]
# read_addr = attrStruct[READADDR]
# if READBIT in attrStruct:
# read_bit = attrStruct[READBIT]
# else:
# read_bit = None
# try:
# if read_bit:
# read_value = self.read_db.bit(read_addr, read_bit)
# else:
# read_value = self.read_db.get(read_addr, *attrType)
# if FORMULA in attrStruct and \
# 'read' in attrStruct[FORMULA]:
# read_value = self.\
# __solveFormula(name, read_value,
# attrStruct[FORMULA]['read'])
# read_t = time.time()
# except Exception as e:
# self.error_stream('Trying to read %s/%s and looks to be not '
# 'well connected to the plc.'
# % (self.get_name(), attr.get_name()))
# self.debug_stream('Exception (%s/%s): %s'
# % (self.get_name(), attr.get_name(), e))
# traceback.print_exc()
# else:
# self.__setAttrValue(attr, name, attrType, read_value, read_t)
# @AttrExc
# def read_spectrumAttr(self, attr):
# '''This method is a generic read for dynamic spectrum attributes in
# this device. But right now only supports the historic buffers.
#
# The other spectrum attributes, related with the events
# generation are not using this because they have they own method.
# '''
# if self.get_state() == PyTango.DevState.FAULT or \
# not self.has_data_available():
# return # raise AttributeError("Not available in fault state!")
# name = attr.get_name()
# attrStruct = self._getAttrStruct(name)
# if any([isinstance(attrStruct, kls) for kls in [PLCAttr,
# InternalAttr,
# EnumerationAttr,
# MeaningAttr,
# AutoStopAttr,
# AutoStopParameter,
# GroupAttr
# ]]):
# attrStruct.read_spectrumAttr(attr)
# return
# self.warn_stream("DEPRECATED read_spectrumAttr for %s" % (name))
# if BASESET in attrStruct:
# attrValue = self.__buildHistoryBufferString(name)
# elif AUTOSTOP in attrStruct:
# attrValue = attrStruct[READVALUE].array
# attrTimeStamp = attrStruct[READTIME] or time.time()
# attrQuality = attrStruct[LASTEVENTQUALITY] or \
# PyTango.AttrQuality.ATTR_VALID
# self.debug_stream("Attribute %s: value=%s timestamp=%g quality=%s "
# "len=%d" % (name, attrValue, attrTimeStamp,
# attrQuality, len(attrValue)))
# attr.set_value_date_quality(attrValue, attrTimeStamp, attrQuality)
# def read_logical_attr(self, attr):
# '''
# '''
# self.warn_stream("DEPRECATED: read_logical_attr(%s)"
# % attr.get_name())
# if self.get_state() == PyTango.DevState.FAULT or \
# not self.has_data_available():
# return # raise AttributeError("Not available in fault state!")
# attrName = attr.get_name()
# if attrName in self._internalAttrs:
# ret = self._evalLogical(attrName)
# read_t = self._internalAttrs[attrName][READTIME]
# self.__setAttrValue(attr, attrName, PyTango.DevBoolean, ret,
# read_t)
# def _evalLogical(self, attrName):
# '''
# '''
# self.warn_stream("DEPRECATED: _evalLogical(%s)" % (attrName))
# if attrName not in self._internalAttrs:
# return
# attrStruct = self._internalAttrs[attrName]
# if attrStruct.logicObj is None:
# return
# logic = attrStruct.logicObj.logic
# values = []
# self.info_stream("Evaluate %s LogicAttr" % attrName)
# for key in logic.keys():
# try:
# if type(logic[key]) == dict:
# values.append(self.__evaluateDict(key, logic[key]))
# elif type(logic[key]) == list:
# values.append(self.__evaluateList(key, logic[key]))
# else:
# self.warn_stream("step less to evaluate %s for "
# "key %s unmanaged content type"
# % (attrName, key))
# except Exception as e:
# self.error_stream("cannot eval logic attr %s for key %s: "
# "%s" % (attrName, key, e))
# traceback.print_exc()
# if attrStruct.logicObj.operator == 'or':
# ret = any(values)
# elif attrStruct.logicObj.operator == 'and':
# ret = all(values)
# attrStruct.read_t = time.time()
# if attrStruct.logicObj.inverted:
# ret = not ret
# self.info_stream("For %s: values %s (%s) (inverted) answer %s"
# % (attrName, values, attrStruct.operator, ret))
# else:
# self.info_stream("For %s: values %s (%s) answer %s"
# % (attrName, values, attrStruct.operator, ret))
# attrStruct.read_value = ret
# return ret
# def __evaluateDict(self, attrName, dict2eval):
# """
# """
# self.warn_stream("DEPRECATED: __evaluateDict(%s)" % (attrName))
# self.info_stream("%s dict2eval: %s" % (attrName, dict2eval))
# for key in dict2eval.keys():
# if key == QUALITIES:
# return self.__evaluateQuality(attrName, dict2eval[key])
# def __evaluateList(self, attrName, list2eval):
# """
# """
# self.warn_stream("DEPRECATED: __evaluateList(%s)" % (attrName))
# self.info_stream("%s list2eval: %r" % (attrName, list2eval))
# value = self.__getAttrReadValue(attrName)
# self.info_stream("%s value: %r" % (attrName, value))
# return value in list2eval
# def __evaluateQuality(self, attrName, searchList):
# """
# """
# self.warn_stream("DEPRECATED: __evaluateQuality(%s)" % (attrName))
# attrStruct = self._getAttrStruct(attrName)
# if LASTEVENTQUALITY in attrStruct:
# quality = attrStruct[LASTEVENTQUALITY]
# return quality in searchList
    #     return False
# FIXME: this method is merged with read_attr(), and once write
# versions become also merged, they will be not necessary
# anymore.
# @AttrExc
# def read_attr_bit(self, attr):
# '''
# '''
# if self.get_state() == PyTango.DevState.FAULT or \
# not self.has_data_available():
# return # raise AttributeError("Not available in fault state!")
# name = attr.get_name()
# attrType = PyTango.DevBoolean
# attrStruct = self._getAttrStruct(name)
# if any([isinstance(attrStruct, kls) for kls in [PLCAttr,
# InternalAttr,
# EnumerationAttr,
# MeaningAttr,
# AutoStopAttr,
# AutoStopParameter,
# GroupAttr
# ]]):
# attrStruct.read_attr(attr)
# return
# self.warn_stream("DEPRECATED read_attr_bit for %s" % (name))
# read_addr = attrStruct[READADDR]
# read_bit = attrStruct[READBIT]
# # if WRITEADDR in attrStruct:
# # write_addr = attrStruct[WRITEADDR]
# # write_bit = attrStruct[WRITEBIT]
# # else:
# # write_addr = None
# # write_bit = None
# try:
# if read_addr and read_bit:
# read_value = self.read_db.bit(read_addr, read_bit)
# if FORMULA in attrStruct and \
# 'read' in attrStruct[FORMULA]:
# read_value = self.\
# __solveFormula(name, read_value,
# attrStruct[FORMULA]['read'])
# read_t = time.time()
# else:
# read_value, read_t, _ = attrStruct.vtq
# attrType = attrStruct.type
# except Exception as e:
# self.error_stream('Trying to read %s/%s and looks to be not '
# 'well connected to the plc.'
# % (self.get_name(), attr.get_name()))
# self.debug_stream('Exception (%s/%s): %s'
# % (self.get_name(), attr.get_name(), e))
# else:
# self.__setAttrValue(attr, name, attrType, read_value, read_t)
# def read_attrGrpBit(self, attr):
# '''
# '''
# self.warn_stream("DEPRECATED: read_attrGrpBit(%s)"
# % (attr.get_name()))
# if self.get_state() == PyTango.DevState.FAULT or \
# not self.has_data_available():
# return # raise AttributeError("Not available in fault state!")
# attrName = attr.get_name()
# if attrName in self._internalAttrs:
# attrStruct = self._getAttrStruct(attrName)
# if 'read_set' in attrStruct:
# read_value = self.__getGrpBitValue(attrName,
# attrStruct['read_set'],
# self.read_db)
# read_t = time.time()
# if 'write_set' in attrStruct:
# write_set = attrStruct['write_set']
# write_value = self.__getGrpBitValue(attrName,
# write_set,
# self.write_db)
# self.__applyWriteValue(attrName,
# attrStruct[WRITEVALUE])
# self.__setAttrValue(attr, attrName, PyTango.DevBoolean,
# read_value, read_t)
# def __getGrpBitValue(self, attrName, addrSet, memSegment):
# '''
# '''
# self.warn_stream("DEPRECATED: __getGrpBitValue(%s)" % (attrName))
# try:
# bitSet = []
# for addr, bit in addrSet:
# bitSet.append(memSegment.bit(addr, bit))
# if all(bitSet):
# return True
# except Exception as e:
# self.error_stream("Cannot get the bit group for %s [%s]: %s\n"
# % (attrName, str(addrSet), e,
# str(self._internalAttrs[attrName])))
# return False
def read_lock(self):
'''
'''
if self.get_state() == PyTango.DevState.FAULT or \
not self.has_data_available():
return # raise AttributeError("Not available in fault state!")
rbyte = self.read_db.b(self.locking_raddr)
locker = bool(rbyte & (1 << self.locking_rbit))
return locker
@AttrExc
def read_Locking(self, attr):
        '''The read of this attribute is a boolean that represents whether
        the control of the PLC has been taken by Tango. This doesn't seem
        to correspond exactly to the meaning of the "Local Lock" boolean
        in the memory map of the PLC.'''
if self.get_state() == PyTango.DevState.FAULT or \
not self.has_data_available():
return # raise AttributeError("Not available in fault state!")
self._checkLocking()
attrName = attr.get_name()
value, timestamp, quality = self._plcAttrs[attrName].vtq
attr.set_value_date_quality(value, timestamp, quality)
@AttrExc
def read_Lock_ST(self, attr):
'''
'''
if self.get_state() == PyTango.DevState.FAULT or \
not self.has_data_available():
return # raise AttributeError("Not available in fault state!")
attrName = attr.get_name()
self.info_stream('DEPRECATED: reading %s' % (attrName))
value, timestamp, quality = self._plcAttrs[attrName].vtq
attr.set_value_date_quality(value, timestamp, quality)
def _checkLocking(self):
if self._isLocalLocked() or self._isRemoteLocked():
self._lockingChange(True)
else:
self._lockingChange(False)
def _isLocalLocked(self):
return self._deviceIsInLocal and \
self._plcAttrs['Lock_ST'].rvalue == 1
def _isRemoteLocked(self):
return self._deviceIsInRemote and \
self._plcAttrs['Lock_ST'].rvalue == 2
def _lockingChange(self, newLockValue):
if self.is_lockedByTango != newLockValue:
if 'Locking' in self._plcAttrs:
self._plcAttrs['Locking'].read_value = newLockValue
self.is_lockedByTango = newLockValue
# @AttrExc
# def read_internal_attr(self, attr):
# '''this is referencing to a device attribute that doesn't
# have plc representation
# '''
# self.warn_stream("DEPRECATED: read_internal_attr(%s)"
# % (attr.get_name()))
# if self.get_state() == PyTango.DevState.FAULT or \
# not self.has_data_available():
# return # raise AttributeError("Not available in fault state!")
# try:
# attrName = attr.get_name()
# if attrName in self._internalAttrs:
# attrStruct = self._getAttrStruct(attrName)
# if READVALUE in attrStruct:
# read_value = attrStruct[READVALUE]
# if read_value is None:
# attr.set_value_date_quality(0, time.time(),
# PyTango.AttrQuality.
# ATTR_INVALID)
# else:
# attr.set_value(read_value)
# else:
# attr.set_value_date_quality(0, time.time(),
# PyTango.AttrQuality.
# ATTR_INVALID)
# if WRITEVALUE in attrStruct:
# write_value = attrStruct[WRITEVALUE]
# attr.set_write_value(write_value)
# except Exception as e:
# self.error_stream("read_internal_attr(%s) Exception %s"
# % (attr.get_name(), e))
# # Read Attr method for dynattrs ---
####
# Write Attr method for dynattrs ---
def prepare_write(self, attr):
'''
'''
self.warn_stream(": prepare_write(%s)"
% (attr.get_name()))
data = []
self.Locking.get_write_value(data)
val = data[0]
if attr.get_name().lower() in ['locking']:
self.debug_stream("Do not do the write checks, when what is "
"wanted is to write the locker")
# FIXME: perhaps check if it is already lock by another program
elif not self.read_lock():
try:
exceptionMsg = 'first required to set Locking flag on '\
'%s device' % self.get_name()
except Exception as e:
self.error_stream("Exception in prepare_write(): %s" % (e))
else:
raise LinacException(exceptionMsg)
if self.tainted:
raise LinacException('mismatch with '
'specification:\n'+self.tainted)
data = []
attr.get_write_value(data)
return data[0]
@AttrExc
def write_attr(self, attr):
'''
'''
if self.get_state() == PyTango.DevState.FAULT or \
not self.has_data_available():
return # raise AttributeError("Not available in fault state!")
name = attr.get_name()
attrStruct = self._getAttrStruct(name)
if any([isinstance(attrStruct, kls) for kls in [PLCAttr,
InternalAttr,
EnumerationAttr,
MeaningAttr,
AutoStopAttr,
AutoStopParameter,
GroupAttr
]]):
attrStruct.write_attr(attr)
return
# self.warn_stream("DEPRECATED write_attr for %s" % (name))
# attrType = attrStruct[TYPE]
# write_addr = attrStruct[WRITEADDR]
# write_value = self.prepare_write(attr)
# if FORMULA in attrStruct and 'write' in attrStruct[FORMULA]:
# write_value = self.__solveFormula(name, write_value,
# attrStruct[FORMULA]['write'])
# attrStruct[WRITEVALUE] = write_value
# # self.__doTraceAttr(name, "write_attr")
# self.write_db.write(write_addr, write_value, attrType)
# @AttrExc
# def write_attr_bit(self, attr):
# '''
# '''
# if self.get_state() == PyTango.DevState.FAULT or \
# not self.has_data_available():
# return # raise AttributeError("Not available in fault state!")
# name = attr.get_name()
# write_value = self.prepare_write(attr)
# self.doWriteAttrBit(attr, name, write_value)
# # self.__doTraceAttr(name, "write_attr_bit")
# def doWriteAttrBit(self, attr, name, write_value):
# attrStruct = self._getAttrStruct(name)
# if any([isinstance(attrStruct, kls) for kls in [PLCAttr,
# InternalAttr,
# EnumerationAttr,
# MeaningAttr,
# AutoStopAttr,
# AutoStopParameter,
# GroupAttr
# ]]):
# attrStruct.write_attr(attr)
# return
# self.warn_stream("DEPRECATED write_attr_bit for %s" % (name))
# read_addr = attrStruct[READADDR]
# write_addr = attrStruct[WRITEADDR]
# write_bit = attrStruct[WRITEBIT]
# if FORMULA in attrStruct and 'write' in attrStruct[FORMULA]:
# formula_value = self.\
# __solveFormula(name, write_value,
# attrStruct[FORMULA]['write'])
# self.info_stream("%s received %s formula eval(\"%s\") = %s"
# % (name, write_value,
# attrStruct[FORMULA]['write'],
# formula_value))
# if formula_value != write_value and \
# 'write_not_allowed' in attrStruct[FORMULA]:
# reason = "Write %s not allowed" % write_value
# description = attrStruct[FORMULA]['write_not_allowed']
# PyTango.Except.throw_exception(reason,
# description,
# name,
# PyTango.ErrSeverity.WARN)
# else:
# write_value = formula_value
# if SWITCHDESCRIPTOR in attrStruct:
# # For the switch with autostop, when transition to power on, is
# # necessary to clean the old collected information or it will
# # produce an influence on the conditions.
# descriptor = attrStruct[SWITCHDESCRIPTOR]
# if AUTOSTOP in descriptor:
# # if self.__stateTransitionToOn(write_value,descriptor) \
# # and descriptor.has_key(AUTOSTOP):
# self.__cleanAutoStopCollection(
# attrStruct[SWITCHDESCRIPTOR][AUTOSTOP])
# # #Depending to the on or off transition keys, this will launch
# # #a thread who will modify the ATTR2RAMP, and when that
# # #finishes the write will be set.
# # self.info_stream("attribute %s has receive a write %s"
# # %(name,write_value))
# # if self.__stateTransitionNeeded(write_value,name):
# # #attrStruct[SWITCHDESCRIPTOR]):
# # self.info_stream("doing state transition for %s"%(name))
# # attrStruct[SWITCHDEST] = write_value
# # self.createSwitchStateThread(name)
# # return
# # The returns are necessary to avoid the write that is set
# # later on this method. But in the final else case it has to
# # continue.
# self.__writeBit(name, read_addr, write_addr, write_bit,
# write_value)
# attrStruct[WRITEVALUE] = write_value
# self.info_stream("Received write %s (%s)" % (name,
# write_value))
# if self.__isRstAttr(name) and write_value:
# attrStruct[RESETTIME] = time.time()
# def __cleanAutoStopCollection(self, attrName):
# '''This will clean the buffer with autostop condition collected
# data and also the triggered boolean if it was raised.
# '''
# self.warn_stream("DEPRECATED: __cleanAutoStopCollection(%s)"
# % (attrName))
# attrStruct = self._getAttrStruct(attrName)
# if READVALUE in attrStruct and len(attrStruct[READVALUE]) != 0:
# self.info_stream("Clean up the buffer because collected data "
# "doesn't have sense having the swithc off.")
# attrStruct[READVALUE].resetBuffer()
# self._cleanTriggeredFlag(attrName)
# def __writeBit(self, name, read_addr, write_addr, write_bit,
# write_value, dry=False):
# '''
# '''
# rbyte = self.read_db.b(read_addr)
# attrStruct = self._getAttrStruct(name)
# if write_value:
# # sets bit 'bitno' of b
# toWrite = rbyte | (int(1) << write_bit)
# # a byte of 0s with a unique 1 in the place to set this 1
# else:
# # clears bit 'bitno' of b
# toWrite = rbyte & ((0xFF) ^ (1 << write_bit))
# # a byte of 1s with a unique 0 in the place to set this 0
# if not dry:
# self.write_db.write(write_addr, toWrite,
# TYPE_MAP[PyTango.DevUChar])
# reRead = self.read_db.b(read_addr)
# self.debug_stream("Writing %s boolean to %6s (%d.%d) byte was "
# "%s; write %s; now %s"
# % (name, write_value, write_addr, write_bit,
# bin(rbyte), bin(toWrite), bin(reRead)))
# def write_attrGrpBit(self, attr):
# '''
# '''
# self.warn_stream("DEPRECATED: write_attrGrpBit(%s)"
# % (attr.get_name()))
# if self.get_state() == PyTango.DevState.FAULT or \
# not self.has_data_available():
# return # raise AttributeError("Not available in fault state!")
# attrName = attr.get_name()
# if attrName in self._internalAttrs:
# attrDescr = self._internalAttrs[attrName]
# if 'write_set' in attrDescr:
# writeValue = self.prepare_write(attr)
# self.__setGrpBitValue(attrDescr['write_set'],
# self.write_db, writeValue)
# def __setGrpBitValue(self, addrSet, memSegment, value):
# '''
# '''
# # self.warn_stream("DEPRECATED: __setGrpBitValue()")
# try:
# for addr, bit in addrSet:
# rbyte = self.read_db.b(self.offset_sp+addr)
# if value:
# toWrite = rbyte | (int(value) << bit)
# else:
# toWrite = rbyte & (0xFF) ^ (1 << bit)
# memSegment.write(addr, toWrite, TYPE_MAP[PyTango.DevUChar])
# reRead = self.read_db.b(self.offset_sp+addr)
# self.debug_stream("Writing boolean to %6s (%d.%d) byte "
# "was %s; write %s; now %s"
# % (value, addr, bit, bin(rbyte),
# bin(toWrite), bin(reRead)))
# except Exception as e:
# self.error_stream("Cannot set the bit group: %s" % (e))
# @AttrExc
# def write_Locking(self, attr):
# '''
# '''
# if self.get_state() == PyTango.DevState.FAULT or \
# not self.has_data_available():
# return # raise AttributeError("Not available in fault state!")
# try:
# self.write_lock(attr.get_write_value())
# except:
# self.error_stream('Trying to write %s/%s and looks to be not '
# 'well connected to the plc.'
# % (self.get_name(), attr.get_name()))
# def check_lock(self):
# '''Drops lock if write_value is True, but did not receive
# lock_state if re
# '''
# pass
# # autostop area ---
# def _refreshInternalAutostopParams(self, attrName):
    #         '''There are auxiliary attributes with the autostop conditions;
    #            when their values change they have to be introduced in the
    #            structure of the main attribute with the buffer, which will use
    #            them to take the decision.
# This includes the resizing task of the CircularBuffer.
# '''
# # FIXME: use the spectrum attribute and left the circular buffer
# # as it was to avoid side effects on relative events.
# if attrName not in self._internalAttrs:
# return
# attrStruct = self._internalAttrs[attrName]
# if AUTOSTOP not in attrStruct:
# return
# stopperDict = attrStruct[AUTOSTOP]
# if 'is'+ENABLE in stopperDict:
# refAttr = self._getAttrStruct(stopperDict['is'+ENABLE])
# refAttr[AUTOSTOP][ENABLE] = attrStruct[READVALUE]
# if 'is'+INTEGRATIONTIME in stopperDict:
# refAttr = self._getAttrStruct(stopperDict['is' +
# INTEGRATIONTIME])
# refAttr[AUTOSTOP][INTEGRATIONTIME] = attrStruct[READVALUE]
# # resize the CircularBuffer
# # time per sample int(INTEGRATIONTIME/self._plcUpdatePeriod)
# newBufferSize = \
# int(attrStruct[READVALUE]/self._getPlcUpdatePeriod())
# if refAttr[READVALUE].maxSize() != newBufferSize:
# self.info_stream("%s buffer to be resized from %d to %d "
# "(integration time %f seconds with a "
# "plc reading period of %f seconds)"
# % (attrName, refAttr[READVALUE].maxSize(),
# newBufferSize, attrStruct[READVALUE],
# self._plcUpdatePeriod))
# refAttr[READVALUE].resize(newBufferSize)
# else:
# for condition in [BELOW, ABOVE]:
# if 'is'+condition+THRESHOLD in stopperDict:
# key = 'is'+condition+THRESHOLD
# refAttr = self._getAttrStruct(stopperDict[key])
# refAttr[AUTOSTOP][condition] = attrStruct[READVALUE]
def _getPlcUpdatePeriod(self):
return self._plcUpdatePeriod
def _setPlcUpdatePeriod(self, value):
self.info_stream("modifying PLC Update period: was %.3f and now "
"becomes %.3f." % (self._plcUpdatePeriod, value))
self._plcUpdatePeriod = value
# FIXME: this is hardcoding!!
# self._refreshInternalAutostopParams('GUN_HV_I_AutoStop')
# def _updateStatistic(self, attrName):
# if attrName not in self._internalAttrs:
# return
# attrStruct = self._internalAttrs[attrName]
# if MEAN in attrStruct:
# refAttr = attrStruct[MEAN]
# if refAttr not in self._plcAttrs:
# return
# attrStruct[READVALUE] = self._plcAttrs[refAttr][READVALUE].mean
# elif STD in attrStruct:
# refAttr = attrStruct[STD]
# if refAttr not in self._plcAttrs:
# return
# attrStruct[READVALUE] = self._plcAttrs[refAttr][READVALUE].std
# def _cleanTriggeredFlag(self, attrName):
# triggerName = "%s_%s" % (attrName, TRIGGERED)
# if triggerName not in self._internalAttrs:
# return
# if self._internalAttrs[triggerName][TRIGGERED]:
# # if it's powered off and it was triggered, then this
# # power off would be because autostop has acted.
# # Is needed to clean the flag.
# self.info_stream("Clean the autostop triggered flag "
# "for %s" % (attrName))
# self._internalAttrs[triggerName][TRIGGERED] = False
# def _checkAutoStopConditions(self, attrName):
# '''The attribute with the Circular buffer has to do some checks
# to decide if it's necessary to proceed with the autostop
# procedure.
# '''
# if attrName not in self._plcAttrs:
# return
# attrStruct = self._plcAttrs[attrName]
# if AUTOSTOP not in attrStruct:
# return
# if ENABLE not in attrStruct[AUTOSTOP] or \
# not attrStruct[AUTOSTOP][ENABLE]:
# return
# if SWITCHDESCRIPTOR in attrStruct[AUTOSTOP]:
# switchStruct = \
# self._getAttrStruct(attrStruct[AUTOSTOP][SWITCHDESCRIPTOR])
# if READVALUE in switchStruct and \
# not switchStruct[READVALUE]:
# return
# if len(attrStruct[READVALUE]) < attrStruct[READVALUE].maxSize():
# return
# if SWITCHDESCRIPTOR in attrStruct[AUTOSTOP]:
# switchStruct = \
# self._getAttrStruct(attrStruct[AUTOSTOP][SWITCHDESCRIPTOR])
# if switchStruct is None or READVALUE not in switchStruct:
# return
# if SWITCHDEST in switchStruct:
# if switchStruct[SWITCHDEST]:
# return
# elif not switchStruct[READVALUE]:
# return
# for condition in [BELOW, ABOVE]:
# if condition in attrStruct[AUTOSTOP]:
# refValue = attrStruct[AUTOSTOP][condition]
# meanValue = attrStruct[READVALUE].mean
# # BELOW and ABOVE is compared with mean
# if condition == BELOW and refValue > meanValue:
# self.info_stream("Attribute %s stop condition "
# "%s is met ref=%g > mean=%g"
# % (attrName, condition,
# refValue, meanValue))
# self._doAutostop(attrName, condition)
# elif condition == ABOVE and refValue < meanValue:
# self.info_stream("Attribute %s stop condition "
# "%s is met ref=%g < mean=%g"
# % (attrName, condition,
# refValue, meanValue))
# self._doAutostop(attrName, condition)
# def _doAutostop(self, attrName, condition):
# attrStruct = self._plcAttrs[attrName]
# refValue = attrStruct[AUTOSTOP][condition]
# meanValue, stdValue = attrStruct[READVALUE].meanAndStd
# self.doWriteAttrBit(attrStruct[AUTOSTOP][SWITCHDESCRIPTOR], False)
# triggerStruct = self._internalAttrs["%s_%s"
# % (attrName, TRIGGERED)]
# self.warn_stream("Flag the autostop trigger for attribute %s"
# % (attrName))
# triggerStruct[TRIGGERED] = True
# done autostop area ---
def __isHistoryBuffer(self, attrName):
attrStruct = self._getAttrStruct(attrName)
if attrStruct is not None and BASESET in attrStruct and \
type(attrStruct[READVALUE]) == HistoryBuffer:
return True
return False
# def __buildHistoryBufferString(self, attrName):
# if self.__isHistoryBuffer(attrName):
# valuesList = self._getAttrStruct(attrName)[READVALUE].array
# self.debug_stream("For %s, building string list from %s"
# % (attrName, valuesList))
# strList = []
# for value in valuesList:
# strList.append(self.__buildAttrMeaning(attrName, value))
# return strList
# return None
# @AttrExc
# def write_internal_attr(self, attr):
# '''this is referencing to a device attribute that doesn't
# have plc representation'''
# self.warn_stream("DEPRECATED: write_internal_attr(%s)"
# % (attr.get_name()))
# if self.get_state() == PyTango.DevState.FAULT or \
# not self.has_data_available():
# return # raise AttributeError("Not available in fault state!")
# attrName = attr.get_name()
# self.info_stream('write_internal_attr(%s)' % (attrName))
#
# data = []
# attr.get_write_value(data)
# # FIXME: some cases must not allow values <= 0 ---
# if attrName in self._internalAttrs:
# attrDescr = self._internalAttrs[attrName]
# if WRITEVALUE in attrDescr:
# attrDescr[WRITEVALUE] = data[0]
# if attrDescr[TYPE] in [PyTango.DevDouble,
# PyTango.DevFloat]:
# attrValue = float(data[0])
# elif attrDescr[TYPE] in [PyTango.DevBoolean]:
# attrValue = bool(data[0])
# attrDescr[READVALUE] = attrValue
# attrQuality = self.\
# __buildAttrQuality(attrName, attrDescr[READVALUE])
# attrDescr.store(attrDescr[WRITEVALUE])
# if EVENTS in attrDescr:
# self.fireEventsList([[attrName, attrValue,
# attrQuality]], log=True)
def loadAttrFile(self):
self.attr_loaded = True
if self.AttrFile:
attr_fname = self.AttrFile
else:
attr_fname = self.get_name().split('/')[-1]+'.py'
try:
self.attr_list.build(attr_fname.lower())
except Exception as e:
if self.get_state() != PyTango.DevState.FAULT:
self.set_state(PyTango.DevState.FAULT)
self.set_status("ReloadAttrFile() failed (%s)" % (e),
important=True)
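    # Illustrative note (added for clarity, the device name is a placeholder):
    # the fallback file name is derived from the device name, e.g. a device
    # named 'li/ct/plc1' with an empty AttrFile property would load 'plc1.py'
    # (lower-cased before being handed to attr_list.build()).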
@AttrExc
def read_lastUpdateStatus(self, attr):
'''
'''
if self.get_state() == PyTango.DevState.FAULT or \
not self.has_data_available():
return # raise AttributeError("Not available in fault state!")
attr.set_value(self.read_lastUpdateStatus_attr)
@AttrExc
def read_lastUpdate(self, attr):
'''
'''
if self.get_state() == PyTango.DevState.FAULT or \
not self.has_data_available():
return # raise AttributeError("Not available in fault state!")
attr.set_value(self.read_lastUpdate_attr)
# Done Write Attr method for dynattrs ---
# PROTECTED REGION END --- LinacData.global_variables
def __init__(self, cl, name):
PyTango.Device_4Impl.__init__(self, cl, name)
self.log = self.get_logger()
LinacData.init_device(self)
def delete_device(self):
self.info_stream('deleting device '+self.get_name())
self._plcUpdateJoiner.set()
self._tangoEventsJoiner.set()
self._newDataAvailable.set()
self.attr_list.remove_all()
def init_device(self):
try:
self.debug_stream("In "+self.get_name()+"::init_device()")
self.set_change_event('State', True, False)
self.set_change_event('Status', True, False)
self.attr_IsSayAgainEnable_read = False
self.attr_IsTooFarEnable_read = True
self.attr_forceWriteDB_read = ""
self.attr_cpu_percent_read = 0.0
self.attr_mem_percent_read = 0.0
self.attr_mem_rss_read = 0
self.attr_mem_swap_read = 0
            # The attributes Locking, Lock_ST, and HeartBeat also have
            # events, but those calls are made in the AttrList methods
            # that dynamically build them.
self.set_state(PyTango.DevState.INIT)
            self.set_status('initializing...')
self.get_device_properties(self.get_device_class())
self.debug_stream('AttrFile='+str(self.AttrFile))
self._locals = {'self': self}
self._globals = globals()
            # String with human-readable information about the last update
self.read_lastUpdateStatus_attr = ""
attr = PyTango.Attr('lastUpdateStatus',
PyTango.DevString, PyTango.READ)
attrProp = PyTango.UserDefaultAttrProp()
attrProp.set_label('Last Update Status')
attr.set_default_properties(attrProp)
self.add_attribute(attr, r_meth=self.read_lastUpdateStatus)
self.set_change_event('lastUpdateStatus', True, False)
# numeric attr about the lapsed time of the last update
self.read_lastUpdate_attr = None
attr = PyTango.Attr('lastUpdate',
PyTango.DevDouble, PyTango.READ)
attrProp = PyTango.UserDefaultAttrProp()
attrProp.set_format(latin1('%f'))
attrProp.set_label('Last Update')
attrProp.set_unit('s')
attr.set_default_properties(attrProp)
self.add_attribute(attr, r_meth=self.read_lastUpdate)
self.set_change_event('lastUpdate', True, False)
self._process = psutil.Process()
self.attr_list = AttrList(self)
########
# region to setup the network communication parameters
# restrictions and rename of PLC's ip address
if self.IpAddress == '' and self.PlcAddress == '':
self.error_stream("The PLC ip address must be set")
self.set_state(PyTango.DevState.FAULT)
self.set_status("Please set the PlcAddress property",
important=True)
return
elif not self.IpAddress == '' and self.PlcAddress == '':
self.warn_stream("Deprecated property IpAddress, "
"please use PlcAddress")
self.PlcAddress = self.IpAddress
elif not self.IpAddress == '' and not self.PlcAddress == '' \
and not self.IpAddress == self.PlcAddress:
self.warn_stream("Both PlcAddress and IpAddress "
"properties are defined and with "
"different values, prevail PlcAddress")
            # get the ip address of the host where the device is running;
            # this is to know whether the device runs in local or remote mode
thisHostIp = get_ip()
if not thisHostIp == self.BindAddress:
if not self.BindAddress == '':
self.warn_stream("BindAddress property defined but "
"deprecated and it doesn't match "
"with the host where device runs. "
"Overwrite BindAddress with '%s'"
% thisHostIp)
else:
self.debug_stream("BindAddress of this host '%s'"
% (thisHostIp))
self.BindAddress = thisHostIp
# check if the port corresponds to local and remote modes
if thisHostIp == self.LocalAddress:
                self.info_stream('Connection to the PLC will be '
                                 'in local mode')
self.set_status('Connection in local mode', important=True)
self._deviceIsInLocal = True
self._deviceIsInRemote = False
try:
if self.LocalPort is not None:
self.info_stream('Using specified local port %s'
% (self.LocalPort))
self.Port = self.LocalPort
else:
self.warn_stream('Local port not specified, '
'trying to use deprecated '
'definition')
if self.Port > 2010:
self.Port -= 10
self.warn_stream('converted the port to local'
' %s' % self.Port)
except:
self.error_stream('Error in the port setting')
elif thisHostIp == self.RemoteAddress:
                self.info_stream('Connection to the PLC will be '
                                 'in remote mode')
self.set_status('Connection in remote mode',
important=True)
self._deviceIsInLocal = False
self._deviceIsInRemote = True
try:
if self.RemotePort is not None:
self.info_stream('Using specified remote port %s'
% (self.RemotePort))
self.Port = self.RemotePort
else:
self.warn_stream('Remote port not specified, '
'trying to use deprecated '
'definition')
if self.Port < 2010:
self.Port += 10
self.warn_stream('converted the port to '
'remote %s'
                                             % (self.Port))
except:
self.error_stream('Error in the port setting')
else:
self.warn_stream('Unrecognized IP for local/remote '
'modes (%s)' % thisHostIp)
self.set_status('Unrecognized connection for local/remote'
' mode', important=True)
self._deviceIsInLocal = False
self._deviceIsInRemote = False
# restrictions and renames of the Port's properties
if self.Port is None:
self.debug_stream("The PLC ip port must be set")
self.set_state(PyTango.DevState.FAULT)
self.set_status("Please set the plc ip port",
important=True)
return
# end the region to setup the network communication parameters
########
if self.ReadSize <= 0 or self.WriteSize <= 0:
self.set_state(PyTango.DevState.FAULT)
self.set_status("Block Read/Write sizes not well "
"set (r=%d,w=%d)" % (self.ReadSize,
self.WriteSize),
important=True)
return
# true when reading some attribute failed....
self.tainted = ''
# where the readback of the set points begins
self.offset_sp = self.ReadSize-self.WriteSize
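                # Illustrative example (hypothetical sizes, not taken from any
                # real PLC configuration): with ReadSize=300 and WriteSize=50
                # the setpoint readback area starts at byte 250 of the read
                # block.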
self.attr_loaded = False
self.last_update_time = time.time()
try:
self.connect()
except Exception:
traceback.print_exc()
self.disconnect()
self.set_state(PyTango.DevState.UNKNOWN)
self.info_stream('initialized')
# self.set_state(PyTango.DevState.UNKNOWN)
self._threadingBuilder()
except Exception:
self.error_stream('initialization failed')
self.debug_stream(traceback.format_exc())
self.set_state(PyTango.DevState.FAULT)
self.set_status(traceback.format_exc())
# --------------------------------------------------------------------
# LinacData read/write attribute methods
# --------------------------------------------------------------------
# PROTECTED REGION ID(LinacData.initialize_dynamic_attributes) ---
def initialize_dynamic_attributes(self):
self.loadAttrFile()
self.attr_list._fileParsed.wait()
self.info_stream("with all the attributes build, proceed...")
# PROTECTED REGION END --- LinacData.initialize_dynamic_attributes
# ------------------------------------------------------------------
# Read EventsTime attribute
# ------------------------------------------------------------------
def read_EventsTime(self, attr):
# self.debug_stream("In " + self.get_name() + ".read_EventsTime()")
# PROTECTED REGION ID(LinacData.EventsTime_read) --
self.attr_EventsTime_read = self._tangoEventsTime.array
# PROTECTED REGION END --- LinacData.EventsTime_read
attr.set_value(self.attr_EventsTime_read)
# ------------------------------------------------------------------
# Read EventsTimeMix attribute
# ------------------------------------------------------------------
def read_EventsTimeMin(self, attr):
# self.debug_stream("In " + self.get_name() +
# ".read_EventsTimeMin()")
# PROTECTED REGION ID(LinacData.EventsTimeMin_read) --
self.attr_EventsTimeMin_read = self._tangoEventsTime.array.min()
if self._tangoEventsTime.array.size < HISTORY_EVENT_BUFFER:
attr.set_value_date_quality(self.attr_EventsTimeMin_read,
time.time(),
PyTango.AttrQuality.ATTR_CHANGING)
return
# PROTECTED REGION END --- LinacData.EventsTimeMin_read
attr.set_value(self.attr_EventsTimeMin_read)
# ------------------------------------------------------------------
# Read EventsTimeMax attribute
# ------------------------------------------------------------------
def read_EventsTimeMax(self, attr):
# self.debug_stream("In " + self.get_name() +
# ".read_EventsTimeMax()")
# PROTECTED REGION ID(LinacData.EventsTimeMax_read) --
self.attr_EventsTimeMax_read = self._tangoEventsTime.array.max()
if self._tangoEventsTime.array.size < HISTORY_EVENT_BUFFER:
attr.set_value_date_quality(self.attr_EventsTimeMax_read,
time.time(),
PyTango.AttrQuality.ATTR_CHANGING)
return
elif self.attr_EventsTimeMax_read >= self._getPlcUpdatePeriod()*3:
attr.set_value_date_quality(self.attr_EventsTimeMax_read,
time.time(),
PyTango.AttrQuality.ATTR_WARNING)
return
# PROTECTED REGION END --- LinacData.EventsTimeMax_read
attr.set_value(self.attr_EventsTimeMax_read)
# ------------------------------------------------------------------
# Read EventsTimeMean attribute
# ------------------------------------------------------------------
def read_EventsTimeMean(self, attr):
# self.debug_stream("In " + self.get_name() +
# ".read_EventsTimeMean()")
# PROTECTED REGION ID(LinacData.EventsTimeMean_read) --
self.attr_EventsTimeMean_read = self._tangoEventsTime.array.mean()
if self._tangoEventsTime.array.size < HISTORY_EVENT_BUFFER:
attr.set_value_date_quality(self.attr_EventsTimeMean_read,
time.time(),
PyTango.AttrQuality.ATTR_CHANGING)
return
elif self.attr_EventsTimeMean_read >= self._getPlcUpdatePeriod():
attr.set_value_date_quality(self.attr_EventsTimeMean_read,
time.time(),
PyTango.AttrQuality.ATTR_WARNING)
return
# PROTECTED REGION END --- LinacData.EventsTimeMean_read
attr.set_value(self.attr_EventsTimeMean_read)
# ------------------------------------------------------------------
# Read EventsTimeStd attribute
# ------------------------------------------------------------------
def read_EventsTimeStd(self, attr):
# self.debug_stream("In " + self.get_name() +
# ".read_EventsTimeStd()")
# PROTECTED REGION ID(LinacData.EventsTimeStd_read) --
self.attr_EventsTimeStd_read = self._tangoEventsTime.array.std()
if self._tangoEventsTime.array.size < HISTORY_EVENT_BUFFER:
attr.set_value_date_quality(self.attr_EventsTimeStd_read,
time.time(),
PyTango.AttrQuality.ATTR_CHANGING)
return
# PROTECTED REGION END --- LinacData.EventsTimeStd_read
attr.set_value(self.attr_EventsTimeStd_read)
# ------------------------------------------------------------------
# Read EventsNumber attribute
# ------------------------------------------------------------------
def read_EventsNumber(self, attr):
# self.debug_stream("In " + self.get_name() +
# ".read_EventsNumber()")
# PROTECTED REGION ID(LinacData.EventsNumber_read) ---
self.attr_EventsNumber_read = self._tangoEventsNumber.array
# PROTECTED REGION END --- LinacData.EventsNumber_read
attr.set_value(self.attr_EventsNumber_read)
# ------------------------------------------------------------------
# Read EventsNumberMin attribute
# ------------------------------------------------------------------
def read_EventsNumberMin(self, attr):
# self.debug_stream("In " + self.get_name() +
# ".read_EventsNumberMin()")
# PROTECTED REGION ID(LinacData.EventsNumberMin_read) ---
self.attr_EventsNumberMin_read = \
int(self._tangoEventsNumber.array.min())
if self._tangoEventsNumber.array.size < HISTORY_EVENT_BUFFER:
attr.set_value_date_quality(self.attr_EventsNumberMin_read,
time.time(),
PyTango.AttrQuality.ATTR_CHANGING)
return
# PROTECTED REGION END --- LinacData.EventsNumberMin_read
attr.set_value(self.attr_EventsNumberMin_read)
# ------------------------------------------------------------------
# Read EventsNumberMax attribute
# ------------------------------------------------------------------
def read_EventsNumberMax(self, attr):
# self.debug_stream("In " + self.get_name() +
# ".read_EventsNumberMax()")
# PROTECTED REGION ID(LinacData.EventsNumberMax_read) ---
self.attr_EventsNumberMax_read = \
int(self._tangoEventsNumber.array.max())
if self._tangoEventsNumber.array.size < HISTORY_EVENT_BUFFER:
attr.set_value_date_quality(self.attr_EventsNumberMax_read,
time.time(),
PyTango.AttrQuality.ATTR_CHANGING)
return
# PROTECTED REGION END --- LinacData.EventsNumberMax_read
attr.set_value(self.attr_EventsNumberMax_read)
# ------------------------------------------------------------------
# Read EventsNumberMean attribute
# ------------------------------------------------------------------
def read_EventsNumberMean(self, attr):
# self.debug_stream("In " + self.get_name() +
# ".read_EventsNumberMean()")
# PROTECTED REGION ID(LinacData.EventsNumberMean_read) ---
self.attr_EventsNumberMean_read = \
self._tangoEventsNumber.array.mean()
if self._tangoEventsNumber.array.size < HISTORY_EVENT_BUFFER:
attr.set_value_date_quality(self.attr_EventsNumberMean_read,
time.time(),
PyTango.AttrQuality.ATTR_CHANGING)
return
# PROTECTED REGION END --- LinacData.EventsNumberMean_read
attr.set_value(self.attr_EventsNumberMean_read)
# ------------------------------------------------------------------
# Read EventsNumberStd attribute
# ------------------------------------------------------------------
def read_EventsNumberStd(self, attr):
# self.debug_stream("In " + self.get_name() +
# ".read_EventsNumberStd()")
# PROTECTED REGION ID(LinacData.EventsNumberStd_read) ---
self.attr_EventsNumberStd_read = \
self._tangoEventsNumber.array.std()
if self._tangoEventsNumber.array.size < HISTORY_EVENT_BUFFER:
attr.set_value_date_quality(self.attr_EventsNumberStd_read,
time.time(),
PyTango.AttrQuality.ATTR_CHANGING)
return
# PROTECTED REGION END --- LinacData.EventsNumberStd_read
attr.set_value(self.attr_EventsNumberStd_read)
# ------------------------------------------------------------------
# Read IsTooFarEnable attribute
# ------------------------------------------------------------------
def read_IsTooFarEnable(self, attr):
self.debug_stream("In " + self.get_name() +
".read_IsTooFarEnable()")
# PROTECTED REGION ID(LinacData.IsTooFarEnable_read) ---
# PROTECTED REGION END --- LinacData.IsTooFarEnable_read
attr.set_value(self.attr_IsTooFarEnable_read)
# ------------------------------------------------------------------
# Write IsTooFarEnable attribute
# ------------------------------------------------------------------
def write_IsTooFarEnable(self, attr):
self.debug_stream("In " + self.get_name() +
".write_IsTooFarEnable()")
data = attr.get_write_value()
# PROTECTED REGION ID(LinacData.IsTooFarEnable_write) ---
self.attr_IsTooFarEnable_read = bool(data)
# PROTECTED REGION END -- LinacData.IsTooFarEnable_write
# ------------------------------------------------------------------
# Read forceWriteDB attribute
# ------------------------------------------------------------------
def read_forceWriteDB(self, attr):
self.debug_stream("In " + self.get_name() +
".read_forceWriteDB()")
# PROTECTED REGION ID(LinacData.forceWriteDB_read) ---
# PROTECTED REGION END --- LinacData.forceWriteDB_read
attr.set_value(self.attr_forceWriteDB_read)
# ------------------------------------------------------------------
# Read cpu_percent attribute
# ------------------------------------------------------------------
def read_cpu_percent(self, attr):
self.debug_stream("In " + self.get_name() +
".read_cpu_percent()")
# PROTECTED REGION ID(LinacData.cpu_percent_read) ---
self.attr_cpu_percent_read = self._process.cpu_percent()
# PROTECTED REGION END --- LinacData.cpu_percent_read
attr.set_value(self.attr_cpu_percent_read)
# ------------------------------------------------------------------
# Read mem_percent attribute
# ------------------------------------------------------------------
def read_mem_percent(self, attr):
self.debug_stream("In " + self.get_name() +
".read_mem_percent()")
# PROTECTED REGION ID(LinacData.mem_percent_read) ---
self.attr_mem_percent_read = self._process.memory_percent()
# PROTECTED REGION END --- LinacData.mem_percent_read
attr.set_value(self.attr_mem_percent_read)
# ------------------------------------------------------------------
# Read mem_rss attribute
# ------------------------------------------------------------------
def read_mem_rss(self, attr):
self.debug_stream("In " + self.get_name() +
".read_mem_rss()")
# PROTECTED REGION ID(LinacData.mem_rss_read) ---
self.attr_mem_rss_read = self._process.memory_info().rss
# PROTECTED REGION END --- LinacData.mem_rss_read
attr.set_value(self.attr_mem_rss_read)
# ------------------------------------------------------------------
# Read mem_swap attribute
# ------------------------------------------------------------------
def read_mem_swap(self, attr):
self.debug_stream("In " + self.get_name() +
".read_mem_swap()")
# PROTECTED REGION ID(LinacData.mem_swap_read) ---
self.attr_mem_swap_read = self._process.memory_full_info().swap
# PROTECTED REGION END --- LinacData.mem_swap_read
attr.set_value(self.attr_mem_swap_read)
# ---------------------------------------------------------------------
# LinacData command methods
# ---------------------------------------------------------------------
@CommandExc
def ReloadAttrFile(self):
"""Reload the file containing the attr description for a
particular plc
:param argin:
:type: PyTango.DevVoid
:return:
:rtype: PyTango.DevVoid """
self.debug_stream('In ReloadAttrFile()')
# PROTECTED REGION ID(LinacData.ReloadAttrFile) ---
self.loadAttrFile()
# PROTECTED REGION END --- LinacData.ReloadAttrFile
@CommandExc
def Exec(self, cmd):
""" Direct command to execute python with in the device, use it
very carefully it's good for debuging but it's a security
thread.
:param argin:
:type: PyTango.DevString
:return:
:rtype: PyTango.DevString """
self.debug_stream('In Exec()')
# PROTECTED REGION ID(LinacData.Exec) ---
L = self._locals
G = self._globals
try:
try:
# interpretation as expression
result = eval(cmd, G, L)
except SyntaxError:
# interpretation as statement
exec cmd in G, L
result = L.get("y")
except Exception as exc:
# handles errors on both eval and exec level
result = exc
if type(result) == StringType:
return result
elif isinstance(result, BaseException):
return "%s!\n%s" % (result.__class__.__name__, str(result))
else:
return pprint.pformat(result)
# PROTECTED REGION END --- LinacData.Exec
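    # A hypothetical client-side sketch (not part of this server) showing how
    # Exec could be called through PyTango; the device name 'li/ct/plc1' is an
    # illustrative placeholder:
    #
    #     import PyTango
    #     proxy = PyTango.DeviceProxy('li/ct/plc1')
    #     # evaluated as an expression; the result comes back pretty-printed
    #     print(proxy.Exec('self.get_state()'))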
@CommandExc
def GetBit(self, args):
""" Command to direct Read a bit position from the PLC memory
:param argin:
:type: PyTango.DevVarShortArray
:return:
:rtype: PyTango.DevBoolean """
self.debug_stream('In GetBit()')
# PROTECTED REGION ID(LinacData.GetBit) ---
idx, bitno = args
if self.read_db is not None and hasattr(self.read_db, 'bit'):
return self.read_db.bit(idx, bitno)
raise IOError("No access to the hardware")
# PROTECTED REGION END --- LinacData.GetBit
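    # Illustrative example (values are placeholders): GetBit([100, 3]) would
    # return the state of bit 3 of read-block byte 100, provided the
    # connection to the hardware is established.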
@CommandExc
def GetByte(self, idx):
"""Command to direct Read a byte position from the PLC memory
:param argin:
:type: PyTango.DevShort
:return:
:rtype: PyTango.DevShort """
self.debug_stream('In GetByte()')
# PROTECTED REGION ID(LinacData.GetByte) ---
if self.read_db is not None and hasattr(self.read_db, 'b'):
return self.read_db.b(idx)
raise IOError("No access to the hardware")
# PROTECTED REGION END --- LinacData.GetByte
@CommandExc
def GetShort(self, idx):
"""Command to direct Read two consecutive byte positions from the
PLC memory and understand it as an integer
:param argin:
:type: PyTango.DevShort
:return:
:rtype: PyTango.DevShort """
self.debug_stream('In GetShort()')
# PROTECTED REGION ID(LinacData.GetShort) ---
if self.read_db is not None and hasattr(self.read_db, 'i16'):
return self.read_db.i16(idx)
raise IOError("No access to the hardware")
        # PROTECTED REGION END --- LinacData.GetShort
@CommandExc
def GetFloat(self, idx):
""" Command to direct Read four consecutive byte positions from the
PLC memory and understand it as an float
:param argin:
:type: PyTango.DevShort
:return:
:rtype: PyTango.DevFloat """
self.debug_stream('In GetFloat()')
# PROTECTED REGION ID(LinacData.GetFloat) ---
if self.read_db is not None and hasattr(self.read_db, 'f'):
return self.read_db.f(idx)
raise IOError("No access to the hardware")
# PROTECTED REGION END --- LinacData.GetFloat
@CommandExc
def HexDump(self):
""" Hexadecimal dump of all the registers in the plc
:param argin:
:type: PyTango.DevVoid
:return:
:rtype: PyTango.DevString """
self.debug_stream('In HexDump()')
# PROTECTED REGION ID(LinacData.HexDump) ---
rblock = self.read_db.buf[:]
wblock = self.write_db.buf[self.write_db.write_start:]
return hex_dump([rblock, wblock])
# PROTECTED REGION END --- LinacData.HexDump
@CommandExc
def Hex(self, idx):
""" Hexadecimal dump the given register of the plc
:param argin:
:type: PyTango.DevShort
:return:
:rtype: PyTango.DevString """
self.debug_stream('In Hex()')
# PROTECTED REGION ID(LinacData.Hex) ---
return hex(self.read_db.b(idx))
# PROTECTED REGION END --- LinacData.Hex
@CommandExc
def DumpTo(self, arg):
""" Hexadecimal dump of all the registers in the plc to a file
:param argin:
:type: PyTango.DevString
:return:
:rtype: PyTango.DevVoid """
self.debug_stream('In DumpTo()')
# PROTECTED REGION ID(LinacData.DumpTo) ---
        with open(arg, 'w') as fout:
            fout.write(self.read_db.buf.tostring())
# PROTECTED REGION END --- LinacData.DumpTo
@CommandExc
def WriteBit(self, args):
""" Write a single bit in the memory of the plc [reg,bit,value]
:param argin:
:type: PyTango.DevVarShortArray
:return:
:rtype: PyTango.DevVoid """
self.debug_stream('In WriteBit()')
# PROTECTED REGION ID(LinacData.WriteBit) ---
idx, bitno, v = args
idx += bitno / 8
bitno %= 8
v = bool(v)
b = self.write_db.b(idx) # Get the byte where the bit is
b = b & ~(1 << bitno) | (v << bitno)
# change only the expected bit
# The write operation of a bit, writes the Byte where it is
self.write_db.write(idx, b, TYPE_MAP[PyTango.DevUChar])
# PROTECTED REGION END --- LinacData.WriteBit
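    # Illustrative example (addresses are placeholders): WriteBit([100, 11, 1])
    # normalizes to byte 101, bit 3 (11 / 8 == 1, 11 % 8 == 3); if that byte
    # currently holds 0b00000001, the read-modify-write stores 0b00001001.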
@CommandExc
def WriteByte(self, args):
""" Write a byte in the memory of the plc [reg,value]
:param argin:
:type: PyTango.DevVarShortArray
:return:
:rtype: PyTango.DevVoid """
self.debug_stream('In WriteByte()')
# PROTECTED REGION ID(LinacData.WriteByte) ---
# args[1] = c_uint8(args[1])
register = args[0]
value = uint8(args[1])
# self.write_db.write( *args )
self.write_db.write(register, value, TYPE_MAP[PyTango.DevUChar])
# PROTECTED REGION END --- LinacData.WriteByte
@CommandExc
def WriteShort(self, args):
""" Write two consecutive bytes in the memory of the plc
[reg,value]
:param argin:
:type: PyTango.DevVarShortArray
:return:
:rtype: PyTango.DevVoid """
self.debug_stream('In WriteShort()')
# PROTECTED REGION ID(LinacData.WriteShort) ---
# args[1] = c_int16(args[1])
register = args[0]
value = int16(args[1])
# self.write_db.write( *args )
self.write_db.write(register, value, TYPE_MAP[PyTango.DevShort])
# PROTECTED REGION END --- LinacData.WriteShort
@CommandExc
def WriteFloat(self, args):
""" Write the representation of a float in four consecutive bytes
in the memory of the plc [reg,value]
:param argin:
:type: PyTango.DevVarShortArray
:return:
:rtype: PyTango.DevVoid """
self.debug_stream('In WriteFloat()')
# PROTECTED REGION ID(LinacData.WriteFloat) ---
idx = int(args[0])
f = float32(args[1])
self.write_db.write(idx, f, TYPE_MAP[PyTango.DevFloat])
# PROTECTED REGION END --- LinacData.WriteFloat
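    # Illustrative example (the address is a placeholder): WriteFloat([40, 3.5])
    # stores the 4-byte float32 representation of 3.5 starting at write
    # offset 40.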
@CommandExc
def ResetState(self):
""" Clean the information set in the Status message and restore
the state
:param argin:
:type: PyTango.DevVoid
:return:
:rtype: PyTango.DevVoid """
self.debug_stream('In ResetState()')
# PROTECTED REGION ID(LinacData.ResetState) ---
self.info_stream('resetting state %s...' % str(self.get_state()))
if self.get_state() == PyTango.DevState.FAULT:
if self.disconnect():
self.set_state(PyTango.DevState.OFF) # self.connect()
elif self.is_connected():
self.set_state(PyTango.DevState.ON)
self.clean_status()
else:
self.set_state(PyTango.DevState.UNKNOWN)
self.set_status("")
# PROTECTED REGION END --- LinacData.ResetState
@CommandExc
def RestoreReadDB(self):
self.forceWriteAttrs()
# To be moved ---
def _threadingBuilder(self):
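        # Summary (descriptive comment added for clarity): two daemon threads
        # cooperate here. plcUpdaterThread polls the PLC at an adaptive period
        # and sets _newDataAvailable after each successful read;
        # newValuesThread waits on that event and propagates the new values as
        # Tango events, keeping per-cycle statistics in the two circular
        # buffers below.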
# Threading joiners ---
self._plcUpdateJoiner = threading.Event()
self._plcUpdateJoiner.clear()
self._tangoEventsJoiner = threading.Event()
self._tangoEventsJoiner.clear()
# Threads declaration ---
self._plcUpdateThread = \
threading.Thread(name="PlcUpdater",
target=self.plcUpdaterThread)
self._tangoEventsThread = \
threading.Thread(name="EventManager",
target=self.newValuesThread)
self._tangoEventsTime = \
CircularBuffer([], maxlen=HISTORY_EVENT_BUFFER, owner=None)
self._tangoEventsNumber = \
CircularBuffer([], maxlen=HISTORY_EVENT_BUFFER, owner=None)
# Threads configuration ---
self._plcUpdateThread.setDaemon(True)
self._tangoEventsThread.setDaemon(True)
self._plcUpdatePeriod = PLC_MAX_UPDATE_PERIOD
self._newDataAvailable = threading.Event()
self._newDataAvailable.clear()
# Launch those threads ---
self._plcUpdateThread.start()
self._tangoEventsThread.start()
def plcUpdaterThread(self):
'''
'''
while not self._plcUpdateJoiner.isSet():
try:
start_t = time.time()
if self.is_connected():
self._readPlcRegisters()
self._addaptPeriod(time.time()-start_t)
else:
if self._plcUpdateJoiner.isSet():
return
self.info_stream('plc not connected')
self.reconnect()
time.sleep(self.ReconnectWait)
except Exception as e:
self.error_stream("In plcUpdaterThread() "
"exception: %s" % (e))
traceback.print_exc()
def _readPlcRegisters(self):
""" Do a read of all the registers in the plc and update the
mirrored memory
:param argin:
:type: PyTango.DevVoid
:return:
:rtype: PyTango.DevVoid """
        # faults are critical and cannot be recovered by restarting things;
        # INIT states mean something is going on that interferes with
        # updating, such as connecting
start_update_time = time.time()
if (self.get_state() == PyTango.DevState.FAULT) or \
not self.is_connected():
if start_update_time - self.last_update_time \
< self.ReconnectWait:
return
else:
if self.connect():
self.set_state(PyTango.DevState.UNKNOWN)
return
# relock if auto-recover from fault ---
try:
self.auto_local_lock()
self.dataBlockSemaphore.acquire()
try:
e = None
# The real reading to the hardware:
up = self.read_db.readall()
except Exception as e:
self.error_stream(
"Could not complete the readall()\n%s" % (e))
finally:
self.dataBlockSemaphore.release()
if e is not None:
raise e
if up:
self.last_update_time = time.time()
if self.get_state() == PyTango.DevState.ALARM:
                    # The ALARM state would be due to attributes with
                    # this quality; don't log because it happens too often.
self.set_state(PyTango.DevState.ON, log=False)
if not self.get_state() in [PyTango.DevState.ON]:
# Recover a ON state when it is responding and the
# state was showing something different.
self.set_state(PyTango.DevState.ON)
else:
self.set_state(PyTango.DevState.FAULT)
self.set_status("No data received from the PLC")
self.disconnect()
end_update_t = time.time()
diff_t = (end_update_t - start_update_time)
if end_update_t-self.last_update_time > self.TimeoutAlarm:
self.set_state(PyTango.DevState.ALARM)
self.set_status("Timeout alarm!")
return
            # disconnect if no new information is sent after a long time
if end_update_t-self.last_update_time > self.TimeoutConnection:
self.disconnect()
self.set_state(PyTango.DevState.FAULT)
self.set_status("Timeout connection!")
return
self.read_lastUpdate_attr = diff_t
timeFormated = time.strftime('%F %T')
self.read_lastUpdateStatus_attr = "last updated at %s in %f s"\
% (timeFormated, diff_t)
attr2Event = [['lastUpdate', self.read_lastUpdate_attr],
['lastUpdateStatus',
self.read_lastUpdateStatus_attr]]
self.fireEventsList(attr2Event,
timestamp=self.last_update_time)
self._newDataAvailable.set()
# when an update goes fine, the period is reduced one step
            # until the minimum
if self._getPlcUpdatePeriod() > PLC_MIN_UPDATE_PERIOD:
self._setPlcUpdatePeriod(self._plcUpdatePeriod -
PLC_STEP_UPDATE_PERIOD)
except tcpblock.Shutdown as exc:
self.set_state(PyTango.DevState.FAULT)
msg = 'communication shutdown requested '\
'at '+time.strftime('%F %T')
self.set_status(msg)
self.error_stream(msg)
self.disconnect()
except socket.error as exc:
self.set_state(PyTango.DevState.FAULT)
msg = 'broken socket at %s\n%s' % (time.strftime('%F %T'),
str(exc))
self.set_status(msg)
self.error_stream(msg)
self.disconnect()
except Exception as exc:
self.set_state(PyTango.DevState.FAULT)
msg = 'update failed at %s\n%s: %s' % (time.strftime('%F %T'),
str(type(exc)),
str(exc))
self.set_status(msg)
self.error_stream(msg)
self.disconnect()
self.last_update_time = time.time()
traceback.print_exc()
def _addaptPeriod(self, diff_t):
current_p = self._getPlcUpdatePeriod()
max_t = PLC_MAX_UPDATE_PERIOD
step_t = PLC_STEP_UPDATE_PERIOD
if diff_t > max_t:
if current_p < max_t:
self.warn_stream(
"plcUpdaterThread() has take too much time "
"(%3.3f seconds)" % (diff_t))
self._setPlcUpdatePeriod(current_p+step_t)
else:
self.error_stream(
"plcUpdaterThread() has take too much time "
"(%3.3f seconds), but period cannot be increased more "
"than %3.3f seconds" % (diff_t, current_p))
elif diff_t > current_p:
exceed_t = diff_t-current_p
factor = int(exceed_t/step_t)
increment_t = step_t+(step_t*factor)
if current_p+increment_t >= max_t:
self.error_stream(
"plcUpdaterThread() it has take %3.6f seconds "
"(%3.6f more than expected) and period will be "
"increased to the maximum (%3.6f)"
% (diff_t, exceed_t, max_t))
self._setPlcUpdatePeriod(max_t)
else:
self.warn_stream(
"plcUpdaterThread() it has take %3.6f seconds, "
"%f over the expected, increase period "
"(%3.3f + %3.3f seconds)" % (diff_t, exceed_t,
current_p, increment_t))
self._setPlcUpdatePeriod(current_p+increment_t)
else:
# self.debug_stream(
            #     "plcUpdaterThread() has taken %3.6f seconds, going to "
# "sleep %3.3f seconds (update period %3.3f seconds)"
# % (diff_t, current_p-diff_t, current_p))
time.sleep(current_p-diff_t)
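    # Illustrative walk-through of _addaptPeriod() (the constant values below
    # are assumptions; the real ones come from the PLC_*_UPDATE_PERIOD
    # constants): with a step of 0.1 s, a current period of 0.5 s and a cycle
    # that took 0.75 s, the excess is 0.25 s, factor = int(0.25/0.1) = 2, so
    # the period grows by 0.1 + 0.1*2 = 0.3 s, capped at the maximum period.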
def newValuesThread(self):
'''
'''
if not self.attr_list._fileParsed.isSet():
self.info_stream("Event generator thread will wait until "
"file is parsed")
self.attr_list._fileParsed.wait()
while not self.has_data_available():
time.sleep(self._getPlcUpdatePeriod()*2)
self.debug_stream("Event generator thread wait for connection")
event_ctr = EventCtr()
while not self._tangoEventsJoiner.isSet():
try:
if self._newDataAvailable.isSet():
start_t = time.time()
self.propagateNewValues()
diff_t = time.time() - start_t
n_events = event_ctr.ctr
event_ctr.clear()
self._tangoEventsTime.append(diff_t)
self._tangoEventsNumber.append(n_events)
if n_events > 0:
self.debug_stream(
"newValuesThread() it has take %3.6f seconds "
"for %d events" % (diff_t, n_events))
self._newDataAvailable.clear()
else:
self._newDataAvailable.wait()
except Exception as exc:
self.error_stream(
"In newValuesThread() exception: %s" % (exc))
traceback.print_exc()
def propagateNewValues(self):
"""
        Check the attributes that come directly from the PLC registers to see
        if the information stored in the device needs to be refreshed and
        events emitted, and, for each of them, whether inter-attribute
        dependencies need to be triggered.
"""
attrs = self._plcAttrs.keys()[:]
for attrName in attrs:
attrStruct = self._plcAttrs[attrName]
if hasattr(attrStruct, 'hardwareRead'):
attrStruct.hardwareRead(self.read_db)
# def plcBasicAttrEvents(self):
# '''This method is used, after all reading from the PLC to update
# the most basic attributes to indicate everything is fine.
# Those attributes are:
# - lastUpdate{,Status}
# - HeartBeat
# - Lock_{ST,Status}
# - Locking
# '''
# # Heartbit
# if self.heartbeat_addr:
# self.read_heartbeat_attr =\
# self.read_db.bit(self.heartbeat_addr, 0)
# HeartBeatStruct = self._plcAttrs['HeartBeat']
# if not self.read_heartbeat_attr == HeartBeatStruct[READVALUE]:
# HeartBeatStruct[READTIME] = time.time()
# HeartBeatStruct[READVALUE] = self.read_heartbeat_attr
# # Locks
# if self.lock_ST:
# self.read_lock_ST_attr = self.read_db.get(self.lock_ST, 'B', 1)
# # lock_str, lock_quality = self.convert_Lock_ST()
# if self.read_lock_ST_attr not in [0, 1, 2]:
# self.warn_stream("<<<Invalid locker code %d>>>"
# % (self.read_lock_ST_attr))
# Lock_STStruct = self._getAttrStruct('Lock_ST')
# if not self.read_lock_ST_attr == Lock_STStruct[READVALUE]:
# # or (now - Lock_STStruct[READTIME]) > PERIODIC_EVENT:
# Lock_STStruct[READTIME] = time.time()
# Lock_STStruct[READVALUE] = self.read_lock_ST_attr
# # Lock_StatusStruct = self._getAttrStruct('Lock_Status')
# # if not lock_str == Lock_StatusStruct[READVALUE]:
# # or (now - Lock_StatusStruct[READTIME]) > PERIODIC_EVENT:
# # Lock_StatusStruct[READTIME] = time.time()
# # Lock_StatusStruct[READVALUE] = lock_str
# # locking = self.read_lock()
# LockingStruct = self._getAttrStruct('Locking')
# self._checkLocking()
# # if not self.is_lockedByTango == LockingStruct[READVALUE]:
# # # or (now - LockingStruct[READTIME]) > PERIODIC_EVENT:
# # LockingStruct[READTIME] = time.time()
# # LockingStruct[READVALUE] = self.is_lockedByTango
# def __attrHasEvents(self, attrName):
# '''
# '''
# attrStruct = self._getAttrStruct(attrName)
# if attrStruct._eventsObj:
# return True
# return False
# # if attrName in self._plcAttrs and \
# # EVENTS in self._plcAttrs[attrName]:
# # return True
# # elif attrName in self._internalAttrs and \
# # EVENTS in self._internalAttrs[attrName].keys():
# # return True
# # return False
# def __getAttrReadValue(self, attrName):
# '''
# '''
# attrStruct = self._getAttrStruct(attrName)
# if READVALUE in attrStruct:
# if type(attrStruct[READVALUE]) == CircularBuffer:
# return attrStruct[READVALUE].value
# elif type(attrStruct[READVALUE]) == HistoryBuffer:
# return attrStruct[READVALUE].array
# return attrStruct[READVALUE]
# return None
# def __lastEventHasChangingQuality(self, attrName):
# attrStruct = self._getAttrStruct(attrName)
# if MEANINGS in attrStruct or ISRESET in attrStruct:
# # To these attributes this doesn't apply
# return False
# if LASTEVENTQUALITY in attrStruct:
# if attrStruct[LASTEVENTQUALITY] == \
# PyTango.AttrQuality.ATTR_CHANGING:
# return True
# else:
# return False
# else:
# return False
# def __attrValueHasThreshold(self, attrName):
# if EVENTS in self._getAttrStruct(attrName) and \
# THRESHOLD in self._getAttrStruct(attrName)[EVENTS]:
# return True
# else:
# return False
# def __isRstAttr(self, attrName):
# self.warn_stream("DEPRECATED: __isRstAttr(%s)" % (attrName))
# if attrName.startswith('lastUpdate'):
# return False
# if ISRESET in self._getAttrStruct(attrName):
# return self._getAttrStruct(attrName)[ISRESET]
# else:
# return False
# def __checkAttrEmissionParams(self, attrName, newValue):
# if not self.__attrHasEvents(attrName):
# self.warn_stream("No events for the attribute %s" % (attrName))
# return False
# lastValue = self.__getAttrReadValue(attrName)
# if lastValue is None:
# # If there is no previous read, it has to be emitted
# return True
# # after that we know the values are different
# if self.__isRstAttr(attrName):
# writeValue = self._getAttrStruct(attrName)[WRITEVALUE]
# rst_t = self._getAttrStruct(attrName)[RESETTIME]
# if newValue and not lastValue and writeValue and \
# rst_t is not None:
# return True
# elif not newValue and lastValue and not writeValue \
# and rst_t is None:
# return True
# else:
# return False
# if self.__attrValueHasThreshold(attrName):
# diff = abs(lastValue - newValue)
# threshold = self._getAttrStruct(attrName)[EVENTS][THRESHOLD]
# if diff > threshold:
# return True
# elif self.__lastEventHasChangingQuality(attrName):
    #                 # below the threshold and a last quality of changing is
    #                 # an indication that a movement has finished, so it's
    #                 # time to emit an event with a valid quality.
# return True
# else:
# return False
# if self.__isHistoryBuffer(attrName):
# if len(lastValue) == 0 or \
# newValue != lastValue[len(lastValue)-1]:
# return True
# else:
# return False
# # At this point any special case has been treated, only avoid
# # to emit if value doesn't change
# if newValue != lastValue:
# return True
# # when non case before, no event
# return False
# def plcGeneralAttrEvents(self):
# '''This method is used to periodically loop to review the list of
# attribute (above the basics) and check if they need event
# emission.
# '''
# now = time.time()
# # attributeList = []
# # for attrName in self._plcAttrs.keys():
# # if attrName not in ['HeartBeat', 'Lock_ST', 'Lock_Status',
# # 'Locking']:
# # attributeList.append(attrName)
# attributeList = self._plcAttrs.keys()
# for exclude in ['HeartBeat', 'Lock_ST', 'Lock_Status', 'Locking']:
# if attributeList.count(exclude):
# attributeList.pop(attributeList.index(exclude))
# # Iterate the remaining to know if they need something to be done
# for attrName in attributeList:
# self.checkResetAttr(attrName)
# attrStruct = self._plcAttrs[attrName]
# if hasattr(attrStruct, 'hardwareRead'):
# attrStruct.hardwareRead(self.read_db)
#
#
# # First check if for this element, it's prepared for events
# # if self.__attrHasEvents(attrName):
# # try:
# # attrStruct = self._plcAttrs[attrName]
# # attrType = attrStruct[TYPE]
# # # lastValue = self.__getAttrReadValue(attrName)
# # last_read_t = attrStruct[READTIME]
# # if READADDR in attrStruct:
# # # read_addr = attrStruct[READADDR]
# # # if READBIT in attrStruct:
# # # read_bit = attrStruct[READBIT]
# # # newValue = self.read_db.bit(read_addr,
# # # read_bit)
# # # else:
# # # newValue = self.read_db.get(read_addr,
# # # *attrType)
# # newValue = attrStruct.hardwareRead(self.read_db)
# # if FORMULA in attrStruct and \
# # 'read' in attrStruct[FORMULA]:
# # newValue = \
# # self.__solveFormula(attrName, newValue,
# # attrStruct[FORMULA]
# # ['read'])
# # if self.__checkAttrEmissionParams(attrName, newValue):
# # self.__applyReadValue(attrName, newValue,
# # self.last_update_time)
# # if MEANINGS in attrStruct:
# # if BASESET in attrStruct:
# # attrValue = attrStruct[READVALUE].array
# # else:
# # attrValue = \
# # self.__buildAttrMeaning(attrName,
# # newValue)
# # attrQuality = \
# # self.__buildAttrQuality(attrName, newValue)
# # elif QUALITIES in attrStruct:
# # attrValue = newValue
# # attrQuality = \
# # self.__buildAttrQuality(attrName,
# # attrValue)
# # elif AUTOSTOP in attrStruct:
# # attrValue = attrStruct[READVALUE].array
# # attrQuality = PyTango.AttrQuality.ATTR_VALID
# # self._checkAutoStopConditions(attrName)
# # else:
# # attrValue = newValue
# # attrQuality = PyTango.AttrQuality.ATTR_VALID
# # # store the current quality to know an end of
# # # a movement: quality from changing to valid
# # attrStruct[LASTEVENTQUALITY] = attrQuality
# # # collect to launch fire event
# # self.__doTraceAttr(attrName,
# # "plcGeneralAttrEvents(%s)"
# # % (attrValue))
# # # elif self.__checkEventReEmission(attrName):
# # # Even there is no condition to emit an event
# # # Check the RE_EVENTS_PERIOD to know if a refresh
# # # would be nice
# # # self.__eventReEmission(attrName)
# # # attr2Reemit += 1
# # except Exception as e:
# # self.warn_stream("In plcGeneralAttrEvents(), "
# # "exception in attribute %s: %s"
# # % (attrName, e))
# # traceback.print_exc()
# # if len(attr2Event) > 0:
# # self.fireEventsList(attr2Event, timestamp=now, log=True)
# # if attr2Reemit > 0:
# # self.debug_stream("%d events due to periodic reemission"
# # % attr2Reemit)
# # self.debug_stream("plcGeneralAttrEvents(): %d events from %d "
# # "attributes" % (len(attr2Event),
# # len(attributeList)))
# def internalAttrEvents(self):
# '''
# '''
# now = time.time()
# attributeList = self._internalAttrs.keys()
# attr2Event = []
# for attrName in attributeList:
# if self.__attrHasEvents(attrName):
# try:
# # evaluate if emit is needed
# # internal attr types:
# # - logical
# # - sets
# attrStruct = self._getAttrStruct(attrName)
# attrType = attrStruct[TYPE]
# lastValue = self.__getAttrReadValue(attrName)
# last_read_t = attrStruct[READTIME]
# if LOGIC in attrStruct:
# # self.info_stream("Attribute %s is from logical "
# # "type"%(attrName))
# newValue = self._evalLogical(attrName)
# elif 'read_set' in attrStruct:
# # self.info_stream("Attribute %s is from group "
# # "type" % (attrName))
# newValue = \
# self.__getGrpBitValue(attrName,
# attrStruct['read_set'],
# self.read_db)
# elif AUTOSTOP in attrStruct:
# newValue = lastValue
# # FIXME: do it better.
# # Don't emit events on the loop, because they shall
# # be only emitted when they are written.
# self._refreshInternalAutostopParams(attrName)
# # FIXME: this is task for a internalUpdaterThread
# elif MEAN in attrStruct or STD in attrStruct:
# # self._updateStatistic(attrName)
# newValue = attrStruct[READVALUE]
# elif TRIGGERED in attrStruct:
# newValue = attrStruct[TRIGGERED]
# elif isinstance(attrStruct, EnumerationAttr):
# newValue = lastValue # avoid emit
# else:
# # self.warn_stream("In internalAttrEvents(): "
# # "unknown how to emit events "
# # "for %s attribute" % (attrName))
# newValue = lastValue
# emit = False
# if newValue != lastValue:
# # self.info_stream("Emit because %s!=%s"
# # % (str(newValue),
# # str(lastValue)))
# emit = True
# elif (last_read_t is None):
# # self.info_stream("Emit new value because it "
# # "wasn't read before")
# emit = True
# else:
# pass
# # self.info_stream("No event to emit "
# # "(lastValue %s (%s), "
# # "newValue %s)"
# # %(str(lastValue),
# # str(last_read_t),
# # str(newValue)))
# except Exception as e:
# self.error_stream("In internalAttrEvents(), "
# "exception reading attribute %s: %s"
# % (attrName, e))
# traceback.print_exc()
# else:
# # prepare to emit
# try:
# if emit:
# self.__applyReadValue(attrName,
# newValue,
# self.last_update_time)
# if MEANINGS in attrStruct:
# attrValue = \
# self.__buildAttrMeaning(attrName,
# newValue)
# attrQuality = \
# self.__buildAttrQuality(attrName,
# newValue)
# elif QUALITIES in attrStruct:
# attrValue = newValue
# attrQuality = \
# self.__buildAttrQuality(attrName,
# attrValue)
# else:
# attrValue = newValue
# attrQuality =\
# PyTango.AttrQuality.ATTR_VALID
# attr2Event.append([attrName, attrValue])
# self.__doTraceAttr(attrName,
# "internalAttrEvents(%s)"
# % (attrValue))
# except Exception as e:
# self.error_stream("In internalAttrEvents(), "
# "exception on emit attribute "
# "%s: %s" % (attrName, e))
# # if len(attr2Event) > 0:
# # self.fireEventsList(attr2Event, timestamp=now, log=True)
# # self.debug_stream("internalAttrEvents(): %d events from %d "
# # "attributes" % (len(attr2Event),
# # len(attributeList)))
# return len(attr2Event)
# def checkResetAttr(self, attrName):
# '''
# '''
# self.warn_stream("DEPRECATED: checkResetAttr(%s)" % (attrName))
# if not self.__isRstAttr(attrName):
# return
# # FIXME: ---
# # if this is moved to a new thread separated to the event
# # emit, the system must be changed to be passive waiting
# # (that it Threading.Event())
# if self.__isCleanResetNeed(attrName):
# self._plcAttrs[attrName][RESETTIME] = None
# readAddr = self._plcAttrs[attrName][READADDR]
# writeAddr = self._plcAttrs[attrName][WRITEADDR]
# writeBit = self._plcAttrs[attrName][WRITEBIT]
# writeValue = False
# self.__writeBit(attrName, readAddr,
# writeAddr, writeBit, writeValue)
# self._plcAttrs[attrName][WRITEVALUE] = writeValue
# self.info_stream("Set back to 0 a RST attr %s" % (attrName))
# # self._plcAttrs[attrName][READVALUE] = False
# # self.fireEvent([attrName, False], time.time())
# def __isCleanResetNeed(self, attrName):
# '''
# '''
# now = time.time()
# if self.__isResetAttr(attrName):
# read_value = self._plcAttrs[attrName][READVALUE]
# rst_t = self._plcAttrs[attrName][RESETTIME]
# if read_value and rst_t is not None:
# diff_t = now-rst_t
# if RESETACTIVE in self._plcAttrs[attrName]:
# activeRst_t = self._plcAttrs[attrName][RESETACTIVE]
# else:
# activeRst_t = ACTIVE_RESET_T
# if activeRst_t-diff_t < 0:
# self.info_stream("Attribute %s needs clean reset"
# % (attrName))
# return True
# self.info_stream("Do not clean reset flag yet for %s "
# "(%6.3f seconds left)"
# % (attrName, activeRst_t-diff_t))
# return False
# def __isResetAttr(self, attrName):
# '''
# '''
# if attrName in self._plcAttrs and \
# ISRESET in self._plcAttrs[attrName] and \
# self._plcAttrs[attrName][ISRESET]:
# return True
# return False
def auto_local_lock(self):
if self._deviceIsInLocal:
if 'Locking' in self._plcAttrs:
if not self._plcAttrs['Locking'].rvalue:
self.info_stream("Device is in Local mode and "
"not locked. Proceed to lock it")
with self.dataBlockSemaphore:
self.relock()
time.sleep(self._getPlcUpdatePeriod())
# else:
# self.info_stream("Device is in Local mode and locked")
else:
self.warn_stream("Device in Local mode but 'Locking' "
"attribute not yet present")
# else:
# self.info_stream("Device is not in Local mode")
def relock(self):
'''
'''
if 'Locking' in self._plcAttrs and \
not self._plcAttrs['Locking'].wvalue:
self.write_lock(True)
# end "To be moved" section
def write_lock(self, value):
'''
'''
if self.get_state() == PyTango.DevState.FAULT or \
not self.has_data_available():
return # raise AttributeError("Not available in fault state!")
if not isinstance(value, bool):
raise ValueError("write_lock argument must be a boolean")
if 'Locking' in self._plcAttrs:
raddr = self._plcAttrs['Locking'].read_addr
rbit = self._plcAttrs['Locking'].read_bit
rbyte = self.read_db.b(raddr)
            waddr = self._plcAttrs['Locking'].write_addr
if value:
# sets bit 'bitno' of b
toWrite = rbyte | (int(value) << rbit)
# a byte of 0s with a unique 1 in the place to set this 1
else:
# clears bit 'bitno' of b
toWrite = rbyte & (0xFF) ^ (1 << rbit)
# a byte of 1s with a unique 0 in the place to set this 0
self.write_db.write(waddr, toWrite, TYPE_MAP[PyTango.DevUChar])
time.sleep(self._getPlcUpdatePeriod())
reRead = self.read_db.b(raddr)
self.info_stream("Writing Locking boolean to %s (%d.%d) byte "
"was %s; write %s; now %s"
% (" lock" if value else "unlock",
raddr, rbit, bin(rbyte), bin(toWrite),
bin(reRead)))
self._plcAttrs['Locking'].write_value = value
@CommandExc
def Update(self):
'''Deprecated
'''
pass
# PROTECTED REGION END --- LinacData.Update
# ==================================================================
#
# LinacDataClass class definition
#
# ==================================================================
class LinacDataClass(PyTango.DeviceClass):
# -------- Add you global class variables here ------------------------
# PROTECTED REGION ID(LinacData.global_class_variables) ---
# PROTECTED REGION END --- LinacData.global_class_variables
def dyn_attr(self, dev_list):
"""Invoked to create dynamic attributes for the given devices.
Default implementation calls
:meth:`LinacData.initialize_dynamic_attributes` for each device
:param dev_list: list of devices
:type dev_list: :class:`PyTango.DeviceImpl`"""
for dev in dev_list:
try:
dev.initialize_dynamic_attributes()
except:
dev.warn_stream("Failed to initialize dynamic attributes")
dev.debug_stream("Details: " + traceback.format_exc())
# PROTECTED REGION ID(LinacData.dyn_attr) ENABLED START ---
# PROTECTED REGION END --- LinacData.dyn_attr
# Class Properties ---
class_property_list = {}
# Device Properties ---
device_property_list = {'ReadSize': [PyTango.DevShort,
"how many bytes to read (should "
"be a multiple of 2)", 0
],
'WriteSize': [PyTango.DevShort,
"size of write data block", 0],
'IpAddress': [PyTango.DevString,
"ipaddress of linac PLC "
"(deprecated)", ''],
'PlcAddress': [PyTango.DevString,
"ipaddress of linac PLC", ''],
'Port': [PyTango.DevShort,
"port of linac PLC (deprecated)",
None],
'LocalPort': [PyTango.DevShort,
"port of linac PLC (deprecated)",
None],
'RemotePort': [PyTango.DevShort,
"port of linac PLC "
"(deprecated)", None],
'AttrFile': [PyTango.DevString,
"file that contains description "
"of attributes of this "
"Linac data block", ''],
'BindAddress': [PyTango.DevString,
'ip of the interface used to '
'communicate with plc '
'(deprecated)', ''],
'LocalAddress': [PyTango.DevString,
'ip of the interface used '
'to communicate with plc as '
'the local', '10.0.7.100'],
'RemoteAddress': [PyTango.DevString,
'ip of the interface used '
'to communicate with plc as '
'the remote', '10.0.7.1'],
'TimeoutAlarm': [PyTango.DevDouble,
"after how many seconds of "
"silence the state is set "
"to alarm, this should be "
"less than TimeoutConnection",
1.0],
'TimeoutConnection': [PyTango.DevDouble,
"after how many seconds "
"of silence the "
"connection is assumed "
"to be interrupted",
1.5],
'ReconnectWait': [PyTango.DevDouble,
"after how many seconds "
"since the last update the "
"next connection attempt is "
"made", 6.0],
}
class_property_list['TimeoutAlarm'] = \
device_property_list['TimeoutAlarm']
class_property_list['TimeoutConnection'] = \
device_property_list['TimeoutConnection']
class_property_list['ReconnectWait'] = \
device_property_list['ReconnectWait']
# Command definitions ---
cmd_list = {'ReloadAttrFile': [[PyTango.DevVoid, ""],
[PyTango.DevVoid, ""]],
'Exec': [[PyTango.DevString, "statement to executed"],
[PyTango.DevString, "result"],
{'Display level': PyTango.DispLevel.EXPERT, }],
'GetBit': [[PyTango.DevVarShortArray, "idx"],
[PyTango.DevBoolean, ""],
{'Display level': PyTango.DispLevel.EXPERT, }],
'GetByte': [[PyTango.DevShort, "idx"],
[PyTango.DevShort, ""],
{'Display level': PyTango.DispLevel.EXPERT, }],
'GetShort': [[PyTango.DevShort, "idx"],
[PyTango.DevShort, ""],
{'Display level':
PyTango.DispLevel.EXPERT, }],
'GetFloat': [[PyTango.DevShort, "idx"],
[PyTango.DevFloat, ""],
{'Display level':
PyTango.DispLevel.EXPERT, }],
'HexDump': [[PyTango.DevVoid, "idx"],
[PyTango.DevString, "hexdump of all data"]],
'Hex': [[PyTango.DevShort, "idx"],
[PyTango.DevString, ""]],
'DumpTo': [[PyTango.DevString, "target file"],
[PyTango.DevVoid, ""], {}],
'WriteBit': [[PyTango.DevVarShortArray,
"idx, bitno, value"],
[PyTango.DevVoid, ""],
{'Display level':
PyTango.DispLevel.EXPERT, }],
'WriteByte': [[PyTango.DevVarShortArray, "idx, value"],
[PyTango.DevVoid, ""],
{'Display level':
PyTango.DispLevel.EXPERT, }],
'WriteShort': [[PyTango.DevVarShortArray, "idx, value"],
[PyTango.DevVoid, ""],
{'Display level':
PyTango.DispLevel.EXPERT, }],
'WriteFloat': [[PyTango.DevVarFloatArray, "idx, value"],
[PyTango.DevVoid, ""],
{'Display level':
PyTango.DispLevel.EXPERT}],
'ResetState': [[PyTango.DevVoid, ""],
[PyTango.DevVoid, ""]],
'Update': [[PyTango.DevVoid, ""],
[PyTango.DevVoid, ""],
# { 'polling period' : 50 }
],
'RestoreReadDB': [[PyTango.DevVoid, ""],
[PyTango.DevVoid, ""],
{'Display level':
PyTango.DispLevel.EXPERT}],
}
# Attribute definitions ---
attr_list = {'EventsTime': [[PyTango.DevDouble,
PyTango.SPECTRUM,
PyTango.READ, 1800],
{'Display level':
PyTango.DispLevel.EXPERT}
],
'EventsTimeMin': [[PyTango.DevDouble,
PyTango.SCALAR,
PyTango.READ],
{'Display level':
PyTango.DispLevel.EXPERT}
],
'EventsTimeMax': [[PyTango.DevDouble,
PyTango.SCALAR,
PyTango.READ],
{'Display level':
PyTango.DispLevel.EXPERT}
],
'EventsTimeMean': [[PyTango.DevDouble,
PyTango.SCALAR,
PyTango.READ],
{'Display level':
PyTango.DispLevel.EXPERT}
],
'EventsTimeStd': [[PyTango.DevDouble,
PyTango.SCALAR,
PyTango.READ],
{'Display level':
PyTango.DispLevel.EXPERT}
],
'EventsNumber': [[PyTango.DevShort,
PyTango.SPECTRUM,
PyTango.READ, 1800],
{'Display level':
PyTango.DispLevel.EXPERT}
],
'EventsNumberMin': [[PyTango.DevUShort,
PyTango.SCALAR,
PyTango.READ],
{'Display level':
PyTango.DispLevel.EXPERT}
],
'EventsNumberMax': [[PyTango.DevUShort,
PyTango.SCALAR,
PyTango.READ],
{'Display level':
PyTango.DispLevel.EXPERT}
],
'EventsNumberMean': [[PyTango.DevDouble,
PyTango.SCALAR,
PyTango.READ],
{'Display level':
PyTango.DispLevel.EXPERT}
],
'EventsNumberStd': [[PyTango.DevDouble,
PyTango.SCALAR,
PyTango.READ],
{'Display level':
PyTango.DispLevel.EXPERT}
],
'IsTooFarEnable': [[PyTango.DevBoolean,
PyTango.SCALAR,
PyTango.READ_WRITE],
{'label':
"Is Too Far readback Feature "
"Enabled?",
'Display level':
PyTango.DispLevel.EXPERT,
'description':
"This boolean is to enable or "
"disable the feature to use the "
"quality warning for readback "
"attributes with setpoint too far",
'Memorized': "true"
}
],
'forceWriteDB': [[PyTango.DevString,
PyTango.SCALAR,
PyTango.READ],
{'Display level':
PyTango.DispLevel.EXPERT}
],
'cpu_percent': [[PyTango.DevDouble,
PyTango.SCALAR,
PyTango.READ],
{'Display level':
PyTango.DispLevel.EXPERT}
],
'mem_percent': [[PyTango.DevDouble,
PyTango.SCALAR,
PyTango.READ],
{'Display level':
PyTango.DispLevel.EXPERT}
],
'mem_rss': [[PyTango.DevULong,
PyTango.SCALAR,
PyTango.READ],
{'Display level':
PyTango.DispLevel.EXPERT,
'unit': 'Bytes'}
],
'mem_swap': [[PyTango.DevULong,
PyTango.SCALAR,
PyTango.READ],
{'Display level':
PyTango.DispLevel.EXPERT,
'unit': 'Bytes'}
],
}
if __name__ == '__main__':
try:
py = PyTango.Util(sys.argv)
py.add_TgClass(LinacDataClass, LinacData, 'LinacData')
U = PyTango.Util.instance()
U.server_init()
U.server_run()
except PyTango.DevFailed as e:
PyTango.Except.print_exception(e)
except Exception as e:
traceback.print_exc()
| gpl-3.0 | -7,029,453,707,529,209,000 | 49.351995 | 83 | 0.471468 | false | 4.56776 | false | false | false |
az0/bleachbit | tests/TestMemory.py | 1 | 5354 | # vim: ts=4:sw=4:expandtab
# -*- coding: UTF-8 -*-
# BleachBit
# Copyright (C) 2008-2019 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test case for module Memory
"""
from __future__ import absolute_import, print_function
from tests import common
from bleachbit.Memory import *
import unittest
import sys
running_linux = sys.platform.startswith('linux')
class MemoryTestCase(common.BleachbitTestCase):
"""Test case for module Memory"""
@unittest.skipUnless(running_linux, 'not running linux')
def test_get_proc_swaps(self):
"""Test for method get_proc_swaps"""
ret = get_proc_swaps()
self.assertGreater(len(ret), 10)
        if not re.search(r'Filename\s+Type\s+Size', ret):
raise RuntimeError("Unexpected first line in swap summary '%s'" % ret)
@unittest.skipUnless(running_linux, 'not running linux')
def test_make_self_oom_target_linux(self):
"""Test for method make_self_oom_target_linux"""
# preserve
euid = os.geteuid()
# Minimally test there is no traceback
make_self_oom_target_linux()
# restore
os.seteuid(euid)
@unittest.skipUnless(running_linux, 'not running linux')
def test_count_linux_swap(self):
"""Test for method count_linux_swap"""
n_swaps = count_swap_linux()
self.assertIsInteger(n_swaps)
self.assertTrue(0 <= n_swaps < 10)
def test_physical_free_darwin(self):
# TODO: use mock
self.assertEqual(physical_free_darwin(lambda:
"""Mach Virtual Memory Statistics: (page size of 4096 bytes)
Pages free: 836891.
Pages active: 588004.
Pages inactive: 16985.
Pages speculative: 89776.
Pages throttled: 0.
Pages wired down: 468097.
Pages purgeable: 58313.
"Translation faults": 3109985921.
Pages copy-on-write: 25209334.
Pages zero filled: 537180873.
Pages reactivated: 132264973.
Pages purged: 11567935.
File-backed pages: 184609.
Anonymous pages: 510156.
Pages stored in compressor: 784977.
Pages occupied by compressor: 96724.
Decompressions: 66048421.
Compressions: 90076786.
Pageins: 758631430.
Pageouts: 30477017.
Swapins: 19424481.
Swapouts: 20258188.
"""), 3427905536)
self.assertRaises(RuntimeError, physical_free_darwin, lambda: "Invalid header")
def test_physical_free(self):
"""Test for method physical_free"""
ret = physical_free()
self.assertIsInteger(ret, 'physical_free() returns variable type %s' % type(ret))
self.assertGreater(physical_free(), 0)
report_free()
@unittest.skipUnless(running_linux, 'not running linux')
def test_get_swap_size_linux(self):
"""Test for get_swap_size_linux()"""
with open('/proc/swaps') as f:
swapdev = f.read().split('\n')[1].split(' ')[0]
if 0 == len(swapdev):
self.skipTest('no active swap device detected')
size = get_swap_size_linux(swapdev)
self.assertIsInteger(size)
self.assertGreater(size, 1024 ** 2)
logger.debug("size of swap '%s': %d B (%d MB)", swapdev, size, size / (1024 ** 2))
with open('/proc/swaps') as f:
proc_swaps = f.read()
size2 = get_swap_size_linux(swapdev, proc_swaps)
self.assertEqual(size, size2)
@unittest.skipUnless(running_linux, 'not running linux')
def test_get_swap_uuid(self):
"""Test for method get_swap_uuid"""
self.assertEqual(get_swap_uuid('/dev/doesnotexist'), None)
def test_parse_swapoff(self):
"""Test for method parse_swapoff"""
tests = (
# Ubuntu 15.10 has format "swapoff /dev/sda3"
('swapoff /dev/sda3', '/dev/sda3'),
('swapoff for /dev/sda6', '/dev/sda6'),
('swapoff on /dev/mapper/lubuntu-swap_1', '/dev/mapper/lubuntu-swap_1'))
for test in tests:
self.assertEqual(parse_swapoff(test[0]), test[1])
@unittest.skipUnless(running_linux, 'skipping test on non-linux')
def test_swap_off_swap_on(self):
"""Test for disabling and enabling swap"""
if not General.sudo_mode() or os.getuid() > 0:
self.skipTest('not enough privileges')
disable_swap_linux()
enable_swap_linux()
| gpl-3.0 | -4,627,705,048,792,550,000 | 36.971631 | 90 | 0.59619 | false | 3.849029 | true | false | false |
sauliusl/scipy | scipy/stats/tests/test_continuous_basic.py | 18 | 13750 | from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
import numpy.testing as npt
from scipy import integrate
from scipy import stats
from scipy.special import betainc
from common_tests import (check_normalization, check_moment, check_mean_expect,
check_var_expect, check_skew_expect, check_kurt_expect,
check_entropy, check_private_entropy, NUMPY_BELOW_1_7,
check_edge_support, check_named_args, check_random_state_property)
from scipy.stats._distr_params import distcont
"""
Test all continuous distributions.
Parameters were chosen for those distributions that pass the
Kolmogorov-Smirnov test. This provides safe parameters for each
distributions so that we can perform further testing of class methods.
These tests currently check only/mostly for serious errors and exceptions,
not for numerically exact results.
"""
## Note that you need to add new distributions you want tested
## to _distr_params
DECIMAL = 5 # specify the precision of the tests # increased from 0 to 5
## Last four of these fail all around. Need to be checked
distcont_extra = [
['betaprime', (100, 86)],
['fatiguelife', (5,)],
['mielke', (4.6420495492121487, 0.59707419545516938)],
['invweibull', (0.58847112119264788,)],
# burr: sample mean test fails still for c<1
['burr', (0.94839838075366045, 4.3820284068855795)],
# genextreme: sample mean test, sf-logsf test fail
['genextreme', (3.3184017469423535,)],
]
# for testing only specific functions
# distcont = [
## ['fatiguelife', (29,)], #correction numargs = 1
## ['loggamma', (0.41411931826052117,)]]
# for testing ticket:767
# distcont = [
## ['genextreme', (3.3184017469423535,)],
## ['genextreme', (0.01,)],
## ['genextreme', (0.00001,)],
## ['genextreme', (0.0,)],
## ['genextreme', (-0.01,)]
## ]
# distcont = [['gumbel_l', ()],
## ['gumbel_r', ()],
## ['norm', ()]
## ]
# distcont = [['norm', ()]]
distmissing = ['wald', 'gausshyper', 'genexpon', 'rv_continuous',
'loglaplace', 'rdist', 'semicircular', 'invweibull', 'ksone',
'cosine', 'kstwobign', 'truncnorm', 'mielke', 'recipinvgauss', 'levy',
'johnsonsu', 'levy_l', 'powernorm', 'wrapcauchy',
'johnsonsb', 'truncexpon', 'invgauss', 'invgamma',
'powerlognorm']
distmiss = [[dist,args] for dist,args in distcont if dist in distmissing]
distslow = ['rdist', 'gausshyper', 'recipinvgauss', 'ksone', 'genexpon',
'vonmises', 'vonmises_line', 'mielke', 'semicircular',
'cosine', 'invweibull', 'powerlognorm', 'johnsonsu', 'kstwobign']
# distslow are sorted by speed (very slow to slow)
# NB: not needed anymore?
def _silence_fp_errors(func):
# warning: don't apply to test_ functions as is, then those will be skipped
def wrap(*a, **kw):
olderr = np.seterr(all='ignore')
try:
return func(*a, **kw)
finally:
np.seterr(**olderr)
wrap.__name__ = func.__name__
return wrap
def test_cont_basic():
# this test skips slow distributions
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=integrate.IntegrationWarning)
for distname, arg in distcont[:]:
if distname in distslow:
continue
            if distname == 'levy_stable':
continue
distfn = getattr(stats, distname)
np.random.seed(765456)
sn = 500
rvs = distfn.rvs(size=sn, *arg)
sm = rvs.mean()
sv = rvs.var()
m, v = distfn.stats(*arg)
yield check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn, \
distname + 'sample mean test'
yield check_cdf_ppf, distfn, arg, distname
yield check_sf_isf, distfn, arg, distname
yield check_pdf, distfn, arg, distname
yield check_pdf_logpdf, distfn, arg, distname
yield check_cdf_logcdf, distfn, arg, distname
yield check_sf_logsf, distfn, arg, distname
if distname in distmissing:
alpha = 0.01
yield check_distribution_rvs, distname, arg, alpha, rvs
locscale_defaults = (0, 1)
meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
distfn.logsf]
# make sure arguments are within support
spec_x = {'frechet_l': -0.5, 'weibull_max': -0.5, 'levy_l': -0.5,
'pareto': 1.5, 'tukeylambda': 0.3}
x = spec_x.get(distname, 0.5)
yield check_named_args, distfn, x, arg, locscale_defaults, meths
yield check_random_state_property, distfn, arg
# Entropy
skp = npt.dec.skipif
yield check_entropy, distfn, arg, distname
if distfn.numargs == 0:
yield skp(NUMPY_BELOW_1_7)(check_vecentropy), distfn, arg
if distfn.__class__._entropy != stats.rv_continuous._entropy:
yield check_private_entropy, distfn, arg, stats.rv_continuous
yield check_edge_support, distfn, arg
knf = npt.dec.knownfailureif
yield knf(distname == 'truncnorm')(check_ppf_private), distfn, \
arg, distname
@npt.dec.slow
def test_cont_basic_slow():
# same as above for slow distributions
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=integrate.IntegrationWarning)
for distname, arg in distcont[:]:
if distname not in distslow:
continue
            if distname == 'levy_stable':
continue
distfn = getattr(stats, distname)
np.random.seed(765456)
sn = 500
rvs = distfn.rvs(size=sn,*arg)
sm = rvs.mean()
sv = rvs.var()
m, v = distfn.stats(*arg)
yield check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn, \
distname + 'sample mean test'
yield check_cdf_ppf, distfn, arg, distname
yield check_sf_isf, distfn, arg, distname
yield check_pdf, distfn, arg, distname
yield check_pdf_logpdf, distfn, arg, distname
yield check_cdf_logcdf, distfn, arg, distname
yield check_sf_logsf, distfn, arg, distname
# yield check_oth, distfn, arg # is still missing
if distname in distmissing:
alpha = 0.01
yield check_distribution_rvs, distname, arg, alpha, rvs
locscale_defaults = (0, 1)
meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
distfn.logsf]
# make sure arguments are within support
x = 0.5
if distname == 'invweibull':
arg = (1,)
elif distname == 'ksone':
arg = (3,)
yield check_named_args, distfn, x, arg, locscale_defaults, meths
yield check_random_state_property, distfn, arg
# Entropy
skp = npt.dec.skipif
ks_cond = distname in ['ksone', 'kstwobign']
yield skp(ks_cond)(check_entropy), distfn, arg, distname
if distfn.numargs == 0:
yield skp(NUMPY_BELOW_1_7)(check_vecentropy), distfn, arg
if distfn.__class__._entropy != stats.rv_continuous._entropy:
yield check_private_entropy, distfn, arg, stats.rv_continuous
yield check_edge_support, distfn, arg
@npt.dec.slow
def test_moments():
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=integrate.IntegrationWarning)
knf = npt.dec.knownfailureif
fail_normalization = set(['vonmises', 'ksone'])
fail_higher = set(['vonmises', 'ksone', 'ncf'])
for distname, arg in distcont[:]:
            if distname == 'levy_stable':
continue
distfn = getattr(stats, distname)
m, v, s, k = distfn.stats(*arg, moments='mvsk')
cond1, cond2 = distname in fail_normalization, distname in fail_higher
msg = distname + ' fails moments'
yield knf(cond1, msg)(check_normalization), distfn, arg, distname
yield knf(cond2, msg)(check_mean_expect), distfn, arg, m, distname
yield knf(cond2, msg)(check_var_expect), distfn, arg, m, v, distname
yield knf(cond2, msg)(check_skew_expect), distfn, arg, m, v, s, \
distname
yield knf(cond2, msg)(check_kurt_expect), distfn, arg, m, v, k, \
distname
yield check_loc_scale, distfn, arg, m, v, distname
yield check_moment, distfn, arg, m, v, distname
def check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, msg):
# this did not work, skipped silently by nose
if not np.isinf(m):
check_sample_mean(sm, sv, sn, m)
if not np.isinf(v):
check_sample_var(sv, sn, v)
def check_sample_mean(sm,v,n, popmean):
# from stats.stats.ttest_1samp(a, popmean):
# Calculates the t-obtained for the independent samples T-test on ONE group
# of scores a, given a population mean.
#
# Returns: t-value, two-tailed prob
df = n-1
svar = ((n-1)*v) / float(df) # looks redundant
t = (sm-popmean) / np.sqrt(svar*(1.0/n))
prob = betainc(0.5*df, 0.5, df/(df + t*t))
# return t,prob
npt.assert_(prob > 0.01, 'mean fail, t,prob = %f, %f, m, sm=%f,%f' %
(t, prob, popmean, sm))
def check_sample_var(sv,n, popvar):
# two-sided chisquare test for sample variance equal to hypothesized variance
df = n-1
chi2 = (n-1)*popvar/float(popvar)
pval = stats.distributions.chi2.sf(chi2, df) * 2
npt.assert_(pval > 0.01, 'var fail, t, pval = %f, %f, v, sv=%f, %f' %
(chi2, pval, popvar, sv))
def check_cdf_ppf(distfn,arg,msg):
values = [0.001, 0.5, 0.999]
npt.assert_almost_equal(distfn.cdf(distfn.ppf(values, *arg), *arg),
values, decimal=DECIMAL, err_msg=msg +
' - cdf-ppf roundtrip')
def check_sf_isf(distfn,arg,msg):
npt.assert_almost_equal(distfn.sf(distfn.isf([0.1,0.5,0.9], *arg), *arg),
[0.1,0.5,0.9], decimal=DECIMAL, err_msg=msg +
' - sf-isf roundtrip')
npt.assert_almost_equal(distfn.cdf([0.1,0.9], *arg),
1.0-distfn.sf([0.1,0.9], *arg),
decimal=DECIMAL, err_msg=msg +
' - cdf-sf relationship')
def check_pdf(distfn, arg, msg):
# compares pdf at median with numerical derivative of cdf
median = distfn.ppf(0.5, *arg)
eps = 1e-6
pdfv = distfn.pdf(median, *arg)
if (pdfv < 1e-4) or (pdfv > 1e4):
# avoid checking a case where pdf is close to zero or huge (singularity)
median = median + 0.1
pdfv = distfn.pdf(median, *arg)
cdfdiff = (distfn.cdf(median + eps, *arg) -
distfn.cdf(median - eps, *arg))/eps/2.0
# replace with better diff and better test (more points),
# actually, this works pretty well
npt.assert_almost_equal(pdfv, cdfdiff,
decimal=DECIMAL, err_msg=msg + ' - cdf-pdf relationship')
def check_pdf_logpdf(distfn, args, msg):
# compares pdf at several points with the log of the pdf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
pdf = distfn.pdf(vals, *args)
logpdf = distfn.logpdf(vals, *args)
pdf = pdf[pdf != 0]
logpdf = logpdf[np.isfinite(logpdf)]
npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg + " - logpdf-log(pdf) relationship")
def check_sf_logsf(distfn, args, msg):
# compares sf at several points with the log of the sf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
sf = distfn.sf(vals, *args)
logsf = distfn.logsf(vals, *args)
sf = sf[sf != 0]
logsf = logsf[np.isfinite(logsf)]
npt.assert_almost_equal(np.log(sf), logsf, decimal=7, err_msg=msg + " - logsf-log(sf) relationship")
def check_cdf_logcdf(distfn, args, msg):
# compares cdf at several points with the log of the cdf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
cdf = distfn.cdf(vals, *args)
logcdf = distfn.logcdf(vals, *args)
cdf = cdf[cdf != 0]
logcdf = logcdf[np.isfinite(logcdf)]
npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7, err_msg=msg + " - logcdf-log(cdf) relationship")
def check_distribution_rvs(dist, args, alpha, rvs):
# test from scipy.stats.tests
# this version reuses existing random variables
D,pval = stats.kstest(rvs, dist, args=args, N=1000)
if (pval < alpha):
D,pval = stats.kstest(dist,'',args=args, N=1000)
npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) +
"; alpha = " + str(alpha) + "\nargs = " + str(args))
def check_vecentropy(distfn, args):
npt.assert_equal(distfn.vecentropy(*args), distfn._entropy(*args))
@npt.dec.skipif(NUMPY_BELOW_1_7)
def check_loc_scale(distfn, arg, m, v, msg):
loc, scale = 10.0, 10.0
mt, vt = distfn.stats(loc=loc, scale=scale, *arg)
npt.assert_allclose(m*scale + loc, mt)
npt.assert_allclose(v*scale*scale, vt)
def check_ppf_private(distfn, arg, msg):
#fails by design for truncnorm self.nb not defined
ppfs = distfn._ppf(np.array([0.1, 0.5, 0.9]), *arg)
npt.assert_(not np.any(np.isnan(ppfs)), msg + 'ppf private is nan')
if __name__ == "__main__":
npt.run_module_suite()
| bsd-3-clause | 1,748,733,670,844,875,000 | 37.300836 | 108 | 0.589018 | false | 3.218633 | true | false | false |
fedora-conary/rmake-2 | rmake_test/functional_test/cmdlinetest/chroottest.py | 2 | 9233 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Command line chroot manipulation command tests.
"""
import errno
import re
import os
import select
import sys
import time
from conary_test import recipes
from rmake_test import rmakehelp
from conary.lib import coveragehook
def _readIfReady(fd):
if select.select([fd], [], [], 1.0)[0]:
return os.read(fd, 8096)
return ''
class ChrootTest(rmakehelp.RmakeHelper):
def testChrootManagement(self):
self.openRmakeRepository()
client = self.startRmakeServer()
helper = self.getRmakeHelper(client.uri)
self.buildCfg.cleanAfterCook = False
trv = self.addComponent('simple:source', '1-1', '',
[('simple.recipe', recipes.simpleRecipe)])
jobId = self.discardOutput(helper.buildTroves, ['simple'])
helper.waitForJob(jobId)
chroot = helper.listChroots()[0]
assert(chroot.path == 'simple')
assert(chroot.jobId == jobId)
assert(helper.client.getJob(jobId).getTrove(*chroot.troveTuple))
path = self.rmakeCfg.getChrootDir() + '/' + chroot.path
assert(os.path.exists(path))
self.stopRmakeServer()
client = self.startRmakeServer()
helper = self.getRmakeHelper(client.uri)
chroot = helper.listChroots()[0]
assert(chroot.path == 'simple')
assert(chroot.jobId == jobId)
assert(helper.client.getJob(jobId).getTrove(*chroot.troveTuple))
self.captureOutput(helper.archiveChroot,'_local_', 'simple', 'foo')
archivedPath = self.rmakeCfg.getChrootArchiveDir() + '/foo'
assert(os.path.exists(archivedPath))
archivedChroot = helper.listChroots()[0]
assert(archivedChroot.path == 'archive/foo')
self.stopRmakeServer()
client = self.startRmakeServer()
helper = self.getRmakeHelper(client.uri)
archivedChroot = helper.listChroots()[0]
assert(archivedChroot.path == 'archive/foo')
self.captureOutput(helper.deleteChroot ,'_local_', 'archive/foo')
assert(not helper.listChroots())
assert(not os.path.exists(archivedPath))
def testChrootManagementMultinode(self):
def _getChroot(helper):
data = helper.listChroots()
started = time.time()
while not data:
if time.time() - started > 60:
raise RuntimeError("timeout waiting for chroot to appear")
time.sleep(.2)
data = helper.listChroots()
chroot, = data
return chroot
self.openRmakeRepository()
client = self.startRmakeServer(multinode=True)
helper = self.getRmakeHelper(client.uri)
self.startNode()
self.buildCfg.cleanAfterCook = False
trv = self.addComponent('simple:source', '1-1', '',
[('simple.recipe', recipes.simpleRecipe)])
jobId = self.discardOutput(helper.buildTroves, ['simple'])
helper.waitForJob(jobId)
chroot = helper.listChroots()[0]
assert(chroot.path == 'simple')
assert(chroot.jobId == jobId)
self.stopNodes()
self.startNode()
chroot = _getChroot(helper)
assert(chroot.path == 'simple')
assert(chroot.jobId == jobId)
self.stopNodes()
self.stopRmakeServer()
client = self.startRmakeServer(multinode=True)
self.startNode()
helper = self.getRmakeHelper(client.uri)
chroot = _getChroot(helper)
assert(chroot.path == 'simple')
assert(chroot.jobId == jobId)
self.captureOutput(helper.archiveChroot, self.nodeCfg.name, 'simple', 'foo')
archivedPath = self.nodeCfg.getChrootArchiveDir() + '/foo'
assert(os.path.exists(archivedPath))
archivedChroot = helper.listChroots()[0]
assert(archivedChroot.path == 'archive/foo')
self.stopNodes()
self.stopRmakeServer()
client = self.startRmakeServer(multinode=True)
helper = self.getRmakeHelper(client.uri)
self.startNode()
archivedChroot = _getChroot(helper)
assert(archivedChroot.path == 'archive/foo')
pid, master_fd = os.forkpty()
if not pid:
try:
coveragehook.install()
helper.startChrootSession(jobId, 'simple', ['/bin/sh'])
sys.stdout.flush()
coveragehook.save()
finally:
os._exit(0)
try:
count = 0
data = ''
while not data and count < 60:
data = _readIfReady(master_fd)
count += 1
assert(data)
os.write(master_fd, 'exit\n')
data = _readIfReady(master_fd)
while True:
try:
data += _readIfReady(master_fd)
except OSError:
os.waitpid(pid, 0)
break
finally:
os.close(master_fd)
def testDeleteAllChrootsMultinode(self):
self.openRmakeRepository()
client = self.startRmakeServer(multinode=True)
return
self.startNode()
helper = self.getRmakeHelper(client.uri)
self.buildCfg.cleanAfterCook = False
try:
trv = self.addComponent('simple:source', '1-1', '',
[('simple.recipe', recipes.simpleRecipe)])
jobId = self.discardOutput(helper.buildTroves, ['simple'])
finally:
self.buildCfg.cleanAfterCook = True
helper.waitForJob(jobId)
chroot = helper.listChroots()[0]
assert(chroot.path == 'simple')
assert(chroot.jobId == jobId)
assert(helper.client.getJob(jobId).getTrove(*chroot.troveTuple))
self.captureOutput(helper.deleteAllChroots)
assert(not helper.listChroots())
def testChrootSession(self):
# NOTE: This test is prone to race conditions. The chroot
# process will occasionally quit right away, probably due to
# a (hidden) error.
self.openRmakeRepository()
client = self.startRmakeServer()
helper = self.getRmakeHelper(client.uri)
oldStdin = sys.stdin
self.buildCfg.cleanAfterCook = False
self.buildCfg.configLine('[context1]')
try:
trv = self.addComponent('simple:source', '1-1', '',
[('simple.recipe', recipes.simpleRecipe)])
jobId = self.discardOutput(helper.buildTroves, ['simple{context1}'])
helper.waitForJob(jobId)
finally:
self.buildCfg.cleanAfterCook = True
pid, master_fd = os.forkpty()
if not pid:
try:
coveragehook.install()
helper.startChrootSession(jobId, 'simple', ['/bin/sh'])
sys.stdout.flush()
coveragehook.save()
finally:
os._exit(0)
try:
count = 0
data = ''
while not data and count < 30:
try:
data = _readIfReady(master_fd)
except OSError, err:
if err.errno == errno.EIO:
os.waitpid(pid, 0)
raise testsuite.SkipTestException(
"testChrootSession failed yet again")
raise
count += 1
assert(data)
os.write(master_fd, 'echo "this is a test"\n')
data = ''
# White out bash version
r = re.compile(r"sh-[^$]*\$")
expected = 'echo "this is a test"\r\r\nthis is a test\r\r\nsh-X.XX$ '
count = 0
while not data == expected and count < 60:
data += r.sub("sh-X.XX$", str(_readIfReady(master_fd)), 1)
count += 1
self.assertEquals(data, expected)
os.write(master_fd, 'exit\n')
data = _readIfReady(master_fd)
while True:
try:
data += _readIfReady(master_fd)
except OSError:
os.waitpid(pid, 0)
break
expected = 'exit\r\r\nexit\r\r\n*** Connection closed by remote host ***\r\n'
count = 0
while not data == expected and count < 60:
try:
data += _readIfReady(master_fd)
except OSError:
break
count += 1
self.assertEquals(data, expected)
finally:
os.close(master_fd)
| apache-2.0 | -3,949,263,921,289,999,000 | 34.375479 | 89 | 0.56428 | false | 4.012603 | true | false | false |
gdanezis/rousseau-chain | rousseau-package/consensus.py | 1 | 10893 | # This is a convergence simulation for gossip based consensus.
import json
import time
import logging
from os import urandom
from random import sample, shuffle
from binascii import hexlify
from collections import defaultdict, Counter
from hashlib import sha256
from struct import pack
def make_shard_map(num = 100):
""" Makes a map for 'num' shards (defaults to 100). """
limits = []
MAX = 2**16
for l in range(0, MAX - 1, MAX / num):
l_lower = hexlify(pack(">H", l)) + ("00" * 20)
limits.append(l_lower)
limits = limits + ["f" * 64]
shard_map = []
for i, (b0, b1) in enumerate(zip(limits[:-1],limits[1:])):
shard_map.append((i, (b0, b1)))
shard_map = dict(shard_map)
return shard_map
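# Example of the resulting layout (shown as a rough illustration): with num=2
# the returned map is
#   {0: ('0000' + '00' * 20, '8000' + '00' * 20),
#    1: ('8000' + '00' * 20, 'f' * 64)}
# i.e. shard i owns every identifier whose hex string sorts within its
# half-open [lower, upper) bounds.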
def within_ID(idx, b0, b1):
""" Tests whether an object identifer is within the
remit of the shard bounds. """
return b0 <= idx < b1
def within_TX(Tx, b0, b1):
""" Test whether the transaction and its dependencies are
within the shard bounds. """
idx, deps, outs, txdata = Tx
if within_ID(idx, b0, b1):
return True
if any(within_ID(d, b0, b1) for d in deps):
return True
if any(within_ID(d, b0, b1) for d in outs):
return True
return False
def h(data):
""" Define the hash function used in the system. This is used to
derive transaction and object identifiers. """
return hexlify(sha256(data).digest()[:20])
def packageTx(data, deps, num_out):
""" Package some transaction data into an appropriate identifier,
and resulting new object identifiers. """
hx = sha256(data)
for d in sorted(deps):
hx.update(d)
actualID = hx.digest()
actualID = actualID[:-2] + pack("H", 0)
out = []
for i in range(num_out):
out.append(actualID[:-2] + pack("H", i+1))
return (hexlify(actualID), sorted(deps), map(hexlify,out), data)
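# Minimal usage sketch for the helpers above; the _demo_* name is a placeholder
# added for illustration and is not used anywhere else in the module.
def _demo_package_and_route(num_shards=2):
    """Package a toy chain of two transactions and report, for each one,
    which shard indices would have to process it."""
    shard_map = make_shard_map(num_shards)
    genesis = packageTx("genesis-data", [], 1)   # no inputs, one fresh object
    coin = genesis[2][0]                         # identifier of that object
    spend = packageTx("spend-data", [coin], 1)   # consumes it, creates another
    routes = {}
    for name, tx in [("genesis", genesis), ("spend", spend)]:
        routes[name] = [i for i, (b0, b1) in sorted(shard_map.items())
                        if within_TX(tx, b0, b1)]
    return routes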
class Node:
""" A class representing an authority participating in the consensus. """
def __init__(self, start = [], quorum=1, name = None, shard=None):
self.transactions = {}
self.quorum = quorum
self.name = name if name is not None else urandom(16)
self.pending_vote = defaultdict(set)
if shard is None:
self.shard = ["0"*64, "f"*64]
else:
self.shard = shard
self.pending_available = set(o for o in start if self._within_ID(o))
self.pending_used = set()
self.commit_yes = set()
self.commit_no = set()
# self.commit_available = set(start)
self.commit_used = set()
self.quiet = False
if __debug__:
self.start = set(o for o in start if self._within_ID(o))
self.cache = { }
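        # Summary of the per-node state set up above (descriptive comment only):
        #   pending_vote      tx id -> set of (node name, deps, yes/no) votes seen
        #   pending_available object ids in this shard believed to be spendable
        #   pending_used      (object id, tx id) pairs reserved when this node votes 'yes'
        #   commit_yes/commit_no   ids of transactions finally accepted/rejected
        #   commit_used       object ids consumed by committed transactions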
def _within_ID(self, idx):
""" Tests whether an object identifer is within the
remit of this Node. """
return within_ID(idx, self.shard[0], self.shard[1])
def _within_TX(self, Tx):
""" Test whether the transaction and its dependencies are
within the remit of this Node. """
## Tests whether a transaction is related to this node in
## any way. If not there is no case for processing it.
return within_TX(Tx, self.shard[0], self.shard[1])
def gossip_towards(self, other_node):
""" A primitive way to probagate information. """
for k, v in self.pending_vote.iteritems():
other_node.pending_vote[k] |= v
# Should we process votes again here?
other_node.commit_yes |= self.commit_yes
other_node.commit_no |= self.commit_no
assert other_node.commit_yes & other_node.commit_no == set()
# other_node.commit_available |= self.commit_available
other_node.commit_used |= self.commit_used
def on_vote(self, full_tx, vote):
""" What the Node does when a transaction vote is cast. """
pass
def on_commit(self, full_tx, yesno):
""" What to do when a transaction commit is cast. """
pass
def process(self, Tx):
""" Process a transaction to vote or commit it. """
if not self._within_TX(Tx):
return
# Cache the transaction
self.transactions[Tx[0]] = Tx
# Process the transaction
logging.info("Process %s (%s)" % (Tx[0][:8], self.name))
x = True
while(x):
x = self._process(Tx)
def do_commit_yes(self, Tx):
""" What to do when commiting a transaction to the positive log. """
if __debug__:
self.cache[Tx[0]] = Tx
idx, deps, new_obj, txdata = Tx
self.commit_yes.add(idx)
self.pending_available |= set(o for o in new_obj if self._within_ID(o)) ## Add new transactions here
self.commit_used |= set(o for o in deps if self._within_ID(o))
def _check_invariant(self):
""" An internal debugging function to ensure all invariants hold. """
all_objects = set(self.start)
used_objects = set()
for txa in self.commit_yes:
assert txa in self.cache
idx, deps, new_obj, data = self.cache[txa]
all_objects |= set(o for o in new_obj if self._within_ID(o))
used_objects |= set(o for o in deps if self._within_ID(o))
assert all_objects == self.pending_available
assert used_objects == self.commit_used
for o in self.commit_used:
assert self._within_ID(o)
assert used_objects <= all_objects
potentially_used = { xd for xd, xtx in self.pending_used if xtx not in self.commit_no}
actually_available = self.pending_available - potentially_used
assert (all_objects - used_objects) - potentially_used == actually_available
return True
def _process(self, Tx):
if __debug__:
self.cache[Tx[0]] = Tx
self._check_invariant()
if not self._within_TX(Tx):
return False
idx, deps, new_obj, txdata = Tx
all_deps = set(deps)
deps = {d for d in deps if self._within_ID(d)}
new_obj = set(new_obj) # By construction no repeats & fresh names
if (idx in self.commit_yes or idx in self.commit_no):
# Do not process twice
logging.info("Do nothing for %s (%s)" % (idx[:6], self.name))
return False # No further progress can be made
else:
if deps & self.commit_used != set():
# Some dependencies are used already!
# So there is no way we will ever accept this
# and neither will anyone else
self.commit_no.add(idx)
self.on_commit( Tx, False )
logging.info("Commit no for %s (%s)" % (idx[:6], self.name))
return False # there is no further work on this.
# If we cannot exclude it out of hand then we kick in
# the consensus protocol by considering it a candidate.
xdeps = tuple(sorted(list(deps)))
if not ( (self.name, xdeps, True) in self.pending_vote[idx] or (self.name, xdeps, False) in self.pending_vote[idx]):
# We have not considered this as a pending candidate before
# So now we have to vote on it.
if deps.issubset(self.pending_available):
# We have enough information on the transactions this
# depends on, so we can vote.
# Make a list of used transactions:
used = { xd for xd, xtx in self.pending_used if xtx not in self.commit_no}
# and xd not in self.commit_used }
## CHECK CORRECTNESS: Do we update on things that are eventually used?
if set(deps) & used == set() and set(deps) & self.commit_used == set():
# We cast a 'yes' vote -- since it seems that there
# are no conflicts for this transaction in our pending list.
self.pending_vote[idx].add( (self.name, xdeps, True) )
self.pending_used |= set((d, idx) for d in deps)
self.on_vote( Tx, (self.name, xdeps, True) )
# TODO: add new transactions to available here
# Hm, actually we should not until it is confirmed.
# self.pending_available |= new_obj ## Add new transactions here
logging.info("Pending yes for %s (%s)" % (idx[:6], self.name))
return True
else:
# We cast a 'no' vote since there is a conflict in our
# history of transactions.
self.pending_vote[idx].add( (self.name, xdeps, False) )
self.on_vote( Tx, (self.name, xdeps, False) )
logging.info("Pending no for %s (%s)" % (idx[:6], self.name))
return True
else:
logging.info("Unknown prerequisites for %s (%s)" % (idx[:6], self.name))
# We continue in case voting helps move things. This
# happens in case others know about this transaction.
if self.shard[0] <= idx < self.shard[1] or deps != set():
            # Only process the final votes if we are in charge of this
# shard for the transaction or any dependencies.
Votes = Counter()
for oname, odeps, ovote in self.pending_vote[idx]:
for d in odeps:
Votes.update( [(d, ovote)] )
yes_vote = all( Votes[(d, True)] >= self.quorum for d in all_deps )
no_vote = any( Votes[(d, False)] >= self.quorum for d in all_deps )
## Time to count votes for this transaction
if yes_vote: # Counter(x for _,x in self.pending_vote[idx])[True] >= self.quorum:
# We have a Quorum for including the transaction. So we update
# all the committed state monotonically.
self.do_commit_yes(Tx)
self.on_commit( Tx, True )
## CHECK CORRECT: Should I add the used transactions to self.pending_used?
logging.info("Commit yes for %s (%s)" % (idx[:6], self.name))
return False
if no_vote: #Counter(x for _,x in self.pending_vote[idx])[False] >= self.quorum:
# So sad: there is a quorum for rejecting this transaction
# so we will now add it to the 'no' bucket.
# Optional TODO: invalidate in the pending lists
self.commit_no.add(idx)
self.on_commit( Tx, False )
logging.info("Commit no for %s (%s)" % (idx[:6], self.name))
return False
return False # No further work
| bsd-2-clause | -6,033,557,547,291,339,000 | 32.829193 | 124 | 0.560543 | false | 3.907102 | false | false | false |
drjnmrh/oztoolz | streams/__main__.py | 1 | 2459 | """
Runs a code review tool pylint for each script in the 'streams' package.
Author: O.Z.
"""
# imports
import os
import sys
from pathlib import Path
from subprocess import Popen
from subprocess import PIPE
from oztoolz.ioutils import select_all_scripts
from oztoolz.ioutils import safe_write_log as write_log
from oztoolz.ioutils import get_current_package_path as find_package_sources
# utility methods
def run_pylint(script_name, package_path):
"""Executes a pylint code analyzer for the given script in a separate
subprocess and writes the stdout of the subprocess into the .txt file.
The created .txt file is named as the script name + '_report' suffix.
Args:
script_name: the name of the script to check.
package_path: the path string to the package, which scripts are
analyzed.
"""
script_path = os.path.join(package_path, script_name)
with Popen(['pylint.exe', script_path], stdout=PIPE) as proc:
script_file_name = str(Path(script_path).relative_to(package_path))
write_log(script_file_name[:-3] + '_report.txt',
os.path.abspath(os.path.join(package_path, 'reports')),
proc.stdout.read(),
sys.stdout)
def review_code():
"""Runs an automatic code review tool 'pylint' for each script of the
'streams' package.
Returns:
list of strings, each entry is a name of the checked script.
"""
package_path = find_package_sources(sys.stdout)
sys.stdout.write("# 'streams' package was found in [" +
package_path + "];\n")
scripts = select_all_scripts(package_path, sys.stdout)
for script_name in scripts:
run_pylint(script_name, package_path)
return scripts
# the main method
def main():
"""Runs code review for all scripts of the 'streams' package and logs out
which scripts were checked.
This method is executed when the whole package is executed.
"""
sys.stdout.write("\nPyLint code review of the 'streams' package:\n")
reviewed_scripts = review_code()
sys.stdout.write("\treviewed scripts:\n")
for script_name in reviewed_scripts:
sys.stdout.write("\t\t" + script_name + "\n")
sys.stdout.write("\ttotal number of the reviewed scripts: " +
str(len(reviewed_scripts)) + ";\n")
sys.stdout.write("OK.\n")
if __name__ == '__main__':
main()
| mit | 664,546,982,584,732,800 | 26.943182 | 77 | 0.647418 | false | 3.812403 | false | false | false |