# -----------------------------------------------------------------------------
# dwaiter/django-filebrowser-old | filebrowser/base.py | license: bsd-3-clause
# -----------------------------------------------------------------------------
# coding: utf-8
# imports
import os, re, datetime
from time import gmtime, strftime
# django imports
from django.conf import settings
# filebrowser imports
from filebrowser.settings import *
from filebrowser.functions import get_file_type, url_join, is_selectable, get_version_path
# PIL import
if STRICT_PIL:
from PIL import Image
else:
try:
from PIL import Image
except ImportError:
import Image
class FileObject(object):
"""
The FileObject represents a File on the Server.
PATH has to be relative to MEDIA_ROOT.
"""
def __init__(self, path):
self.path = path
self.url_rel = path.replace("\\","/")
self.head = os.path.split(path)[0]
self.filename = os.path.split(path)[1]
self.filename_lower = self.filename.lower() # important for sorting
self.filetype = get_file_type(self.filename)
def _filesize(self):
"""
Filesize.
"""
if os.path.isfile(os.path.join(MEDIA_ROOT, self.path)) or os.path.isdir(os.path.join(MEDIA_ROOT, self.path)):
return os.path.getsize(os.path.join(MEDIA_ROOT, self.path))
return ""
filesize = property(_filesize)
def _date(self):
"""
Date.
"""
if os.path.isfile(os.path.join(MEDIA_ROOT, self.path)) or os.path.isdir(os.path.join(MEDIA_ROOT, self.path)):
return os.path.getmtime(os.path.join(MEDIA_ROOT, self.path))
return ""
date = property(_date)
def _datetime(self):
"""
Datetime Object.
"""
return datetime.datetime.fromtimestamp(self.date)
datetime = property(_datetime)
def _extension(self):
"""
Extension.
"""
return u"%s" % os.path.splitext(self.filename)[1]
extension = property(_extension)
def _filetype_checked(self):
if self.filetype == "Folder" and os.path.isdir(self.path_full):
return self.filetype
elif self.filetype != "Folder" and os.path.isfile(self.path_full):
return self.filetype
else:
return ""
filetype_checked = property(_filetype_checked)
def _path_full(self):
"""
Full server PATH including MEDIA_ROOT.
"""
return u"%s" % os.path.join(MEDIA_ROOT, self.path)
path_full = property(_path_full)
def _path_relative(self):
return self.path
path_relative = property(_path_relative)
def _path_relative_directory(self):
"""
Path relative to initial directory.
"""
directory_re = re.compile(r'^(%s)' % (DIRECTORY))
value = directory_re.sub('', self.path)
return u"%s" % value
path_relative_directory = property(_path_relative_directory)
def _folder(self):
directory_re = re.compile(r'^(%s)' % (DIRECTORY.rstrip('/')))
return u"%s/" % directory_re.sub('', self.head)
folder = property(_folder)
def _url_relative(self):
return self.url_rel
url_relative = property(_url_relative)
def _url_full(self):
"""
Full URL including MEDIA_URL.
"""
return u"%s" % url_join(MEDIA_URL, self.url_rel)
url_full = property(_url_full)
def _url_save(self):
"""
URL used for the FileBrowseField.
"""
if SAVE_FULL_URL:
return self.url_full
else:
return self.url_rel
url_save = property(_url_save)
def _url_thumbnail(self):
"""
Thumbnail URL.
"""
if self.filetype == "Image":
return u"%s" % url_join(MEDIA_URL, get_version_path(self.path, 'fb_thumb'))
else:
return ""
url_thumbnail = property(_url_thumbnail)
def url_admin(self):
if self.filetype_checked == "Folder":
directory_re = re.compile(r'^(%s)' % (DIRECTORY))
value = directory_re.sub('', self.path)
return u"%s" % value
else:
return u"%s" % url_join(MEDIA_URL, self.path)
def _dimensions(self):
"""
Image Dimensions.
"""
if self.filetype == 'Image':
try:
im = Image.open(os.path.join(MEDIA_ROOT, self.path))
return im.size
except:
pass
else:
return False
dimensions = property(_dimensions)
def _width(self):
"""
Image Width.
"""
return self.dimensions[0]
width = property(_width)
def _height(self):
"""
Image Height.
"""
return self.dimensions[1]
height = property(_height)
def _orientation(self):
"""
Image Orientation.
"""
if self.dimensions:
if self.dimensions[0] >= self.dimensions[1]:
return "Landscape"
else:
return "Portrait"
else:
return None
orientation = property(_orientation)
def _is_empty(self):
"""
True if Folder is empty, False if not.
"""
if os.path.isdir(self.path_full):
if not os.listdir(self.path_full):
return True
else:
return False
else:
return None
is_empty = property(_is_empty)
def __repr__(self):
return u"%s" % self.url_save
def __str__(self):
return u"%s" % self.url_save
def __unicode__(self):
return u"%s" % self.url_save
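# A minimal usage sketch for FileObject; the path below is hypothetical and this
# assumes a configured Django project where filebrowser's MEDIA_ROOT/MEDIA_URL are set.
def _fileobject_usage_sketch():
    fobj = FileObject('uploads/photos/team.jpg')
    print(fobj.filename, fobj.extension, fobj.filetype)
    print(fobj.url_full)  # MEDIA_URL joined with the relative path
    if fobj.filetype == 'Image' and fobj.dimensions:
        print(fobj.width, fobj.height, fobj.orientation)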
# -----------------------------------------------------------------------------
# thingsboard/thingsboard-gateway | thingsboard_gateway/extensions/mqtt/custom_mqtt_uplink_converter.py | license: apache-2.0
# -----------------------------------------------------------------------------
# Copyright 2021. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from simplejson import dumps
from thingsboard_gateway.connectors.mqtt.mqtt_uplink_converter import MqttUplinkConverter, log
class CustomMqttUplinkConverter(MqttUplinkConverter):
def __init__(self, config):
self.__config = config.get('converter')
self.dict_result = {}
def convert(self, topic, body):
try:
self.dict_result["deviceName"] = topic.split("/")[-1] # device name is everything after the last '/' in the topic, e.g. 'devices/temperature/sensor1' -> 'sensor1'
self.dict_result["deviceType"] = "Thermostat" # device type is hardcoded in this example converter
self.dict_result["telemetry"] = [] # template for the telemetry array
bytes_to_read = body.replace("0x", "") # strip the '0x' prefix (if present) so the payload can be converted to a bytearray
converted_bytes = bytearray.fromhex(bytes_to_read) # Converting incoming data to bytearray
if self.__config.get("extension-config") is not None:
for telemetry_key in self.__config["extension-config"]: # Processing every telemetry key in config for extension
value = 0
for _ in range(self.__config["extension-config"][telemetry_key]): # reading every value with value length from config
value = value*256 + converted_bytes.pop(0) # process and remove byte from processing
telemetry_to_send = {telemetry_key.replace("Bytes", ""): value} # creating telemetry data for sending into Thingsboard
self.dict_result["telemetry"].append(telemetry_to_send) # adding data to telemetry array
else:
self.dict_result["telemetry"] = {"data": int(body, 0)} # if no specific configuration in config file - just send data which received
return self.dict_result
except Exception as e:
log.exception('Error in converter, for config: \n%s\n and message: \n%s\n', dumps(self.__config), body)
log.exception(e)
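# A usage sketch for the converter above; the config and payload values are made up.
# Each '<name>Bytes' entry tells the converter how many bytes to consume for that key.
def _converter_usage_sketch():
    config = {'converter': {'extension-config': {'temperatureBytes': 2, 'humidityBytes': 1}}}
    converter = CustomMqttUplinkConverter(config)
    result = converter.convert('devices/temperature/sensor1', '0x01FF2A')
    # Expected shape: deviceName 'sensor1', deviceType 'Thermostat',
    # telemetry [{'temperature': 511}, {'humidity': 42}]
    return result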
# -----------------------------------------------------------------------------
# derekjchow/models | research/object_detection/utils/label_map_util_test.py | license: apache-2.0
# -----------------------------------------------------------------------------
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.label_map_util."""
import os
import tensorflow as tf
from google.protobuf import text_format
from object_detection.protos import string_int_label_map_pb2
from object_detection.utils import label_map_util
class LabelMapUtilTest(tf.test.TestCase):
def _generate_label_map(self, num_classes):
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
for i in range(1, num_classes + 1):
item = label_map_proto.item.add()
item.id = i
item.name = 'label_' + str(i)
item.display_name = str(i)
return label_map_proto
def test_get_label_map_dict(self):
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
label_map_dict = label_map_util.get_label_map_dict(label_map_path)
self.assertEqual(label_map_dict['dog'], 1)
self.assertEqual(label_map_dict['cat'], 2)
def test_get_label_map_dict_display(self):
label_map_string = """
item {
id:2
display_name:'cat'
}
item {
id:1
display_name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
label_map_dict = label_map_util.get_label_map_dict(
label_map_path, use_display_name=True)
self.assertEqual(label_map_dict['dog'], 1)
self.assertEqual(label_map_dict['cat'], 2)
def test_load_bad_label_map(self):
label_map_string = """
item {
id:0
name:'class that should not be indexed at zero'
}
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
with self.assertRaises(ValueError):
label_map_util.load_labelmap(label_map_path)
def test_load_label_map_with_background(self):
label_map_string = """
item {
id:0
name:'background'
}
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
label_map_dict = label_map_util.get_label_map_dict(label_map_path)
self.assertEqual(label_map_dict['background'], 0)
self.assertEqual(label_map_dict['dog'], 1)
self.assertEqual(label_map_dict['cat'], 2)
def test_get_label_map_dict_with_fill_in_gaps_and_background(self):
label_map_string = """
item {
id:3
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
label_map_dict = label_map_util.get_label_map_dict(
label_map_path, fill_in_gaps_and_background=True)
self.assertEqual(label_map_dict['background'], 0)
self.assertEqual(label_map_dict['dog'], 1)
self.assertEqual(label_map_dict['2'], 2)
self.assertEqual(label_map_dict['cat'], 3)
self.assertEqual(len(label_map_dict), max(label_map_dict.values()) + 1)
def test_keep_categories_with_unique_id(self):
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'child'
}
item {
id:1
name:'person'
}
item {
id:1
name:'n00007846'
}
"""
text_format.Merge(label_map_string, label_map_proto)
categories = label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=3)
self.assertListEqual([{
'id': 2,
'name': u'cat'
}, {
'id': 1,
'name': u'child'
}], categories)
def test_convert_label_map_to_categories_no_label_map(self):
categories = label_map_util.convert_label_map_to_categories(
None, max_num_classes=3)
expected_categories_list = [{
'name': u'category_1',
'id': 1
}, {
'name': u'category_2',
'id': 2
}, {
'name': u'category_3',
'id': 3
}]
self.assertListEqual(expected_categories_list, categories)
def test_convert_label_map_to_categories(self):
label_map_proto = self._generate_label_map(num_classes=4)
categories = label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=3)
expected_categories_list = [{
'name': u'1',
'id': 1
}, {
'name': u'2',
'id': 2
}, {
'name': u'3',
'id': 3
}]
self.assertListEqual(expected_categories_list, categories)
def test_convert_label_map_to_categories_with_few_classes(self):
label_map_proto = self._generate_label_map(num_classes=4)
cat_no_offset = label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=2)
expected_categories_list = [{
'name': u'1',
'id': 1
}, {
'name': u'2',
'id': 2
}]
self.assertListEqual(expected_categories_list, cat_no_offset)
def test_get_max_label_map_index(self):
num_classes = 4
label_map_proto = self._generate_label_map(num_classes=num_classes)
max_index = label_map_util.get_max_label_map_index(label_map_proto)
self.assertEqual(num_classes, max_index)
def test_create_category_index(self):
categories = [{'name': u'1', 'id': 1}, {'name': u'2', 'id': 2}]
category_index = label_map_util.create_category_index(categories)
self.assertDictEqual({
1: {
'name': u'1',
'id': 1
},
2: {
'name': u'2',
'id': 2
}
}, category_index)
def test_create_categories_from_labelmap(self):
label_map_string = """
item {
id:1
name:'dog'
}
item {
id:2
name:'cat'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
categories = label_map_util.create_categories_from_labelmap(label_map_path)
self.assertListEqual([{
'name': u'dog',
'id': 1
}, {
'name': u'cat',
'id': 2
}], categories)
def test_create_category_index_from_labelmap(self):
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
category_index = label_map_util.create_category_index_from_labelmap(
label_map_path)
self.assertDictEqual({
1: {
'name': u'dog',
'id': 1
},
2: {
'name': u'cat',
'id': 2
}
}, category_index)
def test_create_category_index_from_labelmap_display(self):
label_map_string = """
item {
id:2
name:'cat'
display_name:'meow'
}
item {
id:1
name:'dog'
display_name:'woof'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
self.assertDictEqual({
1: {
'name': u'dog',
'id': 1
},
2: {
'name': u'cat',
'id': 2
}
}, label_map_util.create_category_index_from_labelmap(
label_map_path, False))
self.assertDictEqual({
1: {
'name': u'woof',
'id': 1
},
2: {
'name': u'meow',
'id': 2
}
}, label_map_util.create_category_index_from_labelmap(label_map_path))
if __name__ == '__main__':
tf.test.main()
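# A short sketch of how the utilities exercised above are typically called outside of
# tests; the label map content is made up and object_detection must be importable.
def _label_map_usage_sketch(tmp_dir):
    label_map_path = os.path.join(tmp_dir, 'label_map.pbtxt')
    with tf.gfile.Open(label_map_path, 'wb') as f:
        f.write("item { id:1 name:'dog' } item { id:2 name:'cat' }")
    name_to_id = label_map_util.get_label_map_dict(label_map_path)  # {'dog': 1, 'cat': 2}
    category_index = label_map_util.create_category_index_from_labelmap(label_map_path)
    return name_to_id, category_index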
# -----------------------------------------------------------------------------
# iotile/coretools | iotilesensorgraph/test/test_datastream.py | license: gpl-3.0
# -----------------------------------------------------------------------------
"""Tests for DataStream objects."""
import pytest
from iotile.core.exceptions import InternalError
from iotile.sg import DataStream, DataStreamSelector
def test_stream_type_parsing():
"""Make sure we can parse each type of stream."""
# Make sure parsing stream type works
stream = DataStream.FromString('buffered 1')
assert stream.stream_type == stream.BufferedType
stream = DataStream.FromString(u'buffered 1')
assert stream.stream_type == stream.BufferedType
stream = DataStream.FromString('unbuffered 1')
assert stream.stream_type == stream.UnbufferedType
stream = DataStream.FromString(u'unbuffered 1')
assert stream.stream_type == stream.UnbufferedType
stream = DataStream.FromString('counter 1')
assert stream.stream_type == stream.CounterType
stream = DataStream.FromString(u'counter 1')
assert stream.stream_type == stream.CounterType
stream = DataStream.FromString('constant 1')
assert stream.stream_type == stream.ConstantType
stream = DataStream.FromString(u'constant 1')
assert stream.stream_type == stream.ConstantType
stream = DataStream.FromString('output 1')
assert stream.stream_type == stream.OutputType
stream = DataStream.FromString(u'output 1')
assert stream.stream_type == stream.OutputType
def test_stream_id_parsing():
"""Make sure we can parse stream ids."""
stream = DataStream.FromString('buffered 1')
assert stream.stream_id == 1
stream = DataStream.FromString('buffered 0x100')
assert stream.stream_id == 0x100
stream = DataStream.FromString(u'buffered 1')
assert stream.stream_id == 1
stream = DataStream.FromString(u'buffered 0x100')
assert stream.stream_id == 0x100
def test_system_parsing():
"""Make sure we can parse the system prefix."""
stream = DataStream.FromString('buffered 1')
assert stream.system is False
stream = DataStream.FromString(u'buffered 1')
assert stream.system is False
stream = DataStream.FromString('system buffered 1')
assert stream.system is True
stream = DataStream.FromString(u'system buffered 1')
assert stream.system is True
def test_stringification():
"""Make sure we can stringify DataStream objects."""
stream1 = DataStream.FromString('system buffered 1')
stream2 = DataStream.FromString('buffered 0xF')
assert str(stream1) == str('system buffered 1')
assert str(stream2) == str('buffered 15')
def test_selector_parsing():
"""Make sure we can parse DataStreamSelector strings."""
# Make sure parsing stream type works
stream = DataStreamSelector.FromString('buffered 1')
assert stream.match_type == DataStream.BufferedType
stream = DataStreamSelector.FromString(u'buffered 1')
assert stream.match_type == DataStream.BufferedType
stream = DataStreamSelector.FromString('unbuffered 1')
assert stream.match_type == DataStream.UnbufferedType
stream = DataStreamSelector.FromString(u'unbuffered 1')
assert stream.match_type == DataStream.UnbufferedType
stream = DataStreamSelector.FromString('counter 1')
assert stream.match_type == DataStream.CounterType
stream = DataStreamSelector.FromString(u'counter 1')
assert stream.match_type == DataStream.CounterType
stream = DataStreamSelector.FromString('constant 1')
assert stream.match_type == DataStream.ConstantType
stream = DataStreamSelector.FromString(u'constant 1')
assert stream.match_type == DataStream.ConstantType
stream = DataStreamSelector.FromString('output 1')
assert stream.match_type == DataStream.OutputType
stream = DataStreamSelector.FromString(u'output 1')
assert stream.match_type == DataStream.OutputType
def test_stream_selector_id_parsing():
"""Make sure we can parse stream ids."""
stream = DataStreamSelector.FromString('buffered 1')
assert stream.match_id == 1
assert stream.match_spec == DataStreamSelector.MatchUserOnly
stream = DataStreamSelector.FromString('buffered 0x100')
assert stream.match_id == 0x100
assert stream.match_spec == DataStreamSelector.MatchUserOnly
stream = DataStreamSelector.FromString(u'buffered 1')
assert stream.match_id == 1
assert stream.match_spec == DataStreamSelector.MatchUserOnly
stream = DataStreamSelector.FromString(u'buffered 0x100')
assert stream.match_id == 0x100
assert stream.match_spec == DataStreamSelector.MatchUserOnly
stream = DataStreamSelector.FromString(u'system buffered 0x100')
assert stream.match_id == 0x100
assert stream.match_spec == DataStreamSelector.MatchSystemOnly
stream = DataStreamSelector.FromString(u'all buffered')
assert stream.match_id is None
assert stream.match_spec == DataStreamSelector.MatchUserAndBreaks
stream = DataStreamSelector.FromString(u'all user buffered')
assert stream.match_id is None
assert stream.match_spec == DataStreamSelector.MatchUserOnly
stream = DataStreamSelector.FromString(u'all combined buffered')
assert stream.match_id is None
assert stream.match_spec == DataStreamSelector.MatchCombined
stream = DataStreamSelector.FromString(u'all system buffered')
assert stream.match_id is None
assert stream.match_spec == DataStreamSelector.MatchSystemOnly
def test_matching():
"""Test selector stream matching."""
sel = DataStreamSelector.FromString(u'all system buffered')
assert sel.matches(DataStream.FromString('system buffered 1'))
assert not sel.matches(DataStream.FromString('buffered 1'))
assert not sel.matches(DataStream.FromString('counter 1'))
sel = DataStreamSelector.FromString(u'all user outputs')
assert sel.matches(DataStream.FromString('output 1'))
assert not sel.matches(DataStream.FromString('system output 1'))
assert not sel.matches(DataStream.FromString('counter 1'))
sel = DataStreamSelector.FromString(u'all combined outputs')
assert sel.matches(DataStream.FromString('output 1'))
assert sel.matches(DataStream.FromString('system output 1'))
assert not sel.matches(DataStream.FromString('counter 1'))
sel = DataStreamSelector.FromString(u'all outputs')
assert sel.matches(DataStream.FromString('output 1'))
assert sel.matches(DataStream.FromString('system output 1024'))
assert not sel.matches(DataStream.FromString('system output 1'))
assert not sel.matches(DataStream.FromString('counter 1'))
def test_encoding():
"""Test data stream and selector encoding."""
sel = DataStreamSelector.FromString(u'all system output')
assert sel.encode() == 0x5FFF
sel = DataStreamSelector.FromString(u'all user output')
assert sel.encode() == 0x57FF
sel = DataStreamSelector.FromString(u'all output')
assert sel.encode() == 0xD7FF
sel = DataStreamSelector.FromString(u'all combined output')
assert sel.encode() == 0xDFFF
stream = DataStream.FromString('output 1')
assert stream.encode() == 0x5001
stream = DataStream.FromString('unbuffered 10')
assert stream.encode() == 0x100a
def test_selector_from_encoded():
"""Make sure we can create a selector from an encoded value."""
sel = DataStreamSelector.FromEncoded(0x5FFF)
assert str(sel) == 'all system outputs'
sel = DataStreamSelector.FromEncoded(0xD7FF)
assert str(sel) == 'all outputs'
sel = DataStreamSelector.FromEncoded(0x100a)
assert str(sel) == 'unbuffered 10'
assert str(DataStreamSelector.FromEncoded(DataStreamSelector.FromString('all combined output').encode())) == 'all combined outputs'
def test_buffered_pluralization():
"""Make sure we don't incorrectly pluralize buffered streams."""
sel = DataStreamSelector.FromString('all buffered')
assert str(sel) == 'all buffered'
def test_important_inputs():
"""Make sure we support matching important inputs and outputs."""
imp_stream = DataStream.FromString('system input 1024')
imp_store_stream = DataStream.FromString('system input 1536')
assert imp_stream.important is True
assert imp_store_stream.important is True
assert imp_stream.associated_stream() == DataStream.FromString('system output 1024')
assert imp_store_stream.associated_stream() == DataStream.FromString('system buffered 1536')
random_stream = DataStream.FromString('unbuffered 1024')
assert random_stream.important is False
with pytest.raises(InternalError):
random_stream.associated_stream()
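# A compact sketch of the public API exercised above, using only names already
# imported at the top of this module:
def _datastream_usage_sketch():
    stream = DataStream.FromString('system output 2')
    selector = DataStreamSelector.FromString('all system outputs')
    assert selector.matches(stream)
    return stream.encode(), selector.encode()  # 16-bit encodings used on the device side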
# -----------------------------------------------------------------------------
# gsi-upm/senpy | senpy/__init__.py | license: apache-2.0
# -----------------------------------------------------------------------------
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Sentiment analysis server in Python
"""
from .version import __version__
import logging
logger = logging.getLogger(__name__)
logger.info('Using senpy version: {}'.format(__version__))
from .utils import easy, easy_load, easy_test # noqa: F401
from .models import * # noqa: F401,F403
from .plugins import * # noqa: F401,F403
from .extensions import * # noqa: F401,F403
__all__ = ['api', 'blueprints', 'cli', 'extensions', 'models', 'plugins']
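# A minimal sanity-check sketch: __version__ is re-exported above, so the installed
# package can report its version; anything beyond this depends on runtime plugin setup.
def _senpy_version_sketch():
    import senpy
    return senpy.__version__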
# -----------------------------------------------------------------------------
# grengojbo/st2 | st2client/st2client/models/core.py | license: apache-2.0
# -----------------------------------------------------------------------------
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import logging
from functools import wraps
import six
from six.moves import urllib
from st2client.utils import httpclient
LOG = logging.getLogger(__name__)
def add_auth_token_to_kwargs_from_env(func):
@wraps(func)
def decorate(*args, **kwargs):
if not kwargs.get('token') and os.environ.get('ST2_AUTH_TOKEN', None):
kwargs['token'] = os.environ.get('ST2_AUTH_TOKEN')
return func(*args, **kwargs)
return decorate
class Resource(object):
# An alias to use for the resource if different than the class name.
_alias = None
# Display name of the resource. This may be different than its resource
# name specifically when the resource name is composed of multiple words.
_display_name = None
# URL path for the resource.
_url_path = None
# Plural form of the resource name. This will be used to build the
# latter part of the REST URL.
_plural = None
# Plural form of the resource display name.
_plural_display_name = None
# A list of class attributes which will be included in __repr__ return value
_repr_attributes = []
def __init__(self, *args, **kwargs):
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
def to_dict(self, exclude_attributes=None):
"""
Return a dictionary representation of this object.
:param exclude_attributes: Optional list of attributes to exclude.
:type exclude_attributes: ``list``
:rtype: ``dict``
"""
exclude_attributes = exclude_attributes or []
attributes = self.__dict__.keys()
attributes = [attr for attr in attributes if not attr.startswith('__') and
attr not in exclude_attributes]
result = {}
for attribute in attributes:
value = getattr(self, attribute, None)
result[attribute] = value
return result
@classmethod
def get_alias(cls):
return cls._alias if cls._alias else cls.__name__
@classmethod
def get_display_name(cls):
return cls._display_name if cls._display_name else cls.__name__
@classmethod
def get_plural_name(cls):
if not cls._plural:
raise Exception('The %s class is missing class attributes '
'in its definition.' % cls.__name__)
return cls._plural
@classmethod
def get_plural_display_name(cls):
return (cls._plural_display_name
if cls._plural_display_name
else cls._plural)
@classmethod
def get_url_path_name(cls):
if cls._url_path:
return cls._url_path
return cls.get_plural_name().lower()
def serialize(self):
return dict((k, v)
for k, v in six.iteritems(self.__dict__)
if not k.startswith('_'))
@classmethod
def deserialize(cls, doc):
if type(doc) is not dict:
doc = json.loads(doc)
return cls(**doc)
def __str__(self):
return str(self.__repr__())
def __repr__(self):
if not self._repr_attributes:
return super(Resource, self).__repr__()
attributes = []
for attribute in self._repr_attributes:
value = getattr(self, attribute, None)
attributes.append('%s=%s' % (attribute, value))
attributes = ','.join(attributes)
class_name = self.__class__.__name__
result = '<%s %s>' % (class_name, attributes)
return result
class ResourceManager(object):
def __init__(self, resource, endpoint, cacert=None, debug=False):
self.resource = resource
self.debug = debug
self.client = httpclient.HTTPClient(endpoint, cacert=cacert, debug=debug)
@staticmethod
def handle_error(response):
try:
content = response.json()
fault = content.get('faultstring', '') if content else ''
if fault:
response.reason += '\nMESSAGE: %s' % fault
except Exception as e:
response.reason += ('\nUnable to retrieve detailed message '
'from the HTTP response. %s\n' % str(e))
response.raise_for_status()
@add_auth_token_to_kwargs_from_env
def get_all(self, **kwargs):
# TODO: This is ugly, stop abusing kwargs
url = '/%s' % self.resource.get_url_path_name()
limit = kwargs.pop('limit', None)
pack = kwargs.pop('pack', None)
prefix = kwargs.pop('prefix', None)
params = {}
if limit and limit <= 0:
limit = None
if limit:
params['limit'] = limit
if pack:
params['pack'] = pack
if prefix:
params['prefix'] = prefix
response = self.client.get(url=url, params=params, **kwargs)
if response.status_code != 200:
self.handle_error(response)
return [self.resource.deserialize(item)
for item in response.json()]
@add_auth_token_to_kwargs_from_env
def get_by_id(self, id, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), id)
response = self.client.get(url, **kwargs)
if response.status_code == 404:
return None
if response.status_code != 200:
self.handle_error(response)
return self.resource.deserialize(response.json())
@add_auth_token_to_kwargs_from_env
def get_property(self, id_, property_name, self_deserialize=True, **kwargs):
"""
Gets a property of a Resource.
id_ : Id of the resource
property_name: Name of the property
self_deserialize: if True, deserialize the response using this resource's deserialize method.
"""
token = None
if kwargs:
token = kwargs.pop('token', None)
url = '/%s/%s/%s/?%s' % (self.resource.get_url_path_name(), id_, property_name,
urllib.parse.urlencode(kwargs))
else:
url = '/%s/%s/%s/' % (self.resource.get_url_path_name(), id_, property_name)
response = self.client.get(url, token=token) if token else self.client.get(url)
if response.status_code == 404:
return None
if response.status_code != 200:
self.handle_error(response)
if self_deserialize:
return [self.resource.deserialize(item) for item in response.json()]
else:
return response.json()
@add_auth_token_to_kwargs_from_env
def get_by_ref_or_id(self, ref_or_id, **kwargs):
return self.get_by_id(id=ref_or_id, **kwargs)
@add_auth_token_to_kwargs_from_env
def query(self, **kwargs):
if not kwargs:
raise Exception('Query parameter is not provided.')
if 'limit' in kwargs and kwargs.get('limit') <= 0:
kwargs.pop('limit')
token = kwargs.get('token', None)
params = {}
for k, v in six.iteritems(kwargs):
if k != 'token':
params[k] = v
url = '/%s/?%s' % (self.resource.get_url_path_name(),
urllib.parse.urlencode(params))
response = self.client.get(url, token=token) if token else self.client.get(url)
if response.status_code == 404:
return []
if response.status_code != 200:
self.handle_error(response)
items = response.json()
instances = [self.resource.deserialize(item) for item in items]
return instances
@add_auth_token_to_kwargs_from_env
def get_by_name(self, name_or_id, **kwargs):
instances = self.query(name=name_or_id, **kwargs)
if not instances:
return None
else:
if len(instances) > 1:
raise Exception('More than one %s named "%s" are found.' %
(self.resource.__name__.lower(), name_or_id))
return instances[0]
@add_auth_token_to_kwargs_from_env
def create(self, instance, **kwargs):
url = '/%s' % self.resource.get_url_path_name()
response = self.client.post(url, instance.serialize(), **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
@add_auth_token_to_kwargs_from_env
def update(self, instance, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance.id)
response = self.client.put(url, instance.serialize(), **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
@add_auth_token_to_kwargs_from_env
def delete(self, instance, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance.id)
response = self.client.delete(url, **kwargs)
if response.status_code not in [200, 204, 404]:
self.handle_error(response)
return False
return True
@add_auth_token_to_kwargs_from_env
def delete_by_id(self, instance_id, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance_id)
response = self.client.delete(url, **kwargs)
if response.status_code not in [200, 204, 404]:
self.handle_error(response)
return False
try:
resp_json = response.json()
if resp_json:
return resp_json
except Exception:
pass
return True
class ActionAliasResourceManager(ResourceManager):
def __init__(self, resource, endpoint, cacert=None, debug=False):
endpoint = endpoint.replace('v1', 'exp')
self.resource = resource
self.debug = debug
self.client = httpclient.HTTPClient(root=endpoint, cacert=cacert, debug=debug)
class LiveActionResourceManager(ResourceManager):
@add_auth_token_to_kwargs_from_env
def re_run(self, execution_id, parameters=None, **kwargs):
url = '/%s/%s/re_run' % (self.resource.get_url_path_name(), execution_id)
data = {}
if parameters:
data['parameters'] = parameters
response = self.client.post(url, data, **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
class TriggerInstanceResourceManager(ResourceManager):
@add_auth_token_to_kwargs_from_env
def re_emit(self, trigger_instance_id):
url = '/%s/%s/re_emit' % (self.resource.get_url_path_name(), trigger_instance_id)
response = self.client.post(url, None)
if response.status_code != 200:
self.handle_error(response)
return response.json()
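# A sketch of how Resource and ResourceManager fit together; the resource class and
# endpoint below are hypothetical and no HTTP request is issued by this snippet.
def _st2client_usage_sketch():
    class Rule(Resource):
        _display_name = 'Rule'
        _plural = 'Rules'
    manager = ResourceManager(Rule, 'https://localhost:9101/v1', cacert=None, debug=False)
    # manager.get_all(token='...') would issue GET /rules and deserialize each item
    return manager.resource.get_url_path_name()  # 'rules'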
# -----------------------------------------------------------------------------
# lammps/lammps-packages | mingw-cross/cmake-win-on-linux.py | license: mit
# -----------------------------------------------------------------------------
#!/usr/bin/env python
# Script to build Windows installer packages for LAMMPS
# (c) 2017,2018,2019,2020 Axel Kohlmeyer <[email protected]>
from __future__ import print_function
import sys,os,shutil,glob,re,subprocess,tarfile,gzip,time,inspect
try: from urllib.request import urlretrieve as geturl
except: from urllib import urlretrieve as geturl
try:
import multiprocessing
numcpus = multiprocessing.cpu_count()
except:
numcpus = 1
# helper functions
def error(str=None):
if not str: print(helpmsg)
else: print(sys.argv[0],"ERROR:",str)
sys.exit()
def getbool(arg,keyword):
if arg in ['yes','Yes','Y','y','on','1','True','true']:
return True
elif arg in ['no','No','N','n','off','0','False','false']:
return False
else:
error("Unknown %s option: %s" % (keyword,arg))
def fullpath(path):
return os.path.abspath(os.path.expanduser(path))
def getexe(url,name):
gzname = name + ".gz"
geturl(url,gzname)
with gzip.open(gzname,'rb') as gz_in:
with open(name,'wb') as f_out:
shutil.copyfileobj(gz_in,f_out)
gz_in.close()
f_out.close()
os.remove(gzname)
def system(cmd):
try:
txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
except subprocess.CalledProcessError as e:
print("Command '%s' returned non-zero exit status" % e.cmd)
error(e.output.decode('UTF-8'))
return txt.decode('UTF-8')
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
# record location and name of python script
homedir, exename = os.path.split(os.path.abspath(inspect.getsourcefile(lambda:0)))
# default settings help message and default settings
bitflag = '64'
parflag = 'no'
pythonflag = False
thrflag = 'omp'
revflag = 'stable'
verbose = False
gitdir = os.path.join(homedir,"lammps")
adminflag = True
msixflag = False
helpmsg = """
Usage: python %s -b <bits> -j <cpus> -p <mpi> -t <thread> -y <yes|no> -r <rev> -v <yes|no> -g <folder> -a <yes|no>
Flags (all flags are optional, defaults listed below):
-b : select Windows variant (default value: %s)
-b 32 : build for 32-bit Windows
-b 64 : build for 64-bit Windows
-j : set number of CPUs for parallel make (default value: %d)
-j <num> : set to any reasonable number or 1 for serial make
-p : select message passing parallel build (default value: %s)
-p mpi : build an MPI parallel version with MPICH2 v1.4.1p1
-p no : build a serial version using MPI STUBS library
-t : select thread support (default value: %s)
-t omp : build with threads via OpenMP enabled
-t no : build with thread support disabled
-y : select python support (default value: %s)
-y yes : build with python included
-y no : build without python
-r : select LAMMPS source revision to build (default value: %s)
-r stable : download and build the latest stable LAMMPS version
-r unstable : download and build the latest patch release LAMMPS version
-r master : download and build the latest development snapshot
-r patch_<date> : download and build a specific patch release
-r <commit hash> : download and build a specific snapshot version (full 40-character git commit hash)
-v : select output verbosity
-v yes : print progress messages and output of make commands
-v no : print only progress messages
-g : select folder with git checkout of LAMMPS sources
-g <folder> : use LAMMPS checkout in <folder> (default value: %s)
-a : select admin level installation (default value: yes)
-a yes : the created installer requires to be run at admin level
and LAMMPS is installed to be accessible by all users
-a no : the created installer runs without admin privilege and
LAMMPS is installed into the current user's appdata folder
-a msix : same as "no" but adjust for creating an MSIX package
Example:
python %s -r unstable -t omp -p mpi
""" % (exename,bitflag,numcpus,parflag,thrflag,pythonflag,revflag,gitdir,exename)
# parse arguments
argv = sys.argv
argc = len(argv)
i = 1
while i < argc:
if i+1 >= argc:
print("\nMissing argument to flag:",argv[i])
error()
if argv[i] == '-b':
bitflag = argv[i+1]
elif argv[i] == '-j':
numcpus = int(argv[i+1])
elif argv[i] == '-p':
parflag = argv[i+1]
elif argv[i] == '-t':
thrflag = argv[i+1]
elif argv[i] == '-y':
pythonflag = getbool(argv[i+1],"python")
elif argv[i] == '-r':
revflag = argv[i+1]
elif argv[i] == '-v':
verbose = getbool(argv[i+1],"verbose")
elif argv[i] == '-a':
if argv[i+1] in ['msix','MSIX']:
adminflag = False
msixflag = True
else:
msixflag = False
adminflag = getbool(argv[i+1],"admin")
elif argv[i] == '-g':
gitdir = fullpath(argv[i+1])
else:
print("\nUnknown flag:",argv[i])
error()
i+=2
# checks
if bitflag != '32' and bitflag != '64':
error("Unsupported bitness flag %s" % bitflag)
if parflag != 'no' and parflag != 'mpi':
error("Unsupported parallel flag %s" % parflag)
if thrflag != 'no' and thrflag != 'omp':
error("Unsupported threading flag %s" % thrflag)
# test for valid revision name format: branch names, release tags, or commit hashes
rev1 = re.compile("^(stable|unstable|master)$")
rev2 = re.compile(r"^(patch|stable)_\d+(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\d{4}$")
rev3 = re.compile(r"^[a-f0-9]{40}$")
if not rev1.match(revflag) and not rev2.match(revflag) and not rev3.match(revflag):
error("Unsupported revision flag %s" % revflag)
# create working directory
if adminflag:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s" % (bitflag,parflag,thrflag,revflag))
else:
if pythonflag:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-python" % (bitflag,parflag,thrflag,revflag))
elif msixflag:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-msix" % (bitflag,parflag,thrflag,revflag))
else:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-noadmin" % (bitflag,parflag,thrflag,revflag))
shutil.rmtree(builddir,True)
try:
os.mkdir(builddir)
except:
error("Cannot create temporary build folder: %s" % builddir)
# check for prerequisites and set up build environment
if bitflag == '32':
cc_cmd = which('i686-w64-mingw32-gcc')
cxx_cmd = which('i686-w64-mingw32-g++')
fc_cmd = which('i686-w64-mingw32-gfortran')
ar_cmd = which('i686-w64-mingw32-ar')
size_cmd = which('i686-w64-mingw32-size')
nsis_cmd = which('makensis')
lmp_size = 'smallsmall'
else:
cc_cmd = which('x86_64-w64-mingw32-gcc')
cxx_cmd = which('x86_64-w64-mingw32-g++')
fc_cmd = which('x86_64-w64-mingw32-gfortran')
ar_cmd = which('x86_64-w64-mingw32-ar')
size_cmd = which('x86_64-w64-mingw32-size')
nsis_cmd = which('makensis')
lmp_size = 'smallbig'
print("""
Settings: building LAMMPS revision %s for %s-bit Windows
Message passing : %s
Multi-threading : %s
Home folder : %s
Source folder : %s
Build folder : %s
C compiler : %s
C++ compiler : %s
Fortran compiler : %s
Library archiver : %s
""" % (revflag,bitflag,parflag,thrflag,homedir,gitdir,builddir,cc_cmd,cxx_cmd,fc_cmd,ar_cmd))
# create/update git checkout
if not os.path.exists(gitdir):
txt = system("git clone https://github.com/lammps/lammps.git %s" % gitdir)
if verbose: print(txt)
os.chdir(gitdir)
txt = system("git fetch origin")
if verbose: print(txt)
txt = system("git checkout %s" % revflag)
if verbose: print(txt)
if revflag == "master" or revflag == "stable" or revflag == "unstable":
txt = system("git pull")
if verbose: print(txt)
# switch to build folder
os.chdir(builddir)
# download what is not automatically downloaded by CMake
print("Downloading third party tools")
url='http://download.lammps.org/thirdparty'
print("FFMpeg")
getexe("%s/ffmpeg-win%s.exe.gz" % (url,bitflag),"ffmpeg.exe")
print("gzip")
getexe("%s/gzip.exe.gz" % url,"gzip.exe")
if parflag == "mpi":
mpiflag = "on"
else:
mpiflag = "off"
if thrflag == "omp":
ompflag = "on"
else:
ompflag = "off"
print("Configuring build with CMake")
cmd = "mingw%s-cmake -G Ninja -D CMAKE_BUILD_TYPE=Release" % bitflag
cmd += " -D ADD_PKG_CONFIG_PATH=%s/mingw%s-pkgconfig" % (homedir,bitflag)
cmd += " -C %s/mingw%s-pkgconfig/addpkg.cmake" % (homedir,bitflag)
cmd += " -C %s/cmake/presets/mingw-cross.cmake %s/cmake" % (gitdir,gitdir)
cmd += " -DBUILD_SHARED_LIBS=on -DBUILD_MPI=%s -DBUILD_OPENMP=%s" % (mpiflag,ompflag)
cmd += " -DWITH_GZIP=on -DWITH_FFMPEG=on -DLAMMPS_EXCEPTIONS=on"
cmd += " -DINTEL_LRT_MODE=c++11 -DBUILD_LAMMPS_SHELL=on"
cmd += " -DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
if pythonflag: cmd += " -DPKG_PYTHON=yes"
print("Running: ",cmd)
txt = system(cmd)
if verbose: print(txt)
print("Compiling")
system("ninja")
print("Done")
print("Building PDF manual")
os.chdir(os.path.join(gitdir,"doc"))
txt = system("make pdf")
if verbose: print(txt)
shutil.move("Manual.pdf",os.path.join(builddir,"LAMMPS-Manual.pdf"))
print("Done")
# switch back to build folder and copy/process files for inclusion in installer
print("Collect and convert files for the Installer package")
os.chdir(builddir)
shutil.copytree(os.path.join(gitdir,"examples"),os.path.join(builddir,"examples"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"bench"),os.path.join(builddir,"bench"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"tools"),os.path.join(builddir,"tools"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"python","lammps"),os.path.join(builddir,"python","lammps"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"potentials"),os.path.join(builddir,"potentials"),symlinks=False)
shutil.copy(os.path.join(gitdir,"README"),os.path.join(builddir,"README.txt"))
shutil.copy(os.path.join(gitdir,"LICENSE"),os.path.join(builddir,"LICENSE.txt"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","colvars-refman-lammps.pdf"),os.path.join(builddir,"Colvars-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"tools","createatoms","Manual.pdf"),os.path.join(builddir,"CreateAtoms-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","kspace.pdf"),os.path.join(builddir,"Kspace-Extra-Info.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","pair_gayberne_extra.pdf"),os.path.join(builddir,"PairGayBerne-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","pair_resquared_extra.pdf"),os.path.join(builddir,"PairReSquared-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_overview.pdf"),os.path.join(builddir,"PDLAMMPS-Overview.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_EPS.pdf"),os.path.join(builddir,"PDLAMMPS-EPS.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_VES.pdf"),os.path.join(builddir,"PDLAMMPS-VES.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","SPH_LAMMPS_userguide.pdf"),os.path.join(builddir,"SPH-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","SMD_LAMMPS_userguide.pdf"),os.path.join(builddir,"SMD-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","USER-CGDNA.pdf"),os.path.join(builddir,"CGDNA-Manual.pdf"))
# prune outdated inputs, too large files, or examples of packages we don't bundle
for d in ['accelerate','kim','mscg','USER/quip','USER/vtk']:
shutil.rmtree(os.path.join("examples",d),True)
for d in ['FERMI','KEPLER']:
shutil.rmtree(os.path.join("bench",d),True)
shutil.rmtree("tools/msi2lmp/test",True)
os.remove("potentials/C_10_10.mesocnt")
os.remove("potentials/TABTP_10_10.mesont")
os.remove("examples/USER/mesont/C_10_10.mesocnt")
os.remove("examples/USER/mesont/TABTP_10_10.mesont")
# convert text files to CR-LF conventions
txt = system("unix2dos LICENSE.txt README.txt tools/msi2lmp/README")
if verbose: print(txt)
txt = system("find bench examples potentials python tools/msi2lmp/frc_files -type f -print | xargs unix2dos")
if verbose: print(txt)
# mass rename README to README.txt
txt = system('for f in $(find tools bench examples potentials python -name README -print); do mv -v $f $f.txt; done')
if verbose: print(txt)
# mass rename in.<name> to in.<name>.lmp
txt = system('for f in $(find bench examples -name in.\* -print); do mv -v $f $f.lmp; done')
if verbose: print(txt)
print("Done")
print("Configuring and building installer")
os.chdir(builddir)
if pythonflag:
nsisfile = os.path.join(homedir,"installer","lammps-python.nsis")
elif adminflag:
nsisfile = os.path.join(homedir,"installer","lammps-admin.nsis")
else:
if msixflag:
nsisfile = os.path.join(homedir,"installer","lammps-msix.nsis")
else:
nsisfile = os.path.join(homedir,"installer","lammps-noadmin.nsis")
shutil.copy(nsisfile,os.path.join(builddir,"lammps.nsis"))
shutil.copy(os.path.join(homedir,"installer","FileAssociation.nsh"),os.path.join(builddir,"FileAssociation.nsh"))
shutil.copy(os.path.join(homedir,"installer","lammps.ico"),os.path.join(builddir,"lammps.ico"))
shutil.copy(os.path.join(homedir,"installer","lammps-text-logo-wide.bmp"),os.path.join(builddir,"lammps-text-logo-wide.bmp"))
shutil.copytree(os.path.join(homedir,"installer","envvar"),os.path.join(builddir,"envvar"),symlinks=False)
# define version flag of the installer:
# - use current timestamp, when pulling from master (for daily builds)
# - parse version from src/version.h when pulling from stable, unstable, or specific tag
# - otherwise use revflag, i.e. the commit hash
version = revflag
if revflag == 'stable' or revflag == 'unstable' or rev2.match(revflag):
with open(os.path.join(gitdir,"src","version.h"),'r') as v_file:
verexp = re.compile(r'^.*"(\w+) (\w+) (\w+)".*$')
vertxt = v_file.readline()
verseq = verexp.match(vertxt).groups()
version = "".join(verseq)
elif revflag == 'master':
version = time.strftime('%Y-%m-%d')
if bitflag == '32':
mingwdir = '/usr/i686-w64-mingw32/sys-root/mingw/bin/'
elif bitflag == '64':
mingwdir = '/usr/x86_64-w64-mingw32/sys-root/mingw/bin/'
if parflag == 'mpi':
txt = system("makensis -DMINGW=%s -DVERSION=%s-MPI -DBIT=%s -DLMPREV=%s lammps.nsis" % (mingwdir,version,bitflag,revflag))
if verbose: print(txt)
else:
txt = system("makensis -DMINGW=%s -DVERSION=%s -DBIT=%s -DLMPREV=%s lammps.nsis" % (mingwdir,version,bitflag,revflag))
if verbose: print(txt)
# clean up after successful build
os.chdir('..')
print("Cleaning up...")
shutil.rmtree(builddir,True)
print("Done.")
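# A sketch of how this script is typically invoked; the flag values mirror the help
# text above, while the script name and the build host toolchain are assumptions.
def _build_invocation_sketch():
    import subprocess
    subprocess.check_call(['python', 'cmake-win-on-linux.py',
                           '-b', '64', '-p', 'mpi', '-t', 'omp',
                           '-r', 'stable', '-j', '4'])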
# -----------------------------------------------------------------------------
# tinybike/catchcoin | qa/rpc-tests/listtransactions.py | license: mit
# -----------------------------------------------------------------------------
#!/usr/bin/env python
# Exercise the listtransactions API
# Add python-catchcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-catchcoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from catchcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
def run_test(nodes):
# Simple send, 0 to 1:
txid = nodes[0].sendtoaddress(nodes[1].getnewaddress(), 0.1)
sync_mempools(nodes)
check_array_result(nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
check_array_result(nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
nodes[0].setgenerate(True, 1)
sync_blocks(nodes)
check_array_result(nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
check_array_result(nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = nodes[0].sendtoaddress(nodes[0].getnewaddress(), 0.2)
check_array_result(nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
check_array_result(nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { nodes[0].getnewaddress() : 0.11, nodes[1].getnewaddress() : 0.22,
nodes[0].getaccountaddress("from1") : 0.33, nodes[1].getaccountaddress("toself") : 0.44 }
txid = nodes[1].sendmany("", send_to)
sync_mempools(nodes)
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
check_array_result(nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
check_array_result(nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
check_array_result(nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave catchcoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing catchcoind/catchcoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
nodes = start_nodes(2, options.tmpdir)
connect_nodes(nodes[1], 0)
sync_blocks(nodes)
run_test(nodes)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
stack = traceback.extract_tb(sys.exc_info()[2])
print(stack[-1])
if not options.nocleanup:
print("Cleaning up")
stop_nodes()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
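# A tiny self-contained sketch of check_array_result() defined above, using plain
# dictionaries in place of live RPC results:
def _check_array_result_sketch():
    listing = [{"txid": "abc123", "category": "send", "amount": -0.1}]
    check_array_result(listing, {"txid": "abc123"}, {"category": "send"})  # passes silently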
# -----------------------------------------------------------------------------
# Kopachris/seshet | seshet/bot.py
# -----------------------------------------------------------------------------
"""Implement SeshetBot as subclass of ircutils3.bot.SimpleBot."""
import logging
import os
from io import StringIO
from datetime import datetime
from ircutils3 import bot, client
from .utils import KVStore, Storage, IRCstr
class SeshetUser(object):
"""Represent one IRC user."""
def __init__(self, nick, user, host):
logging.debug("Building new SeshetUser, %s", nick)
self.nick = IRCstr(nick)
self.user = user
self.host = host
self.channels = []
def join(self, channel):
"""Add this user to the channel's user list and add the channel to this
user's list of joined channels.
"""
if channel not in self.channels:
channel.users.add(self.nick)
self.channels.append(channel)
def part(self, channel):
"""Remove this user from the channel's user list and remove the channel
from this user's list of joined channels.
"""
if channel in self.channels:
channel.users.remove(self.nick)
self.channels.remove(channel)
def quit(self):
"""Remove this user from all channels and reinitialize the user's list
of joined channels.
"""
for c in self.channels:
c.users.remove(self.nick)
self.channels = []
def change_nick(self, nick):
"""Update this user's nick in all joined channels."""
old_nick = self.nick
self.nick = IRCstr(nick)
for c in self.channels:
c.users.remove(old_nick)
c.users.add(self.nick)
def __str__(self):
return "{}!{}@{}".format(self.nick, self.user, self.host)
def __repr__(self):
temp = "<SeshetUser {}!{}@{} in channels {}>"
return temp.format(self.nick, self.user, self.host, self.channels)
class SeshetChannel(object):
"""Represent one IRC channel."""
def __init__(self, name, users, log_size=100):
self.name = IRCstr(name)
self.users = users
self.message_log = []
self._log_size = log_size
def log_message(self, user, message):
"""Log a channel message.
This log acts as a sort of cache so that recent activity can be searched
by the bot and command modules without querying the database.
"""
if isinstance(user, SeshetUser):
user = user.nick
elif not isinstance(user, IRCstr):
user = IRCstr(user)
time = datetime.utcnow()
self.message_log.append((time, user, message))
while len(self.message_log) > self._log_size:
del self.message_log[0]
def __str__(self):
return str(self.name)
def __repr__(self):
temp = "<SeshetChannel {} with {} users>"
return temp.format(self.name, len(self.users))
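# A small sketch of how SeshetUser and SeshetChannel interact (nick/host values are
# made up): joining adds the user's nick to the channel's user set, and channel
# messages are cached in the channel's message log.
def _seshet_objects_sketch():
    channel = SeshetChannel('#seshet', set())
    user = SeshetUser('alice', 'alice', 'example.org')
    user.join(channel)
    channel.log_message(user, 'hello, world')
    return channel.users, channel.message_log[-1]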
class SeshetBot(bot.SimpleBot):
"""Extend `ircutils3.bot.SimpleBot`.
Each instance represents one bot, connected to one IRC network.
Each instance should have its own database, but can make use of
any shared command modules. The modules may have to be added to
the bot's database if the bot wasn't created using the
`seshet --config` or `seshet --new` commands.
"""
def __init__(self, nick='Seshet', db=None, debug_file=None, verbosity=99):
"""Extend `ircutils3.bot.SimpleBot.__init__()`.
Keyword argument `db` is required for running commands other
than core commands and should be an instance of pydal.DAL.
"""
# initialize debug logging
if debug_file is None:
logging.basicConfig(level=verbosity)
else:
logging.basicConfig(filename=os.path.expanduser(debug_file),
level=verbosity
)
logging.debug("Running `SimpleBot.__init__`...")
bot.SimpleBot.__init__(self, nick, auto_handle=False)
# define defaults
self.session = Storage()
self.log_file = 'seshet.log'
self.log_formats = {}
self.locale = {}
self.channels = {}
self.users = {}
if db is None:
# no database connection, only log to file and run
# core command modules
logging.info("No db, IRC logging will be done to file")
self.log = self._log_to_file
self.run_modules = self._run_only_core
# dummy KV store since no db
self.storage = Storage()
else:
logging.info("Using database %s", db)
self.db = db
self.storage = KVStore(db)
# Add default handlers
logging.debug("Adding default handlers...")
self.events["any"].add_handler(client._update_client_info)
self.events["ctcp_version"].add_handler(client._reply_to_ctcp_version)
self.events["name_reply"].add_handler(_add_channel_names)
def log(self, etype, source, msg='', target='', hostmask='', params=''):
"""Log an event in the database.
Required:
`etype` - event type. One of 'PRIVMSG', 'QUIT', 'PART', 'ACTION',
'NICK', 'JOIN', 'MODE', 'KICK', 'CTCP', or 'ERROR'. Enforced
by database model.
`source` - source of the event. Usually a user. For NICK events,
the user's original nickname. For ERROR events, this should be
the exception name, the module name, and the line number. The
full traceback will be logged in `msg`.
Optional:
`msg` - a message associated with the event.
`target` - the target the message was directed to. For MODE and KICK
events, this will be the user the event was performed on. For
            NICK events, this will be the channel the event was seen in (an event
will be created for each channel the user is seen by the bot in).
`hostmask` - a hostmask associated with the event.
        `params` - any additional parameters associated with the event, such as
a new nickname (for NICK events), mode switches (for MODE events),
or a dump of local variables (for ERROR events).
"""
self.db.event_log.insert(event_type=etype,
event_time=datetime.utcnow(),
source=source,
target=target,
message=msg,
host=hostmask,
params=params,
)
self.db.commit()
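    # Hypothetical example of the call above (all values invented): logging a
    # channel message would look roughly like
    #
    #     self.log('PRIVMSG', source='alice', msg='hello world',
    #              target='#example', hostmask='[email protected]')
    #
    # which inserts one row into event_log and commits immediately.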
def run_modules(self, e):
# grab local pointer to self.db for faster lookup
db = self.db
# get initial list of modules handling this event type
event_types = db.modules.event_types
mod_enabled = db.modules.enabled
init_mods = db(event_types.contains(e.command) & mod_enabled).select()
logging.debug(("Running modules for {} command. "
"Initial module list:\n{}").format(e.command, init_mods)
)
if e.command in ('PRIVMSG', 'CTCP_ACTION', 'NOTICE'):
# narrow down list of modules to run based on event parameters
# lowercase for non-caps comparisons
m_low = e.message.lower()
bot_n = self.nickname.lower()
bot_u = self.user.lower()
bot_r = self.real_name.lower()
# indicates whether or not name has already been stripped from
# original message
for_us = False
if e.target.startswith('#'):
chan_msg = True
chan_nicks = self.channels[e.target].users
else:
chan_msg = False
fin_mods = list() # final list of modules to run
for mod in init_mods:
if e.source in mod.whitelist:
fin_mods.append(mod)
elif e.source in mod.blacklist:
pass
if self.nickname in mod.enicks:
if e.target == self.nickname or for_us:
fin_mods.append(mod)
elif m_low.startswith(bot_n):
# strip nickname from original message so modules can
# process it correctly
e.message = e.message[len(bot_n):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_u):
e.message = e.message[len(bot_u):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_r):
e.message = e.message[len(bot_r):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
if chan_msg:
if e.target in mod.dchannels:
pass
elif set(mod.dnicks) & chan_nicks:
pass
elif e.target in mod.echannels:
fin_mods.append(mod)
elif set(mod.enicks) & chan_nicks:
fin_mods.append(mod)
argv = m_low.split()
for mod in fin_mods:
# run each module
m = __import__(mod.name) # TODO: use importlib
# TODO: add authentication and rate limiting
for cmd, fun in m.commands.items():
if (mod.cmd_prefix + cmd) == argv[0]:
fun(self, e)
break
def get_unique_users(self, chan):
"""Get the set of users that are unique to the given channel (i.e. not
present in any other channel the bot is in).
"""
chan = IRCstr(chan)
these_users = self.channels[chan].users
other_users = set()
for c in self.channels.values():
if c.name != chan:
other_users |= c.users
return these_users - other_users
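    # Worked example with invented data: if the bot sits in #a with users
    # {x, y} and #b with users {y, z}, then get_unique_users('#a') == {x} --
    # the users the bot would lose track of if it left #a.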
def on_message(self, e):
self.log('privmsg',
source=e.source,
msg=e.message,
target=e.target,
)
if e.target in self.channels:
# TODO: move this to self.log() so we don't have to get time twice?
self.channels[e.target].log_message(e.source, e.message)
self.run_modules(e)
def on_join(self, e):
self.log('join',
source=e.source,
target=e.target,
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
if e.source != self.nickname:
if nick not in self.users:
self.users[nick] = SeshetUser(nick, e.user, e.host)
self.users[nick].join(self.channels[chan])
self.run_modules(e)
def on_part(self, e):
self.log('part',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params[1:]),
target=e.target,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
            # iterate over a snapshot since entries may be deleted below
            for u in list(self.users.values()):
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_quit(self, e):
nick = IRCstr(e.source)
for chan in self.channels.values():
if nick in chan.users:
self.log('quit',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params),
target=chan.name,
)
self.users[nick].quit()
del self.users[nick]
def on_disconnect(self, e):
pass
def on_kick(self, e):
self.log('kick',
source=e.source,
target=e.target,
params=e.params[0],
msg=' '.join(e.params[1:]),
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
            # iterate over a snapshot since entries may be deleted below
            for u in list(self.users.values()):
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_nick_change(self, e):
new_nick = IRCstr(e.target)
old_nick = IRCstr(e.source)
for chan in self.channels.values():
            if old_nick in chan.users:
self.log('nick',
source=e.source,
hostmask=e.user+'@'+e.host,
target=chan.name,
params=e.target,
)
self.users[old_nick].change_nick(new_nick)
self.users[new_nick] = self.users[old_nick]
del self.users[old_nick]
def on_ctcp_action(self, e):
self.log('action',
source=e.source,
target=e.target,
msg=' '.join(e.params),
)
def on_welcome(self, e):
pass
def on_mode(self, e):
self.log('mode',
source=e.source,
msg=' '.join(e.params),
target=e.target,
)
def before_poll(self):
"""Called each loop before polling sockets for I/O."""
pass
def after_poll(self):
"""Called each loop after polling sockets for I/O and
handling any queued events.
"""
pass
def connect(self, *args, **kwargs):
"""Extend `client.SimpleClient.connect()` with defaults"""
defaults = {}
for i, k in enumerate(('host', 'port', 'channel', 'use_ssl', 'password')):
if i < len(args):
defaults[k] = args[i]
elif k in kwargs:
defaults[k] = kwargs[k]
else:
def_k = 'default_' + k
defaults[k] = getattr(self, def_k, None)
if defaults['use_ssl'] is None:
defaults['use_ssl'] = False
if defaults['host'] is None:
raise TypeError("missing 1 required positional argument: 'host'")
logging.info("Connecting to %s:%s and joining channels %s",
defaults['host'],
defaults['port'],
defaults['channel'],
)
client.SimpleClient.connect(self, **defaults)
def start(self):
logging.debug("Beginning poll loop")
self._loop(self.conn._map)
def _log_to_file(self, etype, source, msg='', target='', hostmask='', params=''):
"""Override `log()` if bot is not initialized with a database
connection. Do not call this method directly.
"""
today = datetime.utcnow()
# TODO: Use self.locale['timezone'] for changing time
date = today.strftime(self.locale['date_fmt'])
time = today.strftime(self.locale['time_fmt'])
datetime_s = today.strftime(self.locale['short_datetime_fmt'])
datetime_l = today.strftime(self.locale['long_datetime_fmt'])
if target == self.nickname and etype in ('privmsg', 'action'):
target = source
if etype in self.log_formats:
file_path = os.path.expanduser(self.log_file.format(**locals()))
file_dir = os.path.dirname(file_path)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
line = self.log_formats[etype].format(**locals())
with open(file_path, 'a') as log:
log.write(line+'\n')
# else do nothing
def _run_only_core(self, *args, **kwargs):
"""Override `_run_commands()` if bot is not initialized with a
database connection. Do not call this method directly.
Rather than getting a list of enabled modules from the database,
Seshet will only run the commands defined by `core` in this package.
The bot will only run commands given in private message ("query")
by either an authenticated user defined in the instance's config file,
or by any user with the same hostmask if authentication isn't set up
in the instance's config file.
The `core` command module from this package can only be overridden if
the bot is initialized with a database connection and a new `core`
module is entered into the database.
"""
pass
def _loop(self, map):
"""The main loop. Poll sockets for I/O and run any other functions
that need to be run every loop.
"""
try:
from asyncore import poll
except ImportError:
raise Exception("Couldn't find poll function. Cannot start bot.")
while map:
self.before_poll()
poll(timeout=30.0, map=map)
self.after_poll()
def _add_channel_names(client, e):
"""Add a new channel to self.channels and initialize its user list.
Called as event handler for RPL_NAMES events. Do not call directly.
"""
chan = IRCstr(e.channel)
names = set([IRCstr(n) for n in e.name_list])
client.channels[chan] = SeshetChannel(chan, names) | bsd-3-clause | 1,934,334,764,152,155,100 | 34.984762 | 85 | 0.505214 | false |
Naeka/vosae-app | www/organizer/api/resources/event.py | 1 | 10316 | # -*- coding:Utf-8 -*-
from django.conf.urls import url
from django.core.exceptions import ObjectDoesNotExist
from tastypie import fields as base_fields, http
from tastypie.utils import trailing_slash
from tastypie.validation import Validation
from tastypie_mongoengine import fields
from dateutil.parser import parse
from core.api.utils import TenantResource
from organizer.models import VosaeEvent, DATERANGE_FILTERS
from organizer.api.doc import HELP_TEXT
__all__ = (
'VosaeEventResource',
)
class EventValidation(Validation):
def is_valid(self, bundle, request=None):
from django.utils.timezone import is_naive
errors = {}
for field in ['start', 'end']:
data = bundle.data.get(field)
if not data.get('date', None) and not data.get('datetime', None):
errors['__all__'] = ["One of 'date' and 'datetime' must be set."]
elif data.get('date', None) and data.get('datetime', None):
errors['__all__'] = ["Only one of 'date' and 'datetime' must be set. The 'date' field is used for all-day events."]
elif data.get('datetime', None) and is_naive(parse(data.get('datetime'))) and not data.get('timezone', None):
errors['datetime'] = ["A timezone offset is required if not specified in the 'timezone' field"]
return errors
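# Illustrative payloads for the validation above (field values invented):
#
#     {"date": "2014-07-01"}                               -> valid (all-day event)
#     {"datetime": "2014-07-01T10:00:00+02:00"}            -> valid (offset included)
#     {"datetime": "2014-07-01T10:00:00"}                  -> needs a "timezone" value
#     {"date": "2014-07-01", "datetime": "2014-07-01..."}  -> invalid, only one of the two
#
# The same rules are applied to both the 'start' and 'end' sub-documents.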
class VosaeEventResource(TenantResource):
status = base_fields.CharField(
attribute='status',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['status']
)
created_at = base_fields.DateTimeField(
attribute='created_at',
readonly=True,
help_text=HELP_TEXT['vosae_event']['created_at']
)
updated_at = base_fields.DateTimeField(
attribute='updated_at',
readonly=True,
help_text=HELP_TEXT['vosae_event']['updated_at']
)
summary = base_fields.CharField(
attribute='summary',
help_text=HELP_TEXT['vosae_event']['summary']
)
description = base_fields.CharField(
attribute='description',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['description']
)
location = base_fields.CharField(
attribute='location',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['location']
)
color = base_fields.CharField(
attribute='color',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['color']
)
start = fields.EmbeddedDocumentField(
embedded='organizer.api.resources.EventDateTimeResource',
attribute='start',
help_text=HELP_TEXT['vosae_event']['start']
)
end = fields.EmbeddedDocumentField(
embedded='organizer.api.resources.EventDateTimeResource',
attribute='end',
help_text=HELP_TEXT['vosae_event']['end']
)
recurrence = base_fields.CharField(
attribute='recurrence',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['recurrence']
)
original_start = fields.EmbeddedDocumentField(
embedded='organizer.api.resources.EventDateTimeResource',
attribute='original_start',
readonly=True,
help_text=HELP_TEXT['vosae_event']['original_start']
)
instance_id = base_fields.CharField(
attribute='instance_id',
readonly=True,
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['instance_id']
)
transparency = base_fields.CharField(
attribute='transparency',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['transparency']
)
calendar = fields.ReferenceField(
to='organizer.api.resources.VosaeCalendarResource',
attribute='calendar',
help_text=HELP_TEXT['vosae_event']['calendar']
)
creator = fields.ReferenceField(
to='core.api.resources.VosaeUserResource',
attribute='creator',
readonly=True,
help_text=HELP_TEXT['vosae_event']['creator']
)
organizer = fields.ReferenceField(
to='core.api.resources.VosaeUserResource',
attribute='organizer',
readonly=True,
help_text=HELP_TEXT['vosae_event']['organizer']
)
attendees = fields.EmbeddedListField(
of='organizer.api.resources.AttendeeResource',
attribute='attendees',
null=True,
blank=True,
full=True,
help_text=HELP_TEXT['vosae_event']['attendees']
)
reminders = fields.EmbeddedDocumentField(
embedded='organizer.api.resources.ReminderSettingsResource',
attribute='reminders',
blank=True,
help_text=HELP_TEXT['vosae_event']['reminders']
)
class Meta(TenantResource.Meta):
resource_name = 'vosae_event'
queryset = VosaeEvent.objects.all()
excludes = ('tenant', 'occurrences', 'next_reminder', 'ical_uid', 'ical_data')
filtering = {
'start': ('exact', 'gt', 'gte'),
'end': ('exact', 'lt', 'lte'),
'calendar': ('exact')
}
validation = EventValidation()
def prepend_urls(self):
"""Add urls for resources actions."""
urls = super(VosaeEventResource, self).prepend_urls()
urls.extend((
url(r'^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/instances%s$' % (self._meta.resource_name, trailing_slash()), self.wrap_view('event_instances'), name='api_vosae_event_instances'),
))
return urls
def build_filters(self, filters=None):
qs_filters = super(VosaeEventResource, self).build_filters(filters)
        # iterate over a copy of the items since the dict is modified below
        for filter_name, filter_value in qs_filters.items():
if filter_name.endswith('__exact'):
new_name = filter_name[:filter_name.index('__exact')]
qs_filters[new_name] = filter_value
del qs_filters[filter_name]
filter_name = new_name
if filter_name in DATERANGE_FILTERS:
if isinstance(filter_value, basestring):
qs_filters[filter_name] = parse(filter_value)
return qs_filters
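    # Hypothetical query string illustrating the conversion above:
    #     ?start__gte=2014-07-01&calendar__exact=5
    # tastypie first yields {'start__gte': ..., 'calendar__exact': ...}; the
    # loop renames 'calendar__exact' to plain 'calendar', and any filter whose
    # name appears in DATERANGE_FILTERS has its string value parsed into a
    # datetime via dateutil's parse().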
def get_object_list(self, request):
"""Filters events based on calendar accesses (extracted from request user)"""
from organizer.models import VosaeCalendar
object_list = super(VosaeEventResource, self).get_object_list(request)
principals = [request.vosae_user] + request.vosae_user.groups
calendars = VosaeCalendar.objects.filter(acl__read_list__in=principals, acl__negate_list__nin=principals)
return object_list.filter(calendar__in=list(calendars))
def apply_filters(self, request, applicable_filters):
object_list = super(VosaeEventResource, self).apply_filters(request, applicable_filters)
filters = request.GET
if 'single_events' in filters and filters['single_events'] in ['true', 'True', True]:
start = None
end = None
for filter_name, filter_value in filters.iteritems():
try:
if filter_name.startswith('start'):
start = parse(filter_value)
elif filter_name.startswith('end'):
end = parse(filter_value)
except:
pass
return object_list.with_instances(start, end)
return object_list
def event_instances(self, request, **kwargs):
"""List all instances of the event"""
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
try:
bundle = self.build_bundle(request=request)
objects = self.obj_get_list(bundle, **self.remove_api_resource_names(kwargs)).with_instances()
except ObjectDoesNotExist:
return http.HttpNotFound()
if objects.count() < 2:
return http.HttpNotFound()
sorted_objects = self.apply_sorting(objects, options=request.GET)
first_objects_bundle = self.build_bundle(obj=objects[0], request=request)
instances_resource_uri = '%sinstances/' % self.get_resource_uri(first_objects_bundle)
paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=instances_resource_uri, limit=self._meta.limit)
to_be_serialized = paginator.page()
# Dehydrate the bundles in preparation for serialization.
bundles = [self.build_bundle(obj=obj, request=request) for obj in to_be_serialized['objects']]
to_be_serialized['objects'] = [self.full_dehydrate(b) for b in bundles]
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized)
def full_hydrate(self, bundle):
"""Set event's creator and organizer"""
bundle = super(VosaeEventResource, self).full_hydrate(bundle)
bundle.obj.creator = bundle.request.vosae_user
# Organizer should be the user owner of the calendar
try:
organizer = bundle.obj.calendar.acl.get_owner()
except:
organizer = bundle.request.vosae_user
bundle.obj.organizer = organizer
return bundle
def full_dehydrate(self, bundle, for_list=False):
bundle = super(VosaeEventResource, self).full_dehydrate(bundle, for_list=for_list)
if not bundle.data['instance_id']:
del bundle.data['instance_id']
return bundle
def dehydrate(self, bundle):
"""Dehydrates the appropriate CalendarList which differs according to user (extracted from request)"""
from organizer.models import CalendarList
from organizer.api.resources import CalendarListResource
bundle = super(VosaeEventResource, self).dehydrate(bundle)
calendar_list = CalendarList.objects.get(calendar=bundle.obj.calendar, vosae_user=bundle.request.vosae_user)
calendar_list_resource = CalendarListResource()
calendar_list_resource_bundle = calendar_list_resource.build_bundle(obj=calendar_list, request=bundle.request)
bundle.data['calendar_list'] = calendar_list_resource.get_resource_uri(calendar_list_resource_bundle)
return bundle
| agpl-3.0 | -4,812,843,896,607,923,000 | 38.830116 | 190 | 0.630186 | false |
m-tmatma/svnmailer | src/lib/svnmailer/settings.py | 1 | 19703 | # -*- coding: utf-8 -*-
#
# Copyright 2004-2006 André Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Runtime settings for the svnmailer
==================================
This module defines one public class, called L{Settings}. This class is the
storage container for all settings used by the svnmailer. L{Settings} is an
abstract class. There is just one method that must be implemented --
L{Settings.init}. This method is responsible for filling the container
properly. An implementor of the L{Settings} class can be found in the
L{svnmailer.config} module.
This module further defines the Settings subcontainers
L{GroupSettingsContainer}, L{GeneralSettingsContainer} and
L{RuntimeSettingsContainer}, but you should not instantiate them directly --
L{Settings} provides methods that return instances of these containers.
"""
__author__ = "André Malo"
__docformat__ = "epytext en"
__all__ = ['Settings', 'modes']
# global imports
from svnmailer import typedstruct, struct_accessors
class _Tokens(object):
""" Generic token container
@ivar valid_tokens: The valid mode tokens (str, str, ...)
@type valid_tokens: C{tuple}
"""
valid_tokens = ()
def __init__(self, *args):
""" Initialization """
self.valid_tokens = args
for token in self.valid_tokens:
setattr(self, token.encode('us-ascii'), token)
modes = _Tokens('commit', 'propchange', 'lock', 'unlock')
xpath = _Tokens(u'yes', u'no', u'ignore')
showenc = _Tokens(u'yes', u'no', u'nondefault')
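# Example of what the token containers above provide (interactive sketch):
#
#     >>> modes.commit
#     'commit'
#     >>> xpath.valid_tokens
#     (u'yes', u'no', u'ignore')
#
# i.e. every valid token becomes an attribute whose value is the token itself.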
def groupMembers(space):
""" Define the members of the group settings
The following members are defined:
- C{_name}: Name of the group
- C{_def_for_repos}: default for_repos regex
- C{_def_for_paths}: default for_paths regex
- C{for_repos}: Repository regex
- C{for_paths}: Path regex (inside the repos)
- C{exclude_paths}: Exclude path regex to prevent for_paths from
being applied
- C{ignore_if_other_matches}: this group will be ignored if there
are any other groups selected for a particular path
- C{show_nonmatching_paths}: How to deal with paths that are not
matched by the group
- C{commit_subject_template}: Subject template for commit mail
- C{propchange_subject_template}: Subject template for revpropchanges
- C{lock_subject_template}: Subject template for locks
- C{unlock_subject_template}: Subject template for unlocks
- C{commit_subject_prefix}: Subject prefix for commit mail
- C{propchange_subject_prefix}: Subject prefix for revpropchanges
- C{lock_subject_prefix}: Subject prefix for locks
- C{unlock_subject_prefix}: Subject prefix for unlocks
- C{max_subject_length}: Maximum subject length
- C{from_addr}: C{From:} address format string
- C{to_addr}: C{To:} address format string
- C{to_fake}: C{To:} non-address format string
- C{bcc_addr}: C{Bcc:} address format string
- C{reply_to_addr}: C{Reply-To:} address format string
- C{diff_command}: The diff command to use
- C{generate_diffs}: List of actions for which diffs are generated
- C{browser_base_url}: type and format string of the repository
browser base url
- C{custom_header}: custom header name and format template
- C{to_newsgroup}: The newsgroup where the notification should be
posted to
- C{long_news_action}: The action to take on huge commit postings
- C{long_mail_action}: The action to take on huge commit mails
- C{mail_transfer_encoding}: Content-Transfer-Encoding for mails
- C{news_transfer_encoding}: Content-Transfer-Encoding for news
- C{mail_type}: The mail construction type
- C{extract_x509_author}: Treat author as x509 subject and try to
extract author's real name and email address
- C{cia_project_name}: The project name used for CIA notifications
- C{cia_project_module}: The project module used for CIA
notifications
- C{cia_project_branch}: The project branch used for CIA
notifications
- C{cia_project_submodule}: The project submodule used for CIA
notifications
- C{cia_project_path}: The project path, which will be stripped from
the absolute node path
- C{apply_charset_property}: Should svnmailer:content-charset
properties be recognized?
- C{show_applied_charset}: Show the encoding of the files in the
diff?
- C{viewcvs_base_url}: (I{deprecated}, use C{browser_base_url}
instead) format string for the viewcvs URL
@param space: The namespace to pollute
@type space: C{dict}
@return: The members definition
@rtype: C{dict}
"""
args = {
'space' : space,
'typemap' : struct_accessors.typemap,
'the_members': {
'_name' : 'unicode',
'_def_for_repos' : 'regex',
'_def_for_paths' : 'regex',
'for_repos' : ('regex', {'map': True}),
'for_paths' : ('regex', {'map': True}),
'exclude_paths' : ('regex', {'map': True}),
'ignore_if_other_matches' : 'humanbool',
'show_nonmatching_paths' : ('token',
{'map': True,
'allowed': xpath.valid_tokens}),
'commit_subject_template' : ('unicode', {'map': True}),
'propchange_subject_template': ('unicode', {'map': True}),
'lock_subject_template' : ('unicode', {'map': True}),
'unlock_subject_template' : ('unicode', {'map': True}),
'commit_subject_prefix' : ('unicode',
{'subst': True, 'map': True}),
'propchange_subject_prefix' : ('unicode',
{'subst': True, 'map': True}),
'lock_subject_prefix' : ('unicode',
{'subst': True, 'map': True}),
'unlock_subject_prefix' : ('unicode',
{'subst': True, 'map': True}),
'max_subject_length' : 'int',
'from_addr' : ('tokenlist',
{'subst': True, 'map': True}),
'to_addr' : ('tokenlist',
{'subst': True, 'map': True}),
'to_fake' : ('unicode',
{'subst': True, 'map': True}),
'bcc_addr' : ('tokenlist',
{'subst': True, 'map': True}),
'reply_to_addr' : ('unicode',
{'subst': True, 'map': True}),
'to_newsgroup' : ('tokenlist',
{'subst': True, 'map': True}),
'diff_command' : ('unicommand', {'map': True}),
'generate_diffs' : 'tokenlist',
'browser_base_url' : ('unicode',
{'subst': True, 'map': True}),
'long_mail_action' : ('mailaction', {'map': True}),
'long_news_action' : ('mailaction', {'map': True}),
'mail_type' : ('unicode', {'map': True}),
'mail_transfer_encoding' : 'unicode',
'news_transfer_encoding' : 'unicode',
'custom_header' : ('unicode',
{'subst': True, 'map': True}),
'extract_x509_author' : 'humanbool',
'cia_rpc_server' : ('unicode', {'map': True}),
'cia_project_name' : ('unicode',
{'subst': True, 'map': True}),
'cia_project_module' : ('unicode',
{'subst': True, 'map': True}),
'cia_project_branch' : ('unicode',
{'subst': True, 'map': True}),
'cia_project_submodule' : ('unicode',
{'subst': True, 'map': True}),
'cia_project_path' : ('unicode',
{'subst': True, 'map': True}),
'apply_charset_property' : 'humanbool',
'show_applied_charset' : ('token',
{'allowed': showenc.valid_tokens}),
# deprecated
'viewcvs_base_url' : ('unicode',
{'subst': True, 'map': True}),
},
'aliases': {
'suppress_if_match' : 'ignore_if_other_matches',
'fallback' : 'ignore_if_other_matches',
'reply_to' : 'reply_to_addr',
'x509_author' : 'extract_x509_author',
'charset_property' : 'apply_charset_property',
'truncate_subject' : 'max_subject_length',
'subject_length' : 'max_subject_length',
'diff' : 'diff_command',
'nonmatching_paths' : 'show_nonmatching_paths',
'nongroup_paths' : 'show_nonmatching_paths',
'show_nongroup_paths': 'show_nonmatching_paths',
},
}
return typedstruct.members(**args)
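# Purely illustrative group section showing how a few of the members defined
# above might be fed by the config layer (option values are invented and the
# INI syntax is only indicative; see svnmailer.config for the real parsing):
#
#     [example-group]
#     for_paths      = branches/.*
#     to_addr        = [email protected]
#     generate_diffs = add copy modify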
def generalMembers(space):
""" Define the members of the general settings
The following members are defined:
- C{diff_command}: The diff command
- C{sendmail_command}: The sendmail compatible command
- C{ssl_mode}: ssl mode
- C{smtp_host}: The smtp host (C{host[:port]})
- C{smtp_user}: The smtp auth. user
- C{smtp_pass}: The smtp auth. password
- C{debug_all_mails_to}: All mails are sent to these addresses
(for debugging purposes)
- C{cia_rpc_server}: The XML-RPC server running the CIA tracker
- C{tempdir}: The directory to use for temporary files
@param space: The namespace to pollute
@type space: C{dict}
@return: The members definition
@rtype: C{dict}
"""
args = {
'space' : space,
'typemap' : struct_accessors.typemap,
'the_members': {
'sendmail_command' : ('unicommand', {'map': True}),
'ssl_mode' : ('unicode', {'map': True}),
'smtp_host' : ('unicode', {'map': True}),
'smtp_user' : ('quotedstr', {'map': True}),
'smtp_pass' : ('quotedstr', {'map': True}),
'nntp_host' : ('unicode', {'map': True}),
'nntp_user' : ('quotedstr', {'map': True}),
'nntp_pass' : ('quotedstr', {'map': True}),
'debug_all_mails_to': ('tokenlist', {'map': True}),
'tempdir' : ('filename', {'map': True}),
# deprecated
'cia_rpc_server' : ('unicode', {'map': True}),
'diff_command' : ('unicommand', {'map': True}),
},
'aliases' : {
'mail_command' : 'sendmail_command',
'smtp_hostname': 'smtp_host',
'diff' : 'diff_command',
},
}
return typedstruct.members(**args)
def runtimeMembers(space):
""" Define the members of the runtime settings
The following members are defined:
- C{_repos}: The repository object
- C{stdin}: The stdin, read once
- C{path_encoding}: The path-encoding parameter
- C{debug}: debug mode (True/False)
- C{revision}: committed revision number
- C{repository}: path to the repository
- C{config}: supplied config file name
- C{mode}: running mode (see L{modes})
- C{author}: Author of the commit or revpropchange
- C{propname}: Property changed (in revpropchange)
- C{action}: The revprop action (M, A, D)
@param space: The namespace to pollute
@type space: C{dict}
@return: The members definition
@rtype: C{dict}
"""
args = {
'space' : space,
'typemap' : struct_accessors.typemap,
'the_members': {
'_repos' : None, # internal usage (Repository object)
'stdin' : 'stdin',
'path_encoding': 'string',
'debug' : 'bool',
'revision' : 'int',
'repository' : 'filename',
'config' : 'filename',
'mode' : 'string',
'author' : 'unicode',
'propname' : 'unicode',
'action' : 'unicode', # >= svn 1.2
},
'aliases' : None,
}
return typedstruct.members(**args)
class GroupSettingsContainer(typedstruct.Struct):
""" Container for group settings
@see: L{groupMembers} for the actual member list
"""
__slots__ = groupMembers(locals())
def _compare(self, other):
""" compare some of the attributes
@note: It uses a list of attributes that are compared if two
of these types are tested for equality. Keep in mind that
            this comparison takes place when the decision is made
whether a mail for more than one group should be sent more
than once (if the groups are not equal). All attributes, but
the ones returned by L{_getIgnorableMembers} are compared.
@see: L{_getIgnorableMembers}
@param other: The object compared to
@type other: C{GroupSettingsContainer}
@return: Are the objects equal?
@rtype: C{bool}
"""
if type(self) != type(other):
return False
attrs = [name for name in self._members_
if name not in self._getIgnorableMembers()
]
for name in attrs:
if getattr(self, name) != getattr(other, name):
return False
return True
def _getIgnorableMembers(self):
""" Returns the list of member names that be ignored in comparisons
This method called by L{_compare}. Override this method to modify
the list.
@return: The list
@rtype: C{list}
"""
return [
'_name', '_def_for_repos', '_def_for_paths',
'for_repos', 'for_paths', 'exclude_paths',
'ignore_if_other_matches', 'to_addr', 'from_addr',
'to_newsgroup', 'custom_header', 'cia_rpc_server',
'cia_project_name', 'cia_project_module', 'cia_project_branch',
'cia_project_submodule', 'cia_project_path',
]
class GeneralSettingsContainer(typedstruct.Struct):
""" Container for general settings
@see: L{generalMembers} for the actual member list
"""
__slots__ = generalMembers(locals())
class RuntimeSettingsContainer(typedstruct.Struct):
""" Container for runtime settings
@see: L{runtimeMembers} for the actual member list
"""
__slots__ = runtimeMembers(locals())
class Settings(object):
""" Settings management
@note: The C{init} method must be overridden to do the actual
initialization.
@ivar groups: group settings list
@type groups: C{list} of C{GroupSettingsContainer}
@ivar general: General settings
@type general: C{GeneralSettingsContainer}
    @ivar runtime: Runtime settings
@type runtime: C{RuntimeSettingsContainer}
@ivar debug: Debug state
@type debug: C{bool}
@ivar _charset_: The charset used for settings recoding
@type _charset_: C{str}
@ivar _maps_: The value mappers to use or C{None}
@type _maps_: C{dict}
"""
def __init__(self, *args, **kwargs):
""" Constructor
Don't override this one. Override C{init()} instead.
"""
# supply default values
self._charset_ = 'us-ascii'
self._fcharset_ = None
self._maps_ = None
self.groups = []
self.general = None
self.runtime = None
# parameter initialization
self.init(*args, **kwargs)
# sanity check
self._checkInitialization()
def _checkInitialization(self):
""" Checks if all containers are filled """
if not(self.general and self.runtime and self.groups):
raise RuntimeError("Settings are not completely initialized")
def init(self, *args, **kwargs):
""" Abstract initialization method """
raise NotImplementedError()
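    # Minimal sketch of a concrete subclass (purely illustrative; the real
    # implementor lives in svnmailer.config):
    #
    #     class MySettings(Settings):
    #         def init(self, *args, **kwargs):
    #             self.general = self.getGeneralContainer()
    #             self.runtime = self.getRuntimeContainer()
    #             self.groups = [self.getGroupContainer()]
    #
    # __init__ then calls _checkInitialization(), which raises RuntimeError
    # unless all three containers have been filled.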
def _getArgs(self):
""" Returns the basic arguments for container initialization
@return: The args
@rtype: C{list}
"""
return [
self._maps_,
{'encoding': self._charset_, 'path_encoding': self._fcharset_}
]
def getGroupContainer(self, **kwargs):
""" Returns an initialized group settings container
@return: The container object
@rtype: C{GroupSettingsContainer}
"""
return GroupSettingsContainer(*self._getArgs(), **kwargs)
def getDefaultGroupContainer(self, **kwargs):
""" Returns an initialized default group settings container
@return: The container object
@rtype: C{GroupSettingsContainer}
"""
args = self._getArgs()
args[0] = None # no maps
return GroupSettingsContainer(*args, **kwargs)
def getGeneralContainer(self, **kwargs):
""" Returns an initialized general settings container
@return: The container object
@rtype: C{GeneralSettingsContainer}
"""
return GeneralSettingsContainer(*self._getArgs(), **kwargs)
def getRuntimeContainer(self, **kwargs):
""" Returns an initialized runtime settings container
Note that the runtime settings (from commandline)
are always assumed to be utf-8 encoded.
@return: The container object
@rtype: C{RuntimeSettingsContainer}
"""
args = self._getArgs()
args[0] = None
args[1]["encoding"] = "utf-8"
return RuntimeSettingsContainer(*args, **kwargs)
| apache-2.0 | 704,259,331,109,030,700 | 39.206122 | 81 | 0.526115 | false |
polyaxon/polyaxon-api | polyaxon_lib/estimators/hooks/general_hooks.py | 1 | 2631 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from polyaxon_lib.estimators.hooks.utils import can_run_hook
class GlobalStepWaiterHook(basic_session_run_hooks.GlobalStepWaiterHook):
"""Delay execution until global step reaches to wait_until_step.
(A mirror to tensorflow.python.training.basic_session_run_hooks GlobalStepWaiterHook.)
This hook delays execution until global step reaches to `wait_until_step`. It
is used to gradually start workers in distributed settings. One example usage
would be setting `wait_until_step=int(K*log(task_id+1))` assuming that
task_id=0 is the chief.
Args:
wait_until_step: an `int` shows until which global step should we wait.
"""
pass
class FinalOpsHook(basic_session_run_hooks.FinalOpsHook):
"""A run hook which evaluates `Tensors` at the end of a session.
    (A mirror to tensorflow.python.training.basic_session_run_hooks FinalOpsHook.)
Args:
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when running `final_ops_dict`.
"""
pass
class StopAfterNEvalsHook(evaluation._StopAfterNEvalsHook): # pylint: disable=protected-access
"""Run hook used by the evaluation routines to run the `eval_ops` N times."""
pass
class NanTensorHook(basic_session_run_hooks.NanTensorHook):
"""NaN Loss monitor.
A modified version of tensorflow.python.training.basic_session_run_hooks NanTensorHook.
    Checks the context for `no_run_hooks_op` before calling the hook.
Monitors loss and stops training if loss is NaN.
Can either fail with exception or just stop training.
Args:
loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
def before_run(self, run_context): # pylint: disable=unused-argument
if can_run_hook(run_context):
return super(NanTensorHook, self).before_run(run_context)
return None
def after_run(self, run_context, run_values):
if can_run_hook(run_context):
return super(NanTensorHook, self).after_run(run_context, run_values)
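    # Construction sketch (the loss tensor is hypothetical):
    #     hook = NanTensorHook(my_loss_tensor, fail_on_nan_loss=True)
    # Both overrides above first consult can_run_hook(), so runs flagged with
    # the `no_run_hooks_op` marker skip the NaN check entirely.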
GENERAL_HOOKS = OrderedDict([
('GlobalStepWaiterHook', GlobalStepWaiterHook),
('FinalOpsHook', FinalOpsHook),
('StopAfterNEvalsHook', StopAfterNEvalsHook),
('NanTensorHook', NanTensorHook)
])
| mit | -773,701,700,634,766,200 | 34.08 | 96 | 0.717598 | false |
sigma-geosistemas/django-tenants | docs/conf.py | 1 | 6550 | # -*- coding: utf-8 -*-
#
# dinnertime documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 19 10:27:46 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django_tenants'
copyright = '%d, Thomas Turner & Bernardo Pires Carneiro' % datetime.date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
try:
from compressor import __version__
# The short X.Y version.
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
except ImportError:
version = release = 'dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'tenantschemasdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tenantschema.tex', 'tenantschemaDocumentation',
'Thomas Turner', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| mit | -1,764,868,686,530,571,300 | 31.914573 | 86 | 0.710992 | false |
ShovanSarker/mango_office | actions/views.py | 1 | 15639 | from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from users.models import AllUsers, ACL
from status.models import Status
from task.models import Task
import datetime
from attendance.models import AttendanceInOffice, AttendanceInHome
from django.contrib.auth.models import User
# Create your views here.
@csrf_exempt
def login_page(request):
return render(request, 'login.html')
@csrf_exempt
def login_auth(request):
post_data = request.POST
print(post_data)
if 'username' and 'password' in post_data:
print(post_data['username'])
print(post_data['password'])
user = authenticate(username=post_data['username'], password=post_data['password'])
if user is not None:
if user.is_active:
login(request, user)
request.session['user'] = post_data['username']
if user.is_superuser:
res = redirect('/admin')
else:
res = redirect('/')
else:
res = render(request, 'login.html',
{'wrong': True,
'text': 'The password is valid, but the account has been disabled!'})
else:
res = render(request, 'login.html',
{'wrong': True,
'text': 'The username and password you have entered is not correct. Please retry'})
else:
res = render(request, 'login.html', {'wrong': False})
res['Access-Control-Allow-Origin'] = "*"
res['Access-Control-Allow-Headers'] = "Origin, X-Requested-With, Content-Type, Accept"
res['Access-Control-Allow-Methods'] = "PUT, GET, POST, DELETE, OPTIONS"
return res
def logout_now(request):
logout(request)
return redirect('/login')
@login_required(login_url='/login/')
def home(request):
page_title = 'Home'
user = request.session['user']
if not AllUsers.objects.exists():
print(request.session['user'])
new_status = Status.objects.get(StatusKey='office')
new_user = AllUsers(username=user, Name=user, Email=user + '@inflack.com', Status=new_status)
new_user.save()
new_user_acl = ACL(user=new_user,
CanSeeOthersTaskList=True,
CanSeeOthersAttendance=True,
CanAddMoreEmployee=True,
CanSeeOthersDetails=True,
CanSeeOthersStatus=True)
new_user_acl.save()
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
if this_user.Active:
all_status = Status.objects.all()
display = render(request, 'client_dashboard.html', {'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee,
'all_status': all_status,
'page_title': page_title})
else:
logout(request)
display = render(request, 'login.html',
{'wrong': True,
'text': 'You are not authorized to login. Please contact administrator for more details'})
else:
logout(request)
display = render(request, 'login.html',
{'wrong': True,
'text': 'Something went wrong. Please LOGIN again.'})
return display
@login_required(login_url='/login/')
def add_employee(request):
user = request.session['user']
post_data = request.POST
this_user = AllUsers.objects.get(username__exact=user)
# login_user = this_user.Name
# print(post_data['super-admin'])
if 'username' in post_data and 'csrfmiddlewaretoken' in post_data:
if AllUsers.objects.filter(username__exact=user).exists():
if this_user.Active and this_user.acl.CanAddMoreEmployee:
if AllUsers.objects.filter(username__exact=post_data['username']).exists() or \
post_data['username'] == 'admin':
# This username is already taken
print(post_data)
display = render(request, 'add_admin.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee,
'wrong': True,
'text': 'This USERNAME is already taken.'
                                                                          ' Please try with a different one'})
else:
if post_data['password'] == post_data['re-password']:
# password matches
print(post_data)
new_status = Status.objects.get(StatusKey='office')
new_user = AllUsers(username=post_data['username'],
Name=post_data['name'],
Designation=post_data['designation'],
Phone=post_data['phone'],
Email=post_data['email'],
Status=new_status)
new_user.save()
new_user_acl = ACL(user=new_user)
new_user_acl.save()
new_user_login = User.objects.create_user(post_data['username'],
post_data['email'],
post_data['password'])
new_user_login.save()
display = render(request, 'add_admin.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee,
'success': True,
'text': 'New employee has been '
'added successfully.'})
else:
display = render(request, 'add_admin.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee,
'wrong': True,
'text': 'The passwords do not match.'
                                                                              ' Please try again'})
else:
logout(request)
display = render(request, 'login.html',
{'wrong': True,
'text': 'You are not authorized to login.'
' Please contact administrator for more details'})
else:
display = redirect('/')
else:
if this_user.acl.CanAddMoreEmployee:
display = render(request, 'add_admin.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = render(request, 'access_denied.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
return display
@login_required(login_url='/login/')
def change_status(request):
user = request.session['user']
get_data = request.GET
if AllUsers.objects.filter(username__exact=user).exists():
new_status = Status.objects.get(StatusKey=get_data['to'])
this_user = AllUsers.objects.get(username__exact=user)
current_status = this_user.Status
print(current_status.StatusKey)
print(get_data['to'])
if ((get_data['to'] == 'office' or get_data['to'] == 'away' or
get_data['to'] == 'meeting' or get_data['to'] == 'out') and current_status.StatusKey != 'home') or \
get_data['to'] == 'home' and current_status.StatusKey == 'out' or \
get_data['to'] == 'out' and current_status.StatusKey == 'home':
if (get_data['to'] == 'office' or get_data['to'] == 'away' or get_data['to'] == 'meeting') \
and current_status.StatusKey == 'out':
new_office_attendance = AttendanceInOffice(User=this_user)
new_office_attendance.save()
elif get_data['to'] == 'home'and current_status.StatusKey == 'out':
new_home_attendance = AttendanceInHome(User=this_user)
new_home_attendance.save()
elif get_data['to'] == 'out'and current_status.StatusKey == 'home':
new_home_attendance = AttendanceInHome.objects.get(User=this_user, ExitTime=None)
print(datetime.datetime.now())
new_home_attendance.ExitTime = datetime.datetime.now()
new_home_attendance.save()
elif get_data['to'] == 'out'and (current_status.StatusKey == 'office' or
current_status.StatusKey == 'away' or
current_status.StatusKey == 'meeting'):
new_office_attendance = AttendanceInOffice.objects.get(User=this_user, ExitTime=None)
print(datetime.datetime.now())
new_office_attendance.ExitTime = datetime.datetime.now()
new_office_attendance.save()
this_user.Status = new_status
this_user.save()
display = redirect('/')
else:
display = redirect('/logout')
return display
@login_required(login_url='/login/')
def employee_list(request):
user = request.session['user']
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
if this_user.acl.CanSeeOthersStatus:
all_employees = AllUsers.objects.all()
display = render(request, 'admin_list.html', {'page_title': 'Add Employee',
'login_user': this_user,
'all_employees': all_employees,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = render(request, 'access_denied.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = redirect('/logout')
return display
@login_required(login_url='/login/')
def all_task(request):
user = request.session['user']
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
# if this_user.acl.CanSeeOthersStatus:
all_tasks = Task.objects.filter(AssignedTo=this_user)
assigned_tasks = Task.objects.filter(AssignedBy=this_user)
display = render(request, 'all_task.html', {'page_title': 'Task List',
'login_user': this_user,
'all_tasks': all_tasks,
'assigned_tasks': assigned_tasks,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = redirect('/logout')
return display
@login_required(login_url='/login/')
def attendance(request):
user = request.session['user']
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
# if this_user.acl.CanSeeOthersStatus:
office_work = AttendanceInOffice.objects.filter(User=this_user)
home_work = AttendanceInHome.objects.filter(User=this_user)
display = render(request, 'attendance.html', {'page_title': 'Attendance',
'login_user': this_user,
'office_work': office_work,
'home_work': home_work,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = redirect('/logout')
return display
@login_required(login_url='/login/')
def profile(request):
user = request.session['user']
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
display = render(request, 'profile.html', {'page_title': 'Profile',
'login_user': this_user,
'this_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = redirect('/logout')
return display
| gpl-2.0 | -5,272,535,817,592,566,000 | 53.114187 | 127 | 0.475478 | false |
qingtech/weibome | weibome/settings.py | 1 | 5454 | # Django settings for weibome project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
import os
if 'SERVER_SOFTWARE' in os.environ:
from sae.const import (
MYSQL_HOST, MYSQL_PORT, MYSQL_USER, MYSQL_PASS, MYSQL_DB
)
else:
# Make `python manage.py syncdb` works happy!
MYSQL_HOST = 'localhost'
MYSQL_PORT = '3306'
MYSQL_USER = 'root'
MYSQL_PASS = '123'
MYSQL_DB = 'weibome'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': MYSQL_DB,
'USER': MYSQL_USER,
'PASSWORD': MYSQL_PASS,
'HOST': MYSQL_HOST,
'PORT': MYSQL_PORT,
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = 'static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#7-*s-)n!pnjrlv@f%f4&pn+#lr8)3o!5j-d-(is2accw!9x5p'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'weibome.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'weibome.wsgi.application'
import os.path
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'weime',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| gpl-2.0 | 3,294,211,815,736,853,500 | 30.526012 | 88 | 0.688302 | false |
vbraun/SageUI | src/sageui/view/trac_window.py | 1 | 13199 | """
Window showing a Trac Ticket
"""
##############################################################################
# SageUI: A graphical user interface to Sage, Trac, and Git.
# Copyright (C) 2013 Volker Braun <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import logging
import gtk
import gobject
import pango
from gtksourceview2 import View as GtkSourceView
from buildable import Buildable
from window import Window
from terminal_widget import TerminalWidget
class TracWindowUpdater(object):
def __init__(self, trac_window, timeout=1):
self.trac_window = trac_window
self.counter = 0
gobject.timeout_add_seconds(timeout, self.callback)
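        # gobject re-schedules self.callback every `timeout` seconds for as long
        # as the callback returns True; returning False cancels the timer.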
def callback(self):
self.counter += 1
#print 'updating trac window', str(self.counter)
if not self.trac_window.window.get_visible():
return False
self.trac_window.update_ticket_age()
return True
class TracWindow(Buildable, Window):
def __init__(self, presenter, glade_file):
self.presenter = presenter
Buildable.__init__(self, ['trac_window', 'trac_menubar', 'trac_toolbar',
'trac_tool_web', 'trac_tool_git', 'trac_tool_refresh',
'trac_tool_git_icon',
'trac_ticketlist_store', 'trac_ticketlist_view',
'trac_search_entry',
'trac_comments',
'trac_comment_text', 'trac_comment_buffer'])
builder = self.get_builder(glade_file)
Window.__init__(self, builder, 'trac_window')
self.menu = builder.get_object('trac_menubar')
self.toolbar = builder.get_object('trac_toolbar')
self.search_entry = builder.get_object('trac_search_entry')
self.ticketlist_store = builder.get_object('trac_ticketlist_store')
self.ticketlist_view = builder.get_object('trac_ticketlist_view')
self._init_ticketlist(self.ticketlist_view)
self.comments = builder.get_object('trac_comments')
self._init_comments(self.comments)
self.comment_text = builder.get_object('trac_comment_text')
self.comment_buffer = builder.get_object('trac_comment_buffer')
self.toolbar_web = builder.get_object('trac_tool_web')
self.toolbar_refresh = builder.get_object('trac_tool_refresh')
self.toolbar_git = builder.get_object('trac_tool_git')
builder.connect_signals(self)
self.ticket_list = None
self.current_ticket = None
def _init_ticketlist(self, listview):
listview.get_selection().set_mode(gtk.SELECTION_BROWSE)
# add two columns
self.col_title = gtk.TreeViewColumn('Description')
self.col_time = gtk.TreeViewColumn('Last seen')
listview.append_column(self.col_title)
listview.append_column(self.col_time)
# create a CellRenderers to render the data
self.cell_title = gtk.CellRendererText()
self.cell_title.set_property('ellipsize', pango.ELLIPSIZE_END)
self.cell_time = gtk.CellRendererText()
# add the cells to the columns - 2 in the first
self.col_title.pack_start(self.cell_title, True)
self.col_title.set_attributes(self.cell_title, markup=1)
self.col_title.set_resizable(True)
self.col_title.set_expand(True)
self.col_time.pack_end(self.cell_time, True)
self.col_time.set_attributes(self.cell_time, markup=2)
#self.col_time.set_expand(True)
def _init_comments(self, comments):
color = gtk.gdk.color_parse('#F0EAD6')
comments.modify_base(gtk.STATE_NORMAL, color)
tag_table = comments.get_buffer().get_tag_table()
tag = gtk.TextTag('warning')
tag.set_property('foreground', 'red')
tag_table.add(tag)
tag = gtk.TextTag('label')
tag.set_property('foreground', 'blue')
tag.set_property('style', pango.STYLE_ITALIC)
tag_table.add(tag)
tag = gtk.TextTag('description')
tag.set_property('foreground', 'black')
tag.set_property('family', 'monospace')
tag.set_property('wrap-mode', gtk.WRAP_WORD)
tag_table.add(tag)
tag = gtk.TextTag('trac_field')
tag.set_property('foreground', 'black')
tag.set_property('family', 'monospace')
tag.set_property('weight', pango.WEIGHT_SEMIBOLD)
tag_table.add(tag)
tag = gtk.TextTag('comment')
tag.set_property('foreground', 'black')
tag.set_property('family', 'monospace')
tag.set_property('wrap-mode', gtk.WRAP_WORD)
tag_table.add(tag)
tag = gtk.TextTag('title')
tag.set_property('foreground', 'black')
tag.set_property('weight', pango.WEIGHT_BOLD)
tag.set_property('scale', pango.SCALE_X_LARGE)
tag_table.add(tag)
tag = gtk.TextTag('debug')
tag.set_property('wrap-mode', gtk.WRAP_WORD)
tag_table.add(tag)
def show(self):
super(TracWindow, self).show()
TracWindowUpdater(self)
def set_ticket_list(self, ticket_list, current_ticket=None):
assert (current_ticket is None) or (current_ticket in ticket_list)
self.ticket_list = ticket_list
self.ticketlist_store.clear()
for ticket in ticket_list:
n = ticket.get_number()
row = [n,
'<b>#'+str(n)+'</b> '+ticket.get_title(),
str(ticket.get_pretty_last_viewed_time())]
self.ticketlist_store.append(row)
self.set_current_ticket(current_ticket)
def get_ticket_numbers(self):
result = []
store = self.ticketlist_store
iter = store.get_iter_first()
while iter is not None:
            result.append(store.get_value(iter, 0))
            iter = store.iter_next(iter)
        return tuple(result)
def set_current_ticket(self, ticket):
"""
Select ``ticket`` in the ticket list.
Also, updates the "Last seen" field since it probably changed to right now.
"""
self.current_ticket = ticket
sel = self.ticketlist_view.get_selection()
if ticket is None:
sel.unselect_all()
self.toolbar_refresh.set_sensitive(False)
self.toolbar_web.set_sensitive(False)
self.toolbar_git.set_sensitive(False)
return
assert ticket in self.ticket_list
ticket_number = ticket.get_number()
store = self.ticketlist_store
iter = store.get_iter_first()
while (iter is not None) and (store.get_value(iter, 0) != ticket_number):
iter = store.iter_next(iter)
        assert iter is not None
sel.select_iter(iter)
self.toolbar_refresh.set_sensitive(True)
self.toolbar_web.set_sensitive(True)
self.toolbar_git.set_sensitive(ticket.get_branch() is not None)
self.update_ticket_age([ticket])
def update_ticket_age(self, tickets=None):
if tickets is None:
tickets = self.ticket_list
if tickets is None:
return
ticket_by_number = dict()
for ticket in self.ticket_list:
ticket_by_number[ticket.get_number()] = ticket
store = self.ticketlist_store
iter = store.get_iter_first()
while iter is not None:
n = store.get_value(iter, 0)
ticket = ticket_by_number[n]
store.set(iter, 2, str(ticket.get_pretty_last_viewed_time()))
iter = store.iter_next(iter)
def on_trac_ticketlist_view_cursor_changed(self, widget, data=None):
model, iter = self.ticketlist_view.get_selection().get_selected()
if not iter:
return
ticket_number = model.get_value(iter, 0)
logging.info('trac ticket cursor changed to #%s', ticket_number)
self.presenter.ticket_selected(ticket_number)
def display_ticket(self, ticket):
buf = self.comments.get_buffer()
buf.set_text('')
if ticket is None:
return
def append(*args):
buf.insert_with_tags(buf.get_end_iter(), *args)
tag_table = buf.get_tag_table()
warn_tag = tag_table.lookup('warning')
title_tag = tag_table.lookup('title')
label_tag = tag_table.lookup('label')
trac_field_tag = tag_table.lookup('trac_field')
description_tag = tag_table.lookup('description')
comment_tag = tag_table.lookup('comment')
debug_tag = tag_table.lookup('debug')
append('Trac #'+str(ticket.get_number())+': '+ticket.get_title(), title_tag)
append('\n\n')
branch = ticket.get_branch()
if branch is not None:
append('Branch: ', label_tag)
append(branch, trac_field_tag)
append('\n')
deps = ticket.get_dependencies()
if deps is not None:
append('Dependencies: ', label_tag)
append(deps, trac_field_tag)
append('\n')
append('Description:\n', label_tag)
append(ticket.get_description().strip(), description_tag)
for comment in ticket.comment_iter():
append('\n\n')
author = comment.get_author()
time = comment.get_ctime().ctime()
append('Comment (by {0} on {1}):\n'.format(author, time), label_tag)
append(comment.get_comment().strip(), comment_tag)
append('\n\n')
append('Created: ', label_tag)
append(ticket.get_ctime().ctime(), trac_field_tag)
append('\t Last modified: ', label_tag)
append(ticket.get_mtime().ctime(), trac_field_tag)
append('\n\n')
append(str(ticket._data), debug_tag)
append('\n')
for log in ticket._change_log:
append(str(log) + '\n', debug_tag)
def on_trac_window_delete_event(self, widget, data=None):
self.presenter.hide_trac_window()
return True
def on_trac_menu_close_activate(self, widget, data=None):
self.presenter.hide_trac_window()
def on_trac_window_map(self, widget, data=None):
print 'trac window map'
def on_trac_menu_new_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac new ticket")
def on_trac_menu_open_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac open ticket")
def on_trac_menu_about_activate(self, widget, data=None):
self.presenter.show_about_dialog()
def on_trac_menu_cut_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac cut")
def on_trac_menu_copy_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac copy")
def on_trac_menu_paste_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac paste")
def on_trac_menu_delete_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac delete")
def on_trac_menu_preferences_activate(self, widget, data=None):
self.presenter.show_preferences_dialog()
def on_trac_tool_new_clicked(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac new ticket")
def on_trac_tool_web_clicked(self, widget, data=None):
url = 'http://trac.sagemath.org/{0}'.format(self.current_ticket.get_number())
self.presenter.xdg_open(url)
def on_trac_tool_git_clicked(self, widget, data=None):
branch = self.current_ticket.get_branch()
assert branch is not None # button should have been disabled
number = self.current_ticket.get_number()
logging.info('git button for %s %s', branch, number)
self.presenter.checkout_branch(branch, number)
self.presenter.show_git_window()
def on_trac_tool_refresh_clicked(self, widget, data=None):
self.presenter.load_ticket(self.current_ticket)
def on_trac_search_entry_activate(self, widget, data=None):
entry = self.search_entry.get_buffer().get_text()
entry = entry.strip('# ')
logging.info('searching trac for %s', entry)
try:
ticket_number = int(entry)
self.presenter.load_ticket(ticket_number)
except ValueError:
self.presenter.show_error(self, 'Invalid ticket number', 'Expected integer, got: '+entry)
| gpl-3.0 | 6,258,722,187,542,107,000 | 39.48773 | 101 | 0.608152 | false |
berkeley-stat159/project-lambda | code/stat159lambda/utils/tests/test_parse_demographics.py | 1 | 1388 | from __future__ import absolute_import
from stat159lambda.utils import parse_demographics
import os
import csv
def prepare_for_tests():
with open('demographics.csv', 'w') as csvfile:
file_writer = csv.writer(csvfile, delimiter=',', quotechar='"')
file_writer.writerow(['id', 'gender', 'age', 'forrest_seen_count'])
file_writer.writerow(['1', 'm', '30-35', '5'])
file_writer.writerow(['2', 'm', '30-35', '1'])
test_object = parse_demographics.parse_csv('demographics.csv')
return test_object
def test_seen_most_times():
test_subjects = prepare_for_tests()
seen_count = parse_demographics.seen_most_times(test_subjects)
assert seen_count[0] == 5
assert seen_count[1] == 1
delete_file()
def test_seen_least_times():
test_subjects = prepare_for_tests()
seen_count = parse_demographics.seen_least_times(test_subjects)
assert seen_count[0] == 1
assert seen_count[1] == 2
delete_file()
def test_find_id_by_gender():
test_subjects = prepare_for_tests()
id_list = parse_demographics.find_id_by_gender(test_subjects, 'm')
assert len(id_list) == 2
assert id_list[0] == 'm'
assert id_list[1] == 'm'
delete_file()
def test_find_count_by_id():
test_subjects = prepare_for_tests()
count = parse_demographics.find_count_by_id(test_subjects, 1)
assert count == 5
delete_file()
def delete_file():
os.remove('demographics.csv')
| bsd-3-clause | -6,484,319,558,559,641,000 | 26.215686 | 75 | 0.676513 | false |
trabucayre/gnuradio | gr-audio/examples/python/dial_tone_daemon.py | 1 | 1411 | #!/usr/bin/env python
#
# Copyright 2004,2005,2007,2008,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gru
from gnuradio import audio
from gnuradio.eng_arg import eng_float
from argparse import ArgumentParser
import os
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
parser = ArgumentParser()
parser.add_argument("-O", "--audio-output", default="",
help="pcm output device name. E.g., hw:0,0 or /dev/dsp")
parser.add_argument("-r", "--sample-rate", type=eng_float, default=48000,
help="set sample rate to RATE (%(default)r)")
args = parser.parse_args()
sample_rate = int(args.sample_rate)
ampl = 0.1
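        # 350 Hz and 440 Hz are the two components of the standard North American
        # dial tone; each source below drives one channel of the audio sink.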
src0 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 350, ampl)
src1 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 440, ampl)
dst = audio.sink(sample_rate, args.audio_output)
self.connect(src0, (dst, 0))
self.connect(src1, (dst, 1))
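# Example invocation (hypothetical audio device name):
#   ./dial_tone_daemon.py -O hw:0,0 -r 48000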
if __name__ == '__main__':
pid = gru.daemonize()
print("To stop this program, enter 'kill %d'" % pid)
my_top_block().run()
| gpl-3.0 | -8,665,632,935,623,739,000 | 29.673913 | 83 | 0.61871 | false |
mtl/svg2mod | svg2mod/svg2mod.py | 1 | 39409 | #!/usr/bin/python
from __future__ import absolute_import
import argparse
import datetime
import os
from pprint import pformat, pprint
import re
import svg2mod.svg as svg
import sys
#----------------------------------------------------------------------------
DEFAULT_DPI = 96 # 96 as of Inkscape 0.92
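# Example invocation (hypothetical file names):
#   python svg2mod.py -i drawing.svg -o drawing.kicad_mod --format pretty --dpi 96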
def main():
args, parser = get_arguments()
pretty = args.format == 'pretty'
use_mm = args.units == 'mm'
if pretty:
if not use_mm:
print( "Error: decimil units only allowed with legacy output type" )
sys.exit( -1 )
#if args.include_reverse:
#print(
#"Warning: reverse footprint not supported or required for" +
#" pretty output format"
#)
# Import the SVG:
imported = Svg2ModImport(
args.input_file_name,
args.module_name,
args.module_value
)
# Pick an output file name if none was provided:
if args.output_file_name is None:
args.output_file_name = os.path.splitext(
os.path.basename( args.input_file_name )
)[ 0 ]
# Append the correct file name extension if needed:
if pretty:
extension = ".kicad_mod"
else:
extension = ".mod"
if args.output_file_name[ - len( extension ) : ] != extension:
args.output_file_name += extension
# Create an exporter:
if pretty:
exported = Svg2ModExportPretty(
imported,
args.output_file_name,
args.scale_factor,
args.precision,
            dpi = args.dpi,
)
else:
# If the module file exists, try to read it:
exported = None
if os.path.isfile( args.output_file_name ):
try:
exported = Svg2ModExportLegacyUpdater(
imported,
args.output_file_name,
args.scale_factor,
args.precision,
args.dpi,
include_reverse = not args.front_only,
)
except Exception as e:
raise e
#print( e.message )
#exported = None
# Write the module file:
if exported is None:
exported = Svg2ModExportLegacy(
imported,
args.output_file_name,
args.scale_factor,
args.precision,
use_mm = use_mm,
dpi = args.dpi,
include_reverse = not args.front_only,
)
# Export the footprint:
exported.write()
#----------------------------------------------------------------------------
class LineSegment( object ):
#------------------------------------------------------------------------
@staticmethod
def _on_segment( p, q, r ):
""" Given three colinear points p, q, and r, check if
point q lies on line segment pr. """
if (
q.x <= max( p.x, r.x ) and
q.x >= min( p.x, r.x ) and
q.y <= max( p.y, r.y ) and
q.y >= min( p.y, r.y )
):
return True
return False
#------------------------------------------------------------------------
@staticmethod
def _orientation( p, q, r ):
""" Find orientation of ordered triplet (p, q, r).
        Returns one of the following values:
0 --> p, q and r are colinear
1 --> Clockwise
2 --> Counterclockwise
"""
val = (
( q.y - p.y ) * ( r.x - q.x ) -
( q.x - p.x ) * ( r.y - q.y )
)
if val == 0: return 0
if val > 0: return 1
return 2
#------------------------------------------------------------------------
def __init__( self, p = None, q = None ):
self.p = p
self.q = q
#------------------------------------------------------------------------
def connects( self, segment ):
if self.q.x == segment.p.x and self.q.y == segment.p.y: return True
if self.q.x == segment.q.x and self.q.y == segment.q.y: return True
if self.p.x == segment.p.x and self.p.y == segment.p.y: return True
if self.p.x == segment.q.x and self.p.y == segment.q.y: return True
return False
#------------------------------------------------------------------------
def intersects( self, segment ):
""" Return true if line segments 'p1q1' and 'p2q2' intersect.
Adapted from:
http://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/
"""
# Find the four orientations needed for general and special cases:
o1 = self._orientation( self.p, self.q, segment.p )
o2 = self._orientation( self.p, self.q, segment.q )
o3 = self._orientation( segment.p, segment.q, self.p )
o4 = self._orientation( segment.p, segment.q, self.q )
return (
# General case:
( o1 != o2 and o3 != o4 )
or
# p1, q1 and p2 are colinear and p2 lies on segment p1q1:
( o1 == 0 and self._on_segment( self.p, segment.p, self.q ) )
or
# p1, q1 and p2 are colinear and q2 lies on segment p1q1:
( o2 == 0 and self._on_segment( self.p, segment.q, self.q ) )
or
# p2, q2 and p1 are colinear and p1 lies on segment p2q2:
( o3 == 0 and self._on_segment( segment.p, self.p, segment.q ) )
or
# p2, q2 and q1 are colinear and q1 lies on segment p2q2:
( o4 == 0 and self._on_segment( segment.p, self.q, segment.q ) )
)
#------------------------------------------------------------------------
def q_next( self, q ):
self.p = self.q
self.q = q
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class PolygonSegment( object ):
#------------------------------------------------------------------------
def __init__( self, points ):
self.points = points
if len( points ) < 3:
print(
"Warning:"
" Path segment has only {} points (not a polygon?)".format(
len( points )
)
)
#------------------------------------------------------------------------
# KiCad will not "pick up the pen" when moving between a polygon outline
# and holes within it, so we search for a pair of points connecting the
# outline (self) to the hole such that the connecting segment will not
# cross the visible inner space within any hole.
def _find_insertion_point( self, hole, holes ):
#print( " Finding insertion point. {} holes".format( len( holes ) ) )
# Try the next point on the container:
for cp in range( len( self.points ) ):
container_point = self.points[ cp ]
#print( " Trying container point {}".format( cp ) )
# Try the next point on the hole:
for hp in range( len( hole.points ) - 1 ):
hole_point = hole.points[ hp ]
#print( " Trying hole point {}".format( cp ) )
bridge = LineSegment( container_point, hole_point )
# Check for intersection with each other hole:
for other_hole in holes:
#print( " Trying other hole. Check = {}".format( hole == other_hole ) )
# If the other hole intersects, don't bother checking
# remaining holes:
if other_hole.intersects(
bridge,
check_connects = (
other_hole == hole or other_hole == self
)
): break
#print( " Hole does not intersect." )
else:
print( " Found insertion point: {}, {}".format( cp, hp ) )
# No other holes intersected, so this insertion point
# is acceptable:
return ( cp, hole.points_starting_on_index( hp ) )
print(
"Could not insert segment without overlapping other segments"
)
#------------------------------------------------------------------------
# Return the list of ordered points starting on the given index, ensuring
# that the first and last points are the same.
def points_starting_on_index( self, index ):
points = self.points
if index > 0:
# Strip off end point, which is a duplicate of the start point:
points = points[ : -1 ]
points = points[ index : ] + points[ : index ]
points.append(
svg.Point( points[ 0 ].x, points[ 0 ].y )
)
return points
#------------------------------------------------------------------------
# Return a list of points with the given polygon segments (paths) inlined.
def inline( self, segments ):
if len( segments ) < 1:
return self.points
print( " Inlining {} segments...".format( len( segments ) ) )
all_segments = segments[ : ] + [ self ]
insertions = []
# Find the insertion point for each hole:
for hole in segments:
insertion = self._find_insertion_point(
hole, all_segments
)
if insertion is not None:
insertions.append( insertion )
insertions.sort( key = lambda i: i[ 0 ] )
inlined = [ self.points[ 0 ] ]
ip = 1
points = self.points
for insertion in insertions:
while ip <= insertion[ 0 ]:
inlined.append( points[ ip ] )
ip += 1
if (
inlined[ -1 ].x == insertion[ 1 ][ 0 ].x and
inlined[ -1 ].y == insertion[ 1 ][ 0 ].y
):
inlined += insertion[ 1 ][ 1 : -1 ]
else:
inlined += insertion[ 1 ]
inlined.append( svg.Point(
points[ ip - 1 ].x,
points[ ip - 1 ].y,
) )
while ip < len( points ):
inlined.append( points[ ip ] )
ip += 1
return inlined
#------------------------------------------------------------------------
def intersects( self, line_segment, check_connects ):
hole_segment = LineSegment()
# Check each segment of other hole for intersection:
for point in self.points:
hole_segment.q_next( point )
if hole_segment.p is not None:
if (
check_connects and
line_segment.connects( hole_segment )
): continue
if line_segment.intersects( hole_segment ):
#print( "Intersection detected." )
return True
return False
#------------------------------------------------------------------------
# Apply all transformations and rounding, then remove duplicate
# consecutive points along the path.
def process( self, transformer, flip ):
points = []
for point in self.points:
point = transformer.transform_point( point, flip )
if (
len( points ) < 1 or
point.x != points[ -1 ].x or
point.y != points[ -1 ].y
):
points.append( point )
if (
points[ 0 ].x != points[ -1 ].x or
points[ 0 ].y != points[ -1 ].y
):
#print( "Warning: Closing polygon. start=({}, {}) end=({}, {})".format(
#points[ 0 ].x, points[ 0 ].y,
#points[ -1 ].x, points[ -1 ].y,
#) )
points.append( svg.Point(
points[ 0 ].x,
points[ 0 ].y,
) )
#else:
#print( "Polygon closed: start=({}, {}) end=({}, {})".format(
#points[ 0 ].x, points[ 0 ].y,
#points[ -1 ].x, points[ -1 ].y,
#) )
self.points = points
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModImport( object ):
#------------------------------------------------------------------------
def __init__( self, file_name, module_name, module_value ):
self.file_name = file_name
self.module_name = module_name
self.module_value = module_value
print( "Parsing SVG..." )
self.svg = svg.parse( file_name )
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModExport( object ):
#------------------------------------------------------------------------
@staticmethod
def _convert_decimil_to_mm( decimil ):
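        # KiCad legacy units are "decimil" (1/10000 inch): 25.4 / 10000 = 0.00254 mm.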
return float( decimil ) * 0.00254
#------------------------------------------------------------------------
@staticmethod
def _convert_mm_to_decimil( mm ):
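        # Inverse of the above: 10000 / 25.4 ~= 393.700787 decimil per millimetre.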
return int( round( mm * 393.700787 ) )
#------------------------------------------------------------------------
def _get_fill_stroke( self, item ):
fill = True
stroke = True
stroke_width = 0.0
if item.style is not None and item.style != "":
for property in item.style.split( ";" ):
                nv = property.split( ":" )
name = nv[ 0 ].strip()
value = nv[ 1 ].strip()
if name == "fill" and value == "none":
fill = False
elif name == "stroke" and value == "none":
stroke = False
elif name == "stroke-width":
value = value.replace( "px", "" )
stroke_width = float( value ) * 25.4 / float(self.dpi)
if not stroke:
stroke_width = 0.0
elif stroke_width is None:
# Give a default stroke width?
stroke_width = self._convert_decimil_to_mm( 1 )
return fill, stroke, stroke_width
#------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
scale_factor = 1.0,
precision = 20.0,
use_mm = True,
dpi = DEFAULT_DPI,
):
if use_mm:
# 25.4 mm/in;
scale_factor *= 25.4 / float(dpi)
use_mm = True
else:
# PCBNew uses "decimil" (10K DPI);
scale_factor *= 10000.0 / float(dpi)
self.imported = svg2mod_import
self.file_name = file_name
self.scale_factor = scale_factor
self.precision = precision
self.use_mm = use_mm
self.dpi = dpi
#------------------------------------------------------------------------
def _calculate_translation( self ):
min_point, max_point = self.imported.svg.bbox()
# Center the drawing:
adjust_x = min_point.x + ( max_point.x - min_point.x ) / 2.0
adjust_y = min_point.y + ( max_point.y - min_point.y ) / 2.0
self.translation = svg.Point(
0.0 - adjust_x,
0.0 - adjust_y,
)
#------------------------------------------------------------------------
# Find and keep only the layers of interest.
def _prune( self, items = None ):
if items is None:
self.layers = {}
for name in self.layer_map.iterkeys():
self.layers[ name ] = None
items = self.imported.svg.items
self.imported.svg.items = []
for item in items:
if not isinstance( item, svg.Group ):
continue
for name in self.layers.iterkeys():
#if re.search( name, item.name, re.I ):
if name == item.name:
print( "Found SVG layer: {}".format( item.name ) )
self.imported.svg.items.append( item )
self.layers[ name ] = item
break
else:
self._prune( item.items )
#------------------------------------------------------------------------
def _write_items( self, items, layer, flip = False ):
for item in items:
if isinstance( item, svg.Group ):
self._write_items( item.items, layer, flip )
continue
elif isinstance( item, svg.Path ):
segments = [
PolygonSegment( segment )
for segment in item.segments(
precision = self.precision
)
]
for segment in segments:
segment.process( self, flip )
if len( segments ) > 1:
points = segments[ 0 ].inline( segments[ 1 : ] )
elif len( segments ) > 0:
points = segments[ 0 ].points
fill, stroke, stroke_width = self._get_fill_stroke( item )
if not self.use_mm:
stroke_width = self._convert_mm_to_decimil(
stroke_width
)
print( " Writing polygon with {} points".format(
len( points ) )
)
self._write_polygon(
points, layer, fill, stroke, stroke_width
)
else:
print( "Unsupported SVG element: {}".format(
item.__class__.__name__
) )
#------------------------------------------------------------------------
def _write_module( self, front ):
module_name = self._get_module_name( front )
min_point, max_point = self.imported.svg.bbox()
min_point = self.transform_point( min_point, flip = False )
max_point = self.transform_point( max_point, flip = False )
label_offset = 1200
label_size = 600
label_pen = 120
if self.use_mm:
label_size = self._convert_decimil_to_mm( label_size )
label_pen = self._convert_decimil_to_mm( label_pen )
reference_y = min_point.y - self._convert_decimil_to_mm( label_offset )
value_y = max_point.y + self._convert_decimil_to_mm( label_offset )
else:
reference_y = min_point.y - label_offset
value_y = max_point.y + label_offset
self._write_module_header(
label_size, label_pen,
reference_y, value_y,
front,
)
for name, group in self.layers.iteritems():
if group is None: continue
layer = self._get_layer_name( name, front )
#print( " Writing layer: {}".format( name ) )
self._write_items( group.items, layer, not front )
self._write_module_footer( front )
#------------------------------------------------------------------------
def _write_polygon_filled( self, points, layer, stroke_width = 0.0 ):
self._write_polygon_header( points, layer )
for point in points:
self._write_polygon_point( point )
self._write_polygon_footer( layer, stroke_width )
#------------------------------------------------------------------------
def _write_polygon_outline( self, points, layer, stroke_width ):
prior_point = None
for point in points:
if prior_point is not None:
self._write_polygon_segment(
prior_point, point, layer, stroke_width
)
prior_point = point
#------------------------------------------------------------------------
def transform_point( self, point, flip = False ):
transformed_point = svg.Point(
( point.x + self.translation.x ) * self.scale_factor,
( point.y + self.translation.y ) * self.scale_factor,
)
if flip:
transformed_point.x *= -1
if self.use_mm:
transformed_point.x = round( transformed_point.x, 12 )
transformed_point.y = round( transformed_point.y, 12 )
else:
transformed_point.x = int( round( transformed_point.x ) )
transformed_point.y = int( round( transformed_point.y ) )
return transformed_point
#------------------------------------------------------------------------
def write( self ):
self._prune()
# Must come after pruning:
        self._calculate_translation()
print( "Writing module file: {}".format( self.file_name ) )
self.output_file = open( self.file_name, 'w' )
self._write_library_intro()
self._write_modules()
self.output_file.close()
self.output_file = None
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModExportLegacy( Svg2ModExport ):
layer_map = {
#'inkscape-name' : [ kicad-front, kicad-back ],
'Cu' : [ 15, 0 ],
'Adhes' : [ 17, 16 ],
'Paste' : [ 19, 18 ],
'SilkS' : [ 21, 20 ],
'Mask' : [ 23, 22 ],
'Dwgs.User' : [ 24, 24 ],
'Cmts.User' : [ 25, 25 ],
'Eco1.User' : [ 26, 26 ],
'Eco2.User' : [ 27, 27 ],
'Edge.Cuts' : [ 28, 28 ],
}
#------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
scale_factor = 1.0,
precision = 20.0,
use_mm = True,
dpi = DEFAULT_DPI,
include_reverse = True,
):
super( Svg2ModExportLegacy, self ).__init__(
svg2mod_import,
file_name,
scale_factor,
precision,
use_mm,
dpi,
)
self.include_reverse = include_reverse
#------------------------------------------------------------------------
def _get_layer_name( self, name, front ):
layer_info = self.layer_map[ name ]
layer = layer_info[ 0 ]
if not front and layer_info[ 1 ] is not None:
layer = layer_info[ 1 ]
return layer
#------------------------------------------------------------------------
def _get_module_name( self, front = None ):
if self.include_reverse and not front:
return self.imported.module_name + "-rev"
return self.imported.module_name
#------------------------------------------------------------------------
def _write_library_intro( self ):
modules_list = self._get_module_name( front = True )
if self.include_reverse:
modules_list += (
"\n" +
self._get_module_name( front = False )
)
units = ""
if self.use_mm:
units = "\nUnits mm"
self.output_file.write( """PCBNEW-LibModule-V1 {0}{1}
$INDEX
{2}
$EndINDEX
#
# {3}
#
""".format(
datetime.datetime.now().strftime( "%a %d %b %Y %I:%M:%S %p %Z" ),
units,
modules_list,
self.imported.file_name,
)
)
#------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
self.output_file.write( """$MODULE {0}
Po 0 0 0 {6} 00000000 00000000 ~~
Li {0}
T0 0 {1} {2} {2} 0 {3} N I 21 "{0}"
T1 0 {5} {2} {2} 0 {3} N I 21 "{4}"
""".format(
self._get_module_name( front ),
reference_y,
label_size,
label_pen,
self.imported.module_value,
value_y,
15, # Seems necessary
)
)
#------------------------------------------------------------------------
def _write_module_footer( self, front ):
self.output_file.write(
"$EndMODULE {0}\n".format( self._get_module_name( front ) )
)
#------------------------------------------------------------------------
def _write_modules( self ):
self._write_module( front = True )
if self.include_reverse:
self._write_module( front = False )
self.output_file.write( "$EndLIBRARY" )
#------------------------------------------------------------------------
def _write_polygon( self, points, layer, fill, stroke, stroke_width ):
if fill:
self._write_polygon_filled(
points, layer
)
if stroke:
self._write_polygon_outline(
points, layer, stroke_width
)
#------------------------------------------------------------------------
def _write_polygon_footer( self, layer, stroke_width ):
pass
#------------------------------------------------------------------------
def _write_polygon_header( self, points, layer ):
pen = 1
if self.use_mm:
pen = self._convert_decimil_to_mm( pen )
self.output_file.write( "DP 0 0 0 0 {} {} {}\n".format(
len( points ),
pen,
layer
) )
#------------------------------------------------------------------------
def _write_polygon_point( self, point ):
self.output_file.write(
"Dl {} {}\n".format( point.x, point.y )
)
#------------------------------------------------------------------------
def _write_polygon_segment( self, p, q, layer, stroke_width ):
self.output_file.write( "DS {} {} {} {} {} {}\n".format(
p.x, p.y,
q.x, q.y,
stroke_width,
layer
) )
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModExportLegacyUpdater( Svg2ModExportLegacy ):
#------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
scale_factor = 1.0,
precision = 20.0,
dpi = DEFAULT_DPI,
include_reverse = True,
):
self.file_name = file_name
use_mm = self._parse_output_file()
super( Svg2ModExportLegacyUpdater, self ).__init__(
svg2mod_import,
file_name,
scale_factor,
precision,
use_mm,
dpi,
include_reverse,
)
#------------------------------------------------------------------------
def _parse_output_file( self ):
print( "Parsing module file: {}".format( self.file_name ) )
module_file = open( self.file_name, 'r' )
lines = module_file.readlines()
module_file.close()
self.loaded_modules = {}
self.post_index = []
self.pre_index = []
use_mm = False
index = 0
# Find the start of the index:
while index < len( lines ):
line = lines[ index ]
index += 1
self.pre_index.append( line )
if line[ : 6 ] == "$INDEX":
break
m = re.match( "Units[\s]+mm[\s]*", line )
if m is not None:
print( " Use mm detected" )
use_mm = True
# Read the index:
while index < len( lines ):
line = lines[ index ]
if line[ : 9 ] == "$EndINDEX":
break
index += 1
self.loaded_modules[ line.strip() ] = []
# Read up until the first module:
while index < len( lines ):
line = lines[ index ]
if line[ : 7 ] == "$MODULE":
break
index += 1
self.post_index.append( line )
# Read modules:
while index < len( lines ):
line = lines[ index ]
if line[ : 7 ] == "$MODULE":
module_name, module_lines, index = self._read_module( lines, index )
if module_name is not None:
self.loaded_modules[ module_name ] = module_lines
elif line[ : 11 ] == "$EndLIBRARY":
break
else:
raise Exception(
"Expected $EndLIBRARY: [{}]".format( line )
)
#print( "Pre-index:" )
#pprint( self.pre_index )
#print( "Post-index:" )
#pprint( self.post_index )
#print( "Loaded modules:" )
#pprint( self.loaded_modules )
return use_mm
#------------------------------------------------------------------------
def _read_module( self, lines, index ):
# Read module name:
m = re.match( r'\$MODULE[\s]+([^\s]+)[\s]*', lines[ index ] )
module_name = m.group( 1 )
print( " Reading module {}".format( module_name ) )
index += 1
module_lines = []
while index < len( lines ):
line = lines[ index ]
index += 1
m = re.match(
r'\$EndMODULE[\s]+' + module_name + r'[\s]*', line
)
if m is not None:
return module_name, module_lines, index
module_lines.append( line )
raise Exception(
"Could not find end of module '{}'".format( module_name )
)
#------------------------------------------------------------------------
def _write_library_intro( self ):
# Write pre-index:
self.output_file.writelines( self.pre_index )
self.loaded_modules[ self._get_module_name( front = True ) ] = None
if self.include_reverse:
self.loaded_modules[
self._get_module_name( front = False )
] = None
# Write index:
for module_name in sorted(
self.loaded_modules.iterkeys(),
key = str.lower
):
self.output_file.write( module_name + "\n" )
# Write post-index:
self.output_file.writelines( self.post_index )
#------------------------------------------------------------------------
def _write_preserved_modules( self, up_to = None ):
if up_to is not None:
up_to = up_to.lower()
for module_name in sorted(
self.loaded_modules.iterkeys(),
key = str.lower
):
if up_to is not None and module_name.lower() >= up_to:
continue
module_lines = self.loaded_modules[ module_name ]
if module_lines is not None:
self.output_file.write(
"$MODULE {}\n".format( module_name )
)
self.output_file.writelines( module_lines )
self.output_file.write(
"$EndMODULE {}\n".format( module_name )
)
self.loaded_modules[ module_name ] = None
#------------------------------------------------------------------------
def _write_module_footer( self, front ):
super( Svg2ModExportLegacyUpdater, self )._write_module_footer(
front,
)
# Write remaining modules:
if not front:
self._write_preserved_modules()
#------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
self._write_preserved_modules(
up_to = self._get_module_name( front )
)
super( Svg2ModExportLegacyUpdater, self )._write_module_header(
label_size,
label_pen,
reference_y,
value_y,
front,
)
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModExportPretty( Svg2ModExport ):
layer_map = {
#'inkscape-name' : kicad-name,
'Cu' : "{}.Cu",
'Adhes' : "{}.Adhes",
'Paste' : "{}.Paste",
'SilkS' : "{}.SilkS",
'Mask' : "{}.Mask",
'CrtYd' : "{}.CrtYd",
'Fab' : "{}.Fab",
'Edge.Cuts' : "Edge.Cuts"
}
#------------------------------------------------------------------------
def _get_layer_name( self, name, front ):
if front:
return self.layer_map[ name ].format("F")
else:
return self.layer_map[ name ].format("B")
#------------------------------------------------------------------------
def _get_module_name( self, front = None ):
return self.imported.module_name
#------------------------------------------------------------------------
def _write_library_intro( self ):
self.output_file.write( """(module {0} (layer F.Cu) (tedit {1:8X})
(attr smd)
(descr "{2}")
(tags {3})
""".format(
self.imported.module_name, #0
int( round( os.path.getctime( #1
self.imported.file_name
) ) ),
"Imported from {}".format( self.imported.file_name ), #2
"svg2mod", #3
)
)
#------------------------------------------------------------------------
def _write_module_footer( self, front ):
self.output_file.write( "\n)" )
#------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
if front:
side = "F"
else:
side = "B"
self.output_file.write(
""" (fp_text reference {0} (at 0 {1}) (layer {2}.SilkS) hide
(effects (font (size {3} {3}) (thickness {4})))
)
(fp_text value {5} (at 0 {6}) (layer {2}.SilkS) hide
(effects (font (size {3} {3}) (thickness {4})))
)""".format(
self._get_module_name(), #0
reference_y, #1
side, #2
label_size, #3
label_pen, #4
self.imported.module_value, #5
value_y, #6
)
)
#------------------------------------------------------------------------
def _write_modules( self ):
self._write_module( front = True )
#------------------------------------------------------------------------
def _write_polygon( self, points, layer, fill, stroke, stroke_width ):
if fill:
self._write_polygon_filled(
points, layer, stroke_width
)
# Polygons with a fill and stroke are drawn with the filled polygon
# above:
if stroke and not fill:
self._write_polygon_outline(
points, layer, stroke_width
)
#------------------------------------------------------------------------
def _write_polygon_footer( self, layer, stroke_width ):
self.output_file.write(
" )\n (layer {})\n (width {})\n )".format(
layer, stroke_width
)
)
#------------------------------------------------------------------------
def _write_polygon_header( self, points, layer ):
self.output_file.write( "\n (fp_poly\n (pts \n" )
#------------------------------------------------------------------------
def _write_polygon_point( self, point ):
self.output_file.write(
" (xy {} {})\n".format( point.x, point.y )
)
#------------------------------------------------------------------------
def _write_polygon_segment( self, p, q, layer, stroke_width ):
self.output_file.write(
"""\n (fp_line
(start {} {})
(end {} {})
(layer {})
(width {})
)""".format(
p.x, p.y,
q.x, q.y,
layer,
stroke_width,
)
)
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
def get_arguments():
parser = argparse.ArgumentParser(
description = (
'Convert Inkscape SVG drawings to KiCad footprint modules.'
)
)
#------------------------------------------------------------------------
parser.add_argument(
'-i', '--input-file',
type = str,
dest = 'input_file_name',
metavar = 'FILENAME',
help = "name of the SVG file",
required = True,
)
parser.add_argument(
'-o', '--output-file',
type = str,
dest = 'output_file_name',
metavar = 'FILENAME',
help = "name of the module file",
)
parser.add_argument(
'--name', '--module-name',
type = str,
dest = 'module_name',
metavar = 'NAME',
help = "base name of the module",
default = "svg2mod",
)
parser.add_argument(
'--value', '--module-value',
type = str,
dest = 'module_value',
metavar = 'VALUE',
help = "value of the module",
default = "G***",
)
parser.add_argument(
'-f', '--factor',
type = float,
dest = 'scale_factor',
metavar = 'FACTOR',
help = "scale paths by this factor",
default = 1.0,
)
parser.add_argument(
'-p', '--precision',
type = float,
dest = 'precision',
metavar = 'PRECISION',
help = "smoothness for approximating curves with line segments (float)",
default = 10.0,
)
parser.add_argument(
'--front-only',
dest = 'front_only',
action = 'store_const',
const = True,
help = "omit output of back module (legacy output format)",
default = False,
)
parser.add_argument(
'--format',
type = str,
dest = 'format',
metavar = 'FORMAT',
choices = [ 'legacy', 'pretty' ],
help = "output module file format (legacy|pretty)",
default = 'pretty',
)
parser.add_argument(
'--units',
type = str,
dest = 'units',
metavar = 'UNITS',
choices = [ 'decimil', 'mm' ],
help = "output units, if output format is legacy (decimil|mm)",
default = 'mm',
)
parser.add_argument(
'-d', '--dpi',
type = int,
dest = 'dpi',
metavar = 'DPI',
help = "DPI of the SVG file (int)",
default = DEFAULT_DPI,
)
return parser.parse_args(), parser
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
main()
#----------------------------------------------------------------------------
# vi: set et sts=4 sw=4 ts=4:
| cc0-1.0 | 9,195,613,608,947,256,000 | 26.103851 | 97 | 0.414905 | false |
mikehulluk/morphforge | src/morphforge/constants/ions.py | 1 | 1668 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
class ChlIon(object):
Na = 'na'
Ks = 'ks'
Kf = 'kf'
Ca = 'ca'
Lk = 'lk'
Chls = [Na, Ks, Kf, Ca, Lk]
| bsd-2-clause | 4,506,075,776,726,834,700 | 37.790698 | 72 | 0.67446 | false |
paninetworks/neutron | neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py | 1 | 46084 | #!/usr/bin/env python
# Copyright 2012 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Performs per host Linux Bridge configuration for Neutron.
# Based on the structure of the OpenVSwitch agent in the
# Neutron OpenVSwitch Plugin.
import os
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import service
from six import moves
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.l2pop.rpc_manager \
import l2population_rpc as l2pop_rpc
from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect
from neutron.plugins.ml2.drivers.linuxbridge.agent.common import config # noqa
from neutron.plugins.ml2.drivers.linuxbridge.agent.common \
import constants as lconst
LOG = logging.getLogger(__name__)
BRIDGE_NAME_PREFIX = "brq"
# NOTE(toabctl): Don't use /sys/devices/virtual/net here because not all tap
# devices are listed here (i.e. when using Xen)
BRIDGE_FS = "/sys/class/net/"
BRIDGE_NAME_PLACEHOLDER = "bridge_name"
BRIDGE_INTERFACES_FS = BRIDGE_FS + BRIDGE_NAME_PLACEHOLDER + "/brif/"
DEVICE_NAME_PLACEHOLDER = "device_name"
BRIDGE_PORT_FS_FOR_DEVICE = BRIDGE_FS + DEVICE_NAME_PLACEHOLDER + "/brport"
VXLAN_INTERFACE_PREFIX = "vxlan-"
class NetworkSegment(object):
def __init__(self, network_type, physical_network, segmentation_id):
self.network_type = network_type
self.physical_network = physical_network
self.segmentation_id = segmentation_id
class LinuxBridgeManager(object):
def __init__(self, interface_mappings):
self.interface_mappings = interface_mappings
self.ip = ip_lib.IPWrapper()
# VXLAN related parameters:
self.local_ip = cfg.CONF.VXLAN.local_ip
self.vxlan_mode = lconst.VXLAN_NONE
if cfg.CONF.VXLAN.enable_vxlan:
device = self.ip.get_device_by_ip(self.local_ip)
if device:
self.local_int = device.name
self.check_vxlan_support()
else:
self.local_int = None
LOG.warning(_LW('VXLAN is enabled, a valid local_ip '
'must be provided'))
# Store network mapping to segments
self.network_map = {}
def interface_exists_on_bridge(self, bridge, interface):
directory = '/sys/class/net/%s/brif' % bridge
for filename in os.listdir(directory):
if filename == interface:
return True
return False
def get_bridge_name(self, network_id):
if not network_id:
LOG.warning(_LW("Invalid Network ID, will lead to incorrect "
"bridge name"))
bridge_name = BRIDGE_NAME_PREFIX + network_id[0:11]
return bridge_name
def get_subinterface_name(self, physical_interface, vlan_id):
if not vlan_id:
LOG.warning(_LW("Invalid VLAN ID, will lead to incorrect "
"subinterface name"))
subinterface_name = '%s.%s' % (physical_interface, vlan_id)
return subinterface_name
def get_tap_device_name(self, interface_id):
if not interface_id:
LOG.warning(_LW("Invalid Interface ID, will lead to incorrect "
"tap device name"))
tap_device_name = constants.TAP_DEVICE_PREFIX + interface_id[0:11]
return tap_device_name
def get_vxlan_device_name(self, segmentation_id):
if 0 <= int(segmentation_id) <= p_const.MAX_VXLAN_VNI:
return VXLAN_INTERFACE_PREFIX + str(segmentation_id)
else:
LOG.warning(_LW("Invalid Segmentation ID: %s, will lead to "
"incorrect vxlan device name"), segmentation_id)
def get_all_neutron_bridges(self):
neutron_bridge_list = []
bridge_list = os.listdir(BRIDGE_FS)
for bridge in bridge_list:
if bridge.startswith(BRIDGE_NAME_PREFIX):
neutron_bridge_list.append(bridge)
return neutron_bridge_list
def get_interfaces_on_bridge(self, bridge_name):
if ip_lib.device_exists(bridge_name):
bridge_interface_path = BRIDGE_INTERFACES_FS.replace(
BRIDGE_NAME_PLACEHOLDER, bridge_name)
return os.listdir(bridge_interface_path)
else:
return []
def get_tap_devices_count(self, bridge_name):
bridge_interface_path = BRIDGE_INTERFACES_FS.replace(
BRIDGE_NAME_PLACEHOLDER, bridge_name)
try:
if_list = os.listdir(bridge_interface_path)
return len([interface for interface in if_list if
interface.startswith(constants.TAP_DEVICE_PREFIX)])
except OSError:
return 0
def get_bridge_for_tap_device(self, tap_device_name):
bridges = self.get_all_neutron_bridges()
for bridge in bridges:
interfaces = self.get_interfaces_on_bridge(bridge)
if tap_device_name in interfaces:
return bridge
return None
def is_device_on_bridge(self, device_name):
if not device_name:
return False
else:
bridge_port_path = BRIDGE_PORT_FS_FOR_DEVICE.replace(
DEVICE_NAME_PLACEHOLDER, device_name)
return os.path.exists(bridge_port_path)
def ensure_vlan_bridge(self, network_id, physical_interface, vlan_id):
"""Create a vlan and bridge unless they already exist."""
interface = self.ensure_vlan(physical_interface, vlan_id)
bridge_name = self.get_bridge_name(network_id)
ips, gateway = self.get_interface_details(interface)
if self.ensure_bridge(bridge_name, interface, ips, gateway):
return interface
def ensure_vxlan_bridge(self, network_id, segmentation_id):
"""Create a vxlan and bridge unless they already exist."""
interface = self.ensure_vxlan(segmentation_id)
if not interface:
LOG.error(_LE("Failed creating vxlan interface for "
"%(segmentation_id)s"),
                      {'segmentation_id': segmentation_id})
return
bridge_name = self.get_bridge_name(network_id)
self.ensure_bridge(bridge_name, interface)
return interface
def get_interface_details(self, interface):
device = self.ip.device(interface)
ips = device.addr.list(scope='global')
# Update default gateway if necessary
gateway = device.route.get_gateway(scope='global')
return ips, gateway
def ensure_flat_bridge(self, network_id, physical_interface):
"""Create a non-vlan bridge unless it already exists."""
bridge_name = self.get_bridge_name(network_id)
ips, gateway = self.get_interface_details(physical_interface)
if self.ensure_bridge(bridge_name, physical_interface, ips, gateway):
return physical_interface
def ensure_local_bridge(self, network_id):
"""Create a local bridge unless it already exists."""
bridge_name = self.get_bridge_name(network_id)
return self.ensure_bridge(bridge_name)
def ensure_vlan(self, physical_interface, vlan_id):
"""Create a vlan unless it already exists."""
interface = self.get_subinterface_name(physical_interface, vlan_id)
if not ip_lib.device_exists(interface):
LOG.debug("Creating subinterface %(interface)s for "
"VLAN %(vlan_id)s on interface "
"%(physical_interface)s",
{'interface': interface, 'vlan_id': vlan_id,
'physical_interface': physical_interface})
if utils.execute(['ip', 'link', 'add', 'link',
physical_interface,
'name', interface, 'type', 'vlan', 'id',
vlan_id], run_as_root=True):
return
if utils.execute(['ip', 'link', 'set',
interface, 'up'], run_as_root=True):
return
LOG.debug("Done creating subinterface %s", interface)
return interface
def ensure_vxlan(self, segmentation_id):
"""Create a vxlan unless it already exists."""
interface = self.get_vxlan_device_name(segmentation_id)
if not ip_lib.device_exists(interface):
LOG.debug("Creating vxlan interface %(interface)s for "
"VNI %(segmentation_id)s",
{'interface': interface,
'segmentation_id': segmentation_id})
args = {'dev': self.local_int}
if self.vxlan_mode == lconst.VXLAN_MCAST:
args['group'] = cfg.CONF.VXLAN.vxlan_group
if cfg.CONF.VXLAN.ttl:
args['ttl'] = cfg.CONF.VXLAN.ttl
if cfg.CONF.VXLAN.tos:
args['tos'] = cfg.CONF.VXLAN.tos
if cfg.CONF.VXLAN.l2_population:
args['proxy'] = True
int_vxlan = self.ip.add_vxlan(interface, segmentation_id, **args)
int_vxlan.link.set_up()
LOG.debug("Done creating vxlan interface %s", interface)
return interface
def update_interface_ip_details(self, destination, source, ips,
gateway):
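        # Move any IP addresses and the default route from the enslaved
        # interface onto the bridge so existing connectivity via that
        # interface is preserved.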
if ips or gateway:
dst_device = self.ip.device(destination)
src_device = self.ip.device(source)
# Append IP's to bridge if necessary
if ips:
for ip in ips:
dst_device.addr.add(cidr=ip['cidr'])
if gateway:
# Ensure that the gateway can be updated by changing the metric
metric = 100
if 'metric' in gateway:
metric = gateway['metric'] - 1
dst_device.route.add_gateway(gateway=gateway['gateway'],
metric=metric)
src_device.route.delete_gateway(gateway=gateway['gateway'])
# Remove IP's from interface
if ips:
for ip in ips:
src_device.addr.delete(cidr=ip['cidr'])
def _bridge_exists_and_ensure_up(self, bridge_name):
"""Check if the bridge exists and make sure it is up."""
br = ip_lib.IPDevice(bridge_name)
br.set_log_fail_as_error(False)
try:
# If the device doesn't exist this will throw a RuntimeError
br.link.set_up()
except RuntimeError:
return False
return True
def ensure_bridge(self, bridge_name, interface=None, ips=None,
gateway=None):
"""Create a bridge unless it already exists."""
# _bridge_exists_and_ensure_up instead of device_exists is used here
# because there are cases where the bridge exists but it's not UP,
# for example:
# 1) A greenthread was executing this function and had not yet executed
# "ip link set bridge_name up" before eventlet switched to this
# thread running the same function
# 2) The Nova VIF driver was running concurrently and had just created
# the bridge, but had not yet put it UP
if not self._bridge_exists_and_ensure_up(bridge_name):
LOG.debug("Starting bridge %(bridge_name)s for subinterface "
"%(interface)s",
{'bridge_name': bridge_name, 'interface': interface})
bridge_device = bridge_lib.BridgeDevice.addbr(bridge_name)
if bridge_device.setfd(0):
return
if bridge_device.disable_stp():
return
if bridge_device.link.set_up():
return
LOG.debug("Done starting bridge %(bridge_name)s for "
"subinterface %(interface)s",
{'bridge_name': bridge_name, 'interface': interface})
else:
bridge_device = bridge_lib.BridgeDevice(bridge_name)
if not interface:
return bridge_name
# Update IP info if necessary
self.update_interface_ip_details(bridge_name, interface, ips, gateway)
# Check if the interface is part of the bridge
if not self.interface_exists_on_bridge(bridge_name, interface):
try:
# Check if the interface is not enslaved in another bridge
if self.is_device_on_bridge(interface):
bridge = self.get_bridge_for_tap_device(interface)
bridge_lib.BridgeDevice(bridge).delif(interface)
bridge_device.addif(interface)
except Exception as e:
LOG.error(_LE("Unable to add %(interface)s to %(bridge_name)s"
"! Exception: %(e)s"),
{'interface': interface, 'bridge_name': bridge_name,
'e': e})
return
return bridge_name
def ensure_physical_in_bridge(self, network_id,
network_type,
physical_network,
segmentation_id):
if network_type == p_const.TYPE_VXLAN:
if self.vxlan_mode == lconst.VXLAN_NONE:
LOG.error(_LE("Unable to add vxlan interface for network %s"),
network_id)
return
return self.ensure_vxlan_bridge(network_id, segmentation_id)
physical_interface = self.interface_mappings.get(physical_network)
if not physical_interface:
LOG.error(_LE("No mapping for physical network %s"),
physical_network)
return
if network_type == p_const.TYPE_FLAT:
return self.ensure_flat_bridge(network_id, physical_interface)
elif network_type == p_const.TYPE_VLAN:
return self.ensure_vlan_bridge(network_id, physical_interface,
segmentation_id)
else:
            LOG.error(_LE("Unknown network_type %(network_type)s for network "
                          "%(network_id)s."), {'network_type': network_type,
                                               'network_id': network_id})
def add_tap_interface(self, network_id, network_type, physical_network,
segmentation_id, tap_device_name):
"""Add tap interface.
If a VIF has been plugged into a network, this function will
add the corresponding tap device to the relevant bridge.
"""
if not ip_lib.device_exists(tap_device_name):
LOG.debug("Tap device: %s does not exist on "
"this host, skipped", tap_device_name)
return False
bridge_name = self.get_bridge_name(network_id)
if network_type == p_const.TYPE_LOCAL:
self.ensure_local_bridge(network_id)
else:
phy_dev_name = self.ensure_physical_in_bridge(network_id,
network_type,
physical_network,
segmentation_id)
if not phy_dev_name:
return False
self.ensure_tap_mtu(tap_device_name, phy_dev_name)
# Check if device needs to be added to bridge
tap_device_in_bridge = self.get_bridge_for_tap_device(tap_device_name)
if not tap_device_in_bridge:
data = {'tap_device_name': tap_device_name,
'bridge_name': bridge_name}
LOG.debug("Adding device %(tap_device_name)s to bridge "
"%(bridge_name)s", data)
if bridge_lib.BridgeDevice(bridge_name).addif(tap_device_name):
return False
else:
data = {'tap_device_name': tap_device_name,
'bridge_name': bridge_name}
LOG.debug("%(tap_device_name)s already exists on bridge "
"%(bridge_name)s", data)
return True
def ensure_tap_mtu(self, tap_dev_name, phy_dev_name):
"""Ensure the MTU on the tap is the same as the physical device."""
phy_dev_mtu = ip_lib.IPDevice(phy_dev_name).link.mtu
ip_lib.IPDevice(tap_dev_name).link.set_mtu(phy_dev_mtu)
def add_interface(self, network_id, network_type, physical_network,
segmentation_id, port_id):
self.network_map[network_id] = NetworkSegment(network_type,
physical_network,
segmentation_id)
tap_device_name = self.get_tap_device_name(port_id)
return self.add_tap_interface(network_id, network_type,
physical_network, segmentation_id,
tap_device_name)
def delete_vlan_bridge(self, bridge_name):
if ip_lib.device_exists(bridge_name):
interfaces_on_bridge = self.get_interfaces_on_bridge(bridge_name)
for interface in interfaces_on_bridge:
self.remove_interface(bridge_name, interface)
if interface.startswith(VXLAN_INTERFACE_PREFIX):
self.delete_vxlan(interface)
continue
for physical_interface in self.interface_mappings.values():
if (interface.startswith(physical_interface)):
ips, gateway = self.get_interface_details(bridge_name)
if ips:
# This is a flat network or a VLAN interface that
                        # was set up outside of neutron => return IPs from
# bridge to interface
self.update_interface_ip_details(interface,
bridge_name,
ips, gateway)
elif physical_interface != interface:
self.delete_vlan(interface)
LOG.debug("Deleting bridge %s", bridge_name)
bridge_device = bridge_lib.BridgeDevice(bridge_name)
if bridge_device.link.set_down():
return
if bridge_device.delbr():
return
LOG.debug("Done deleting bridge %s", bridge_name)
else:
LOG.error(_LE("Cannot delete bridge %s, does not exist"),
bridge_name)
def remove_empty_bridges(self):
for network_id in list(self.network_map.keys()):
bridge_name = self.get_bridge_name(network_id)
if not self.get_tap_devices_count(bridge_name):
self.delete_vlan_bridge(bridge_name)
del self.network_map[network_id]
def remove_interface(self, bridge_name, interface_name):
if ip_lib.device_exists(bridge_name):
if not self.is_device_on_bridge(interface_name):
return True
LOG.debug("Removing device %(interface_name)s from bridge "
"%(bridge_name)s",
{'interface_name': interface_name,
'bridge_name': bridge_name})
if bridge_lib.BridgeDevice(bridge_name).delif(interface_name):
return False
LOG.debug("Done removing device %(interface_name)s from bridge "
"%(bridge_name)s",
{'interface_name': interface_name,
'bridge_name': bridge_name})
return True
else:
LOG.debug("Cannot remove device %(interface_name)s bridge "
"%(bridge_name)s does not exist",
{'interface_name': interface_name,
'bridge_name': bridge_name})
return False
def delete_vlan(self, interface):
if ip_lib.device_exists(interface):
LOG.debug("Deleting subinterface %s for vlan", interface)
if utils.execute(['ip', 'link', 'set', interface, 'down'],
run_as_root=True):
return
if utils.execute(['ip', 'link', 'delete', interface],
run_as_root=True):
return
LOG.debug("Done deleting subinterface %s", interface)
def delete_vxlan(self, interface):
if ip_lib.device_exists(interface):
LOG.debug("Deleting vxlan interface %s for vlan",
interface)
int_vxlan = self.ip.device(interface)
int_vxlan.link.set_down()
int_vxlan.link.delete()
LOG.debug("Done deleting vxlan interface %s", interface)
def get_tap_devices(self):
devices = set()
for device in os.listdir(BRIDGE_FS):
if device.startswith(constants.TAP_DEVICE_PREFIX):
devices.add(device)
return devices
def vxlan_ucast_supported(self):
if not cfg.CONF.VXLAN.l2_population:
return False
if not ip_lib.iproute_arg_supported(
['bridge', 'fdb'], 'append'):
LOG.warning(_LW('Option "%(option)s" must be supported by command '
'"%(command)s" to enable %(mode)s mode'),
{'option': 'append',
'command': 'bridge fdb',
'mode': 'VXLAN UCAST'})
return False
test_iface = None
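        # Pick a VNI that has no device yet so a throw-away test interface can be
        # created; the for/else falls through to the error when every VNI is taken.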
for seg_id in moves.range(1, p_const.MAX_VXLAN_VNI + 1):
if not ip_lib.device_exists(
self.get_vxlan_device_name(seg_id)):
test_iface = self.ensure_vxlan(seg_id)
break
else:
LOG.error(_LE('No valid Segmentation ID to perform UCAST test.'))
return False
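        # Probe 'bridge fdb append' by appending the flooding entry towards a dummy
        # VTEP address; the temporary interface is removed again in the finally block.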
try:
utils.execute(
cmd=['bridge', 'fdb', 'append', constants.FLOODING_ENTRY[0],
'dev', test_iface, 'dst', '1.1.1.1'],
run_as_root=True, log_fail_as_error=False)
return True
except RuntimeError:
return False
finally:
self.delete_vxlan(test_iface)
def vxlan_mcast_supported(self):
if not cfg.CONF.VXLAN.vxlan_group:
            LOG.warning(_LW('VXLAN multicast group must be provided in '
'vxlan_group option to enable VXLAN MCAST mode'))
return False
if not ip_lib.iproute_arg_supported(
['ip', 'link', 'add', 'type', 'vxlan'],
'proxy'):
LOG.warning(_LW('Option "%(option)s" must be supported by command '
'"%(command)s" to enable %(mode)s mode'),
{'option': 'proxy',
'command': 'ip link add type vxlan',
'mode': 'VXLAN MCAST'})
return False
return True
def check_vxlan_support(self):
self.vxlan_mode = lconst.VXLAN_NONE
if self.vxlan_ucast_supported():
self.vxlan_mode = lconst.VXLAN_UCAST
elif self.vxlan_mcast_supported():
self.vxlan_mode = lconst.VXLAN_MCAST
else:
raise exceptions.VxlanNetworkUnsupported()
LOG.debug('Using %s VXLAN mode', self.vxlan_mode)
def fdb_ip_entry_exists(self, mac, ip, interface):
entries = utils.execute(['ip', 'neigh', 'show', 'to', ip,
'dev', interface],
run_as_root=True)
return mac in entries
def fdb_bridge_entry_exists(self, mac, interface, agent_ip=None):
entries = utils.execute(['bridge', 'fdb', 'show', 'dev', interface],
run_as_root=True)
if not agent_ip:
return mac in entries
return (agent_ip in entries and mac in entries)
def add_fdb_ip_entry(self, mac, ip, interface):
utils.execute(['ip', 'neigh', 'replace', ip, 'lladdr', mac,
'dev', interface, 'nud', 'permanent'],
run_as_root=True,
check_exit_code=False)
def remove_fdb_ip_entry(self, mac, ip, interface):
utils.execute(['ip', 'neigh', 'del', ip, 'lladdr', mac,
'dev', interface],
run_as_root=True,
check_exit_code=False)
def add_fdb_bridge_entry(self, mac, agent_ip, interface, operation="add"):
utils.execute(['bridge', 'fdb', operation, mac, 'dev', interface,
'dst', agent_ip],
run_as_root=True,
check_exit_code=False)
def remove_fdb_bridge_entry(self, mac, agent_ip, interface):
utils.execute(['bridge', 'fdb', 'del', mac, 'dev', interface,
'dst', agent_ip],
run_as_root=True,
check_exit_code=False)
def add_fdb_entries(self, agent_ip, ports, interface):
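        # Each port entry is a (mac, ip) pair. Regular entries get a static neighbour
        # entry plus a bridge fdb entry; the reserved flooding entry only goes into
        # the fdb (appended in UCAST mode so traffic can be flooded to every VTEP).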
for mac, ip in ports:
if mac != constants.FLOODING_ENTRY[0]:
self.add_fdb_ip_entry(mac, ip, interface)
self.add_fdb_bridge_entry(mac, agent_ip, interface,
operation="replace")
elif self.vxlan_mode == lconst.VXLAN_UCAST:
if self.fdb_bridge_entry_exists(mac, interface):
self.add_fdb_bridge_entry(mac, agent_ip, interface,
"append")
else:
self.add_fdb_bridge_entry(mac, agent_ip, interface)
def remove_fdb_entries(self, agent_ip, ports, interface):
for mac, ip in ports:
if mac != constants.FLOODING_ENTRY[0]:
self.remove_fdb_ip_entry(mac, ip, interface)
self.remove_fdb_bridge_entry(mac, agent_ip, interface)
elif self.vxlan_mode == lconst.VXLAN_UCAST:
self.remove_fdb_bridge_entry(mac, agent_ip, interface)
class LinuxBridgeRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
l2pop_rpc.L2populationRpcCallBackMixin):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
target = oslo_messaging.Target(version='1.3')
def __init__(self, context, agent, sg_agent):
super(LinuxBridgeRpcCallbacks, self).__init__()
self.context = context
self.agent = agent
self.sg_agent = sg_agent
def network_delete(self, context, **kwargs):
LOG.debug("network_delete received")
network_id = kwargs.get('network_id')
bridge_name = self.agent.br_mgr.get_bridge_name(network_id)
LOG.debug("Delete %s", bridge_name)
self.agent.br_mgr.delete_vlan_bridge(bridge_name)
def port_update(self, context, **kwargs):
port_id = kwargs['port']['id']
tap_name = self.agent.br_mgr.get_tap_device_name(port_id)
# Put the tap name in the updated_devices set.
# Do not store port details, as if they're used for processing
# notifications there is no guarantee the notifications are
# processed in the same order as the relevant API requests.
self.agent.updated_devices.add(tap_name)
LOG.debug("port_update RPC received for port: %s", port_id)
def fdb_add(self, context, fdb_entries):
LOG.debug("fdb_add received")
for network_id, values in fdb_entries.items():
segment = self.agent.br_mgr.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.br_mgr.get_vxlan_device_name(
segment.segmentation_id)
agent_ports = values.get('ports')
for agent_ip, ports in agent_ports.items():
if agent_ip == self.agent.br_mgr.local_ip:
continue
self.agent.br_mgr.add_fdb_entries(agent_ip,
ports,
interface)
def fdb_remove(self, context, fdb_entries):
LOG.debug("fdb_remove received")
for network_id, values in fdb_entries.items():
segment = self.agent.br_mgr.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.br_mgr.get_vxlan_device_name(
segment.segmentation_id)
agent_ports = values.get('ports')
for agent_ip, ports in agent_ports.items():
if agent_ip == self.agent.br_mgr.local_ip:
continue
self.agent.br_mgr.remove_fdb_entries(agent_ip,
ports,
interface)
def _fdb_chg_ip(self, context, fdb_entries):
LOG.debug("update chg_ip received")
for network_id, agent_ports in fdb_entries.items():
segment = self.agent.br_mgr.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.br_mgr.get_vxlan_device_name(
segment.segmentation_id)
for agent_ip, state in agent_ports.items():
if agent_ip == self.agent.br_mgr.local_ip:
continue
after = state.get('after', [])
for mac, ip in after:
self.agent.br_mgr.add_fdb_ip_entry(mac, ip, interface)
before = state.get('before', [])
for mac, ip in before:
self.agent.br_mgr.remove_fdb_ip_entry(mac, ip, interface)
def fdb_update(self, context, fdb_entries):
LOG.debug("fdb_update received")
for action, values in fdb_entries.items():
method = '_fdb_' + action
if not hasattr(self, method):
raise NotImplementedError()
getattr(self, method)(context, values)
class LinuxBridgeNeutronAgentRPC(service.Service):
def __init__(self, interface_mappings, polling_interval,
quitting_rpc_timeout):
"""Constructor.
:param interface_mappings: dict mapping physical_networks to
physical_interfaces.
:param polling_interval: interval (secs) to poll DB.
:param quitting_rpc_timeout: timeout in seconds for rpc calls after
stop is called.
"""
super(LinuxBridgeNeutronAgentRPC, self).__init__()
self.interface_mappings = interface_mappings
self.polling_interval = polling_interval
self.quitting_rpc_timeout = quitting_rpc_timeout
def start(self):
self.prevent_arp_spoofing = cfg.CONF.AGENT.prevent_arp_spoofing
self.setup_linux_bridge(self.interface_mappings)
configurations = {'interface_mappings': self.interface_mappings}
if self.br_mgr.vxlan_mode != lconst.VXLAN_NONE:
configurations['tunneling_ip'] = self.br_mgr.local_ip
configurations['tunnel_types'] = [p_const.TYPE_VXLAN]
configurations['l2_population'] = cfg.CONF.VXLAN.l2_population
self.agent_state = {
'binary': 'neutron-linuxbridge-agent',
'host': cfg.CONF.host,
'topic': constants.L2_AGENT_TOPIC,
'configurations': configurations,
'agent_type': constants.AGENT_TYPE_LINUXBRIDGE,
'start_flag': True}
# stores received port_updates for processing by the main loop
self.updated_devices = set()
self.context = context.get_admin_context_without_session()
self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
self.sg_plugin_rpc, defer_refresh_firewall=True)
self.setup_rpc(self.interface_mappings.values())
self.daemon_loop()
def stop(self, graceful=True):
LOG.info(_LI("Stopping linuxbridge agent."))
if graceful and self.quitting_rpc_timeout:
self.set_rpc_timeout(self.quitting_rpc_timeout)
super(LinuxBridgeNeutronAgentRPC, self).stop(graceful)
def reset(self):
common_config.setup_logging()
def _report_state(self):
try:
devices = len(self.br_mgr.get_tap_devices())
self.agent_state.get('configurations')['devices'] = devices
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def setup_rpc(self, physical_interfaces):
if physical_interfaces:
mac = utils.get_interface_mac(physical_interfaces[0])
else:
devices = ip_lib.IPWrapper().get_devices(True)
if devices:
mac = utils.get_interface_mac(devices[0].name)
else:
LOG.error(_LE("Unable to obtain MAC address for unique ID. "
"Agent terminated!"))
exit(1)
self.agent_id = '%s%s' % ('lb', (mac.replace(":", "")))
LOG.info(_LI("RPC agent_id: %s"), self.agent_id)
self.topic = topics.AGENT
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
# Handle updates from service
self.endpoints = [LinuxBridgeRpcCallbacks(self.context, self,
self.sg_agent)]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE]]
if cfg.CONF.VXLAN.l2_population:
consumers.append([topics.L2POPULATION,
topics.UPDATE, cfg.CONF.host])
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers)
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def setup_linux_bridge(self, interface_mappings):
self.br_mgr = LinuxBridgeManager(interface_mappings)
def remove_port_binding(self, network_id, interface_id):
bridge_name = self.br_mgr.get_bridge_name(network_id)
tap_device_name = self.br_mgr.get_tap_device_name(interface_id)
return self.br_mgr.remove_interface(bridge_name, tap_device_name)
def process_network_devices(self, device_info):
resync_a = False
resync_b = False
self.sg_agent.setup_port_filters(device_info.get('added'),
device_info.get('updated'))
# Updated devices are processed the same as new ones, as their
# admin_state_up may have changed. The set union prevents duplicating
# work when a device is new and updated in the same polling iteration.
devices_added_updated = (set(device_info.get('added'))
| set(device_info.get('updated')))
if devices_added_updated:
resync_a = self.treat_devices_added_updated(devices_added_updated)
if device_info.get('removed'):
resync_b = self.treat_devices_removed(device_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def treat_devices_added_updated(self, devices):
try:
devices_details_list = self.plugin_rpc.get_devices_details_list(
self.context, devices, self.agent_id)
except Exception as e:
LOG.debug("Unable to get port details for "
"%(devices)s: %(e)s",
{'devices': devices, 'e': e})
# resync is needed
return True
for device_details in devices_details_list:
device = device_details['device']
LOG.debug("Port %s added", device)
if 'port_id' in device_details:
LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
{'device': device, 'details': device_details})
if self.prevent_arp_spoofing:
port = self.br_mgr.get_tap_device_name(
device_details['port_id'])
arp_protect.setup_arp_spoofing_protection(port,
device_details)
if device_details['admin_state_up']:
# create the networking for the port
network_type = device_details.get('network_type')
if network_type:
segmentation_id = device_details.get('segmentation_id')
else:
# compatibility with pre-Havana RPC vlan_id encoding
vlan_id = device_details.get('vlan_id')
(network_type,
segmentation_id) = lconst.interpret_vlan_id(vlan_id)
if self.br_mgr.add_interface(
device_details['network_id'],
network_type,
device_details['physical_network'],
segmentation_id,
device_details['port_id']):
# update plugin about port status
self.plugin_rpc.update_device_up(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
self.remove_port_binding(device_details['network_id'],
device_details['port_id'])
else:
LOG.info(_LI("Device %s not defined on plugin"), device)
return False
def treat_devices_removed(self, devices):
resync = False
self.sg_agent.remove_devices_filter(devices)
for device in devices:
LOG.info(_LI("Attachment %s removed"), device)
details = None
try:
details = self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
except Exception as e:
LOG.debug("port_removed failed for %(device)s: %(e)s",
{'device': device, 'e': e})
resync = True
if details and details['exists']:
LOG.info(_LI("Port %s updated."), device)
else:
LOG.debug("Device %s not defined on plugin", device)
if self.prevent_arp_spoofing:
arp_protect.delete_arp_spoofing_protection(devices)
return resync
def scan_devices(self, previous, sync):
device_info = {}
# Save and reinitialise the set variable that the port_update RPC uses.
# This should be thread-safe as the greenthread should not yield
# between these two statements.
updated_devices = self.updated_devices
self.updated_devices = set()
current_devices = self.br_mgr.get_tap_devices()
device_info['current'] = current_devices
if previous is None:
# This is the first iteration of daemon_loop().
previous = {'added': set(),
'current': set(),
'updated': set(),
'removed': set()}
# clear any orphaned ARP spoofing rules (e.g. interface was
# manually deleted)
if self.prevent_arp_spoofing:
arp_protect.delete_unreferenced_arp_protection(current_devices)
if sync:
# This is the first iteration, or the previous one had a problem.
# Re-add all existing devices.
device_info['added'] = current_devices
# Retry cleaning devices that may not have been cleaned properly.
# And clean any that disappeared since the previous iteration.
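            # Note: '-' and '&' bind tighter than '|', so the unions below are applied
            # to the already filtered sets.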
device_info['removed'] = (previous['removed'] | previous['current']
- current_devices)
# Retry updating devices that may not have been updated properly.
# And any that were updated since the previous iteration.
# Only update devices that currently exist.
device_info['updated'] = (previous['updated'] | updated_devices
& current_devices)
else:
device_info['added'] = current_devices - previous['current']
device_info['removed'] = previous['current'] - current_devices
device_info['updated'] = updated_devices & current_devices
return device_info
def _device_info_has_changes(self, device_info):
return (device_info.get('added')
or device_info.get('updated')
or device_info.get('removed'))
def daemon_loop(self):
LOG.info(_LI("LinuxBridge Agent RPC Daemon Started!"))
device_info = None
sync = True
while True:
start = time.time()
device_info = self.scan_devices(previous=device_info, sync=sync)
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
sync = False
if (self._device_info_has_changes(device_info)
or self.sg_agent.firewall_refresh_needed()):
LOG.debug("Agent loop found changes! %s", device_info)
try:
sync = self.process_network_devices(device_info)
except Exception:
LOG.exception(_LE("Error in agent loop. Devices info: %s"),
device_info)
sync = True
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self.polling_interval):
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
def set_rpc_timeout(self, timeout):
for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc,
self.state_rpc):
rpc_api.client.timeout = timeout
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
try:
interface_mappings = n_utils.parse_mappings(
cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
except ValueError as e:
LOG.error(_LE("Parsing physical_interface_mappings failed: %s. "
"Agent terminated!"), e)
sys.exit(1)
LOG.info(_LI("Interface mappings: %s"), interface_mappings)
polling_interval = cfg.CONF.AGENT.polling_interval
quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
agent = LinuxBridgeNeutronAgentRPC(interface_mappings,
polling_interval,
quitting_rpc_timeout)
LOG.info(_LI("Agent initialized successfully, now running... "))
launcher = service.launch(cfg.CONF, agent)
launcher.wait()
if __name__ == "__main__":
main()
| apache-2.0 | -1,381,419,549,410,301,700 | 42.230769 | 79 | 0.552578 | false |
intel-hpdd/intel-manager-for-lustre | tests/unit/services/test_plugin_runner.py | 1 | 1732 | import mock
from chroma_core.models import VolumeNode
from tests.unit.lib.iml_unit_test_case import IMLUnitTestCase
from chroma_core.services.plugin_runner import AgentPluginHandlerCollection
from tests.unit.chroma_core.helpers import synthetic_host, synthetic_volume_full
from tests.unit.chroma_core.helpers import load_default_profile
class TestRebalancePassthrough(IMLUnitTestCase):
"""
Validate that the rebalance_host_volumes member function correctly calls through
to resource manager
"""
def setUp(self):
super(TestRebalancePassthrough, self).setUp()
load_default_profile()
# Initialise storage plugin stuff for the benefit of synthetic_volume_full
import chroma_core.lib.storage_plugin.manager
chroma_core.lib.storage_plugin.manager.storage_plugin_manager = (
chroma_core.lib.storage_plugin.manager.StoragePluginManager()
)
def test_multiple_volume_nodes(self):
"""
Test that when a volume has multiple volume nodes on one host, the volume is
not duplicated in the arguments to resource manager (HYD-2119)
"""
host = synthetic_host()
volume = synthetic_volume_full(host)
# An extra volume node, so that there are now two on one host
VolumeNode.objects.create(volume=volume, host=host, path="/dev/sdaxxx")
self.assertEqual(VolumeNode.objects.filter(host=host).count(), 2)
resource_manager = mock.Mock()
AgentPluginHandlerCollection(resource_manager).rebalance_host_volumes(host.id)
called_with_volumes = list(resource_manager.balance_unweighted_volume_nodes.call_args[0][0])
self.assertListEqual(called_with_volumes, [volume])
| mit | -8,590,990,271,830,882,000 | 39.27907 | 100 | 0.718245 | false |
pyrochlore/cycles | src/blender/addon/__init__.py | 1 | 3388 | #
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# <pep8 compliant>
bl_info = {
"name": "Cycles Render Engine",
"author": "",
"blender": (2, 70, 0),
"location": "Info header, render engine menu",
"description": "Cycles Render Engine integration",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Doc:2.6/Manual/Render/Cycles",
"tracker_url": "",
"support": 'OFFICIAL',
"category": "Render"}
import bpy
from . import engine
from . import version_update
class CyclesRender(bpy.types.RenderEngine):
bl_idname = 'CYCLES'
bl_label = "Cycles Render"
bl_use_shading_nodes = True
bl_use_preview = True
bl_use_exclude_layers = True
bl_use_save_buffers = True
def __init__(self):
self.session = None
def __del__(self):
engine.free(self)
# final render
def update(self, data, scene):
if self.is_preview:
if not self.session:
cscene = bpy.context.scene.cycles
use_osl = cscene.shading_system and cscene.device == 'CPU'
engine.create(self, data, scene,
None, None, None, use_osl)
else:
if not self.session:
engine.create(self, data, scene)
else:
engine.reset(self, data, scene)
engine.update(self, data, scene)
def render(self, scene):
engine.render(self)
def bake(self, scene, obj, pass_type, pixel_array, num_pixels, depth, result):
engine.bake(self, obj, pass_type, pixel_array, num_pixels, depth, result)
# viewport render
def view_update(self, context):
if not self.session:
engine.create(self, context.blend_data, context.scene,
context.region, context.space_data, context.region_data)
engine.update(self, context.blend_data, context.scene)
def view_draw(self, context):
engine.draw(self, context.region, context.space_data, context.region_data)
def update_script_node(self, node):
if engine.with_osl():
from . import osl
osl.update_script_node(node, self.report)
else:
self.report({'ERROR'}, "OSL support disabled in this build.")
def register():
from . import ui
from . import properties
from . import presets
engine.init()
properties.register()
ui.register()
presets.register()
bpy.utils.register_module(__name__)
bpy.app.handlers.version_update.append(version_update.do_versions)
def unregister():
from . import ui
from . import properties
from . import presets
bpy.app.handlers.version_update.remove(version_update.do_versions)
ui.unregister()
properties.unregister()
presets.unregister()
bpy.utils.unregister_module(__name__)
| apache-2.0 | -5,977,137,211,679,149,000 | 27.957265 | 82 | 0.634002 | false |
leppa/home-assistant | homeassistant/components/minio/__init__.py | 1 | 8056 | """Minio component."""
import logging
import os
from queue import Queue
import threading
from typing import List
import voluptuous as vol
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
import homeassistant.helpers.config_validation as cv
from .minio_helper import MinioEventThread, create_minio_client
_LOGGER = logging.getLogger(__name__)
DOMAIN = "minio"
CONF_HOST = "host"
CONF_PORT = "port"
CONF_ACCESS_KEY = "access_key"
CONF_SECRET_KEY = "secret_key"
CONF_SECURE = "secure"
CONF_LISTEN = "listen"
CONF_LISTEN_BUCKET = "bucket"
CONF_LISTEN_PREFIX = "prefix"
CONF_LISTEN_SUFFIX = "suffix"
CONF_LISTEN_EVENTS = "events"
ATTR_BUCKET = "bucket"
ATTR_KEY = "key"
ATTR_FILE_PATH = "file_path"
DEFAULT_LISTEN_PREFIX = ""
DEFAULT_LISTEN_SUFFIX = ".*"
DEFAULT_LISTEN_EVENTS = "s3:ObjectCreated:*"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Required(CONF_ACCESS_KEY): cv.string,
vol.Required(CONF_SECRET_KEY): cv.string,
vol.Required(CONF_SECURE): cv.boolean,
vol.Optional(CONF_LISTEN, default=[]): vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_LISTEN_BUCKET): cv.string,
vol.Optional(
CONF_LISTEN_PREFIX, default=DEFAULT_LISTEN_PREFIX
): cv.string,
vol.Optional(
CONF_LISTEN_SUFFIX, default=DEFAULT_LISTEN_SUFFIX
): cv.string,
vol.Optional(
CONF_LISTEN_EVENTS, default=DEFAULT_LISTEN_EVENTS
): cv.string,
}
)
],
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
BUCKET_KEY_SCHEMA = vol.Schema(
{vol.Required(ATTR_BUCKET): cv.template, vol.Required(ATTR_KEY): cv.template}
)
BUCKET_KEY_FILE_SCHEMA = BUCKET_KEY_SCHEMA.extend(
{vol.Required(ATTR_FILE_PATH): cv.template}
)
def setup(hass, config):
"""Set up MinioClient and event listeners."""
conf = config[DOMAIN]
host = conf[CONF_HOST]
port = conf[CONF_PORT]
access_key = conf[CONF_ACCESS_KEY]
secret_key = conf[CONF_SECRET_KEY]
secure = conf[CONF_SECURE]
queue_listener = QueueListener(hass)
queue = queue_listener.queue
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, queue_listener.start_handler)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, queue_listener.stop_handler)
def _setup_listener(listener_conf):
bucket = listener_conf[CONF_LISTEN_BUCKET]
prefix = listener_conf[CONF_LISTEN_PREFIX]
suffix = listener_conf[CONF_LISTEN_SUFFIX]
events = listener_conf[CONF_LISTEN_EVENTS]
minio_listener = MinioListener(
queue,
get_minio_endpoint(host, port),
access_key,
secret_key,
secure,
bucket,
prefix,
suffix,
events,
)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, minio_listener.start_handler)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, minio_listener.stop_handler)
for listen_conf in conf[CONF_LISTEN]:
_setup_listener(listen_conf)
minio_client = create_minio_client(
get_minio_endpoint(host, port), access_key, secret_key, secure
)
def _render_service_value(service, key):
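        # Service data values arrive as templates; attach hass and render them at the
        # time the service is called.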
value = service.data[key]
value.hass = hass
return value.async_render()
def put_file(service):
"""Upload file service."""
bucket = _render_service_value(service, ATTR_BUCKET)
key = _render_service_value(service, ATTR_KEY)
file_path = _render_service_value(service, ATTR_FILE_PATH)
if not hass.config.is_allowed_path(file_path):
_LOGGER.error("Invalid file_path %s", file_path)
return
minio_client.fput_object(bucket, key, file_path)
def get_file(service):
"""Download file service."""
bucket = _render_service_value(service, ATTR_BUCKET)
key = _render_service_value(service, ATTR_KEY)
file_path = _render_service_value(service, ATTR_FILE_PATH)
if not hass.config.is_allowed_path(file_path):
_LOGGER.error("Invalid file_path %s", file_path)
return
minio_client.fget_object(bucket, key, file_path)
def remove_file(service):
"""Delete file service."""
bucket = _render_service_value(service, ATTR_BUCKET)
key = _render_service_value(service, ATTR_KEY)
minio_client.remove_object(bucket, key)
hass.services.register(DOMAIN, "put", put_file, schema=BUCKET_KEY_FILE_SCHEMA)
hass.services.register(DOMAIN, "get", get_file, schema=BUCKET_KEY_FILE_SCHEMA)
hass.services.register(DOMAIN, "remove", remove_file, schema=BUCKET_KEY_SCHEMA)
return True
def get_minio_endpoint(host: str, port: int) -> str:
"""Create minio endpoint from host and port."""
return f"{host}:{port}"
class QueueListener(threading.Thread):
"""Forward events from queue into HASS event bus."""
def __init__(self, hass):
"""Create queue."""
super().__init__()
self._hass = hass
self._queue = Queue()
def run(self):
"""Listen to queue events, and forward them to HASS event bus."""
_LOGGER.info("Running QueueListener")
while True:
event = self._queue.get()
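            # A None item is the sentinel pushed by stop() to end this thread.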
if event is None:
break
_, file_name = os.path.split(event[ATTR_KEY])
_LOGGER.debug(
"Sending event %s, %s, %s",
event["event_name"],
event[ATTR_BUCKET],
event[ATTR_KEY],
)
self._hass.bus.fire(DOMAIN, {"file_name": file_name, **event})
@property
def queue(self):
"""Return wrapped queue."""
return self._queue
def stop(self):
"""Stop run by putting None into queue and join the thread."""
_LOGGER.info("Stopping QueueListener")
self._queue.put(None)
self.join()
_LOGGER.info("Stopped QueueListener")
def start_handler(self, _):
"""Start handler helper method."""
self.start()
def stop_handler(self, _):
"""Stop handler helper method."""
self.stop()
class MinioListener:
"""MinioEventThread wrapper with helper methods."""
def __init__(
self,
queue: Queue,
endpoint: str,
access_key: str,
secret_key: str,
secure: bool,
bucket_name: str,
prefix: str,
suffix: str,
events: List[str],
):
"""Create Listener."""
self._queue = queue
self._endpoint = endpoint
self._access_key = access_key
self._secret_key = secret_key
self._secure = secure
self._bucket_name = bucket_name
self._prefix = prefix
self._suffix = suffix
self._events = events
self._minio_event_thread = None
def start_handler(self, _):
"""Create and start the event thread."""
self._minio_event_thread = MinioEventThread(
self._queue,
self._endpoint,
self._access_key,
self._secret_key,
self._secure,
self._bucket_name,
self._prefix,
self._suffix,
self._events,
)
self._minio_event_thread.start()
def stop_handler(self, _):
"""Issue stop and wait for thread to join."""
if self._minio_event_thread is not None:
self._minio_event_thread.stop()
| apache-2.0 | 4,996,086,234,179,014,000 | 29.4 | 85 | 0.566783 | false |
matejsuchanek/pywikibot-scripts | fix_qualifiers.py | 1 | 4360 | #!/usr/bin/python
"""This script is obsolete!"""
import pywikibot
from pywikibot import pagegenerators
from .query_store import QueryStore
from .wikidata import WikidataEntityBot
class QualifiersFixingBot(WikidataEntityBot):
blacklist = frozenset(['P143', 'P248', 'P459', 'P518', 'P577', 'P805',
'P972', 'P1065', 'P1135', 'P1480', 'P1545', 'P1932',
'P2315', 'P2701', 'P3274', ])
whitelist = frozenset(['P17', 'P21', 'P39', 'P155', 'P156', 'P281', 'P580',
'P582', 'P585', 'P669', 'P708', 'P969', 'P1355',
'P1356', ])
good_item = 'Q15720608'
use_from_page = False
def __init__(self, **kwargs):
kwargs.update({
'bad_cache': kwargs.get('bad_cache', []) + list(self.blacklist),
'good_cache': kwargs.get('good_cache', []) + list(self.whitelist),
})
super().__init__(**kwargs)
self.store = QueryStore()
def filterProperty(self, prop_page):
if prop_page.type == 'external-id':
return False
prop_page.get()
if 'P31' not in prop_page.claims:
pywikibot.warning('%s is not classified' % prop_page.getID())
return False
for claim in prop_page.claims['P31']:
if claim.target_equals(self.good_item):
return True
return False
@property
def generator(self):
query = self.store.build_query(
'qualifiers', item=self.good_item,
good=', wd:'.join(self.whitelist),
bad=', wd:'.join(self.blacklist))
return pagegenerators.PreloadingItemGenerator(
pagegenerators.WikidataSPARQLPageGenerator(query, site=self.repo))
def treat_page_and_item(self, page, item):
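        # Move reference snaks whose property should only be used as a qualifier (as
        # decided by checkProperty) from each claim's sources into its qualifiers,
        # dropping references that become empty, and save the rebuilt claim JSON.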
for prop in item.claims.keys():
for claim in item.claims[prop]:
moved = set()
json = claim.toJSON()
i = -1
for source in claim.sources:
i += 1
for ref_prop in filter(self.checkProperty, source.keys()):
for snak in source[ref_prop]:
json.setdefault('qualifiers', {}).setdefault(ref_prop, [])
for qual in (pywikibot.Claim.qualifierFromJSON(self.repo, q)
for q in json['qualifiers'][ref_prop]):
if qual.target_equals(snak.getTarget()):
break
else:
snak.isReference = False
snak.isQualifier = True
json['qualifiers'][ref_prop].append(snak.toJSON())
json['references'][i]['snaks'][ref_prop].pop(0)
if len(json['references'][i]['snaks'][ref_prop]) == 0:
json['references'][i]['snaks'].pop(ref_prop)
if len(json['references'][i]['snaks']) == 0:
json['references'].pop(i)
i -= 1
moved.add(ref_prop)
if len(moved) > 0:
data = {'claims': [json]}
self.user_edit_entity(item, data, summary=self.makeSummary(prop, moved),
asynchronous=True)
def makeSummary(self, prop, props):
props = ['[[Property:P%s]]' % pid for pid in sorted(
int(pid[1:]) for pid in props)]
return '[[Property:%s]]: moving misplaced reference%s %s to qualifiers' % (
prop, 's' if len(props) > 1 else '', '%s and %s' % (
', '.join(props[:-1]), props[-1]) if len(props) > 1 else props[0])
def main(*args):
options = {}
for arg in pywikibot.handle_args(args):
if arg.startswith('-'):
arg, sep, value = arg.partition(':')
if value != '':
options[arg[1:]] = value if not value.isdigit() else int(value)
else:
options[arg[1:]] = True
site = pywikibot.Site('wikidata', 'wikidata')
bot = QualifiersFixingBot(site=site, **options)
bot.run()
if __name__ == '__main__':
main()
| gpl-2.0 | 4,387,339,838,632,818,700 | 38.636364 | 92 | 0.482569 | false |
ppke-nlpg/purepos-python3 | purepos.py | 1 | 17540 | #!/usr/bin/env python3
# -*- coding: utf-8, vim: expandtab:ts=4 -*-
###############################################################################
# Copyright (c) 2015 Móréh, Tamás
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Lesser Public License v3
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/
#
# This file is part of PurePos-Python3.
#
# PurePos-Python3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PurePos-Python3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# Contributors:
# Móréh, Tamás - initial API and implementation
##############################################################################
__author__ = '[email protected]'
import argparse
import os
import sys
import math
import importlib.machinery
from corpusreader.corpus_reader import CorpusReader
from corpusreader.tokenreaders import StemmedTaggedTokenReader
from docmodel.token import Token, Colors
from purepos.trainer import Trainer
from purepos.common.serializer import StandardSerializer
from purepos.common import util
from purepos.tagger import POSTagger, MorphTagger
from purepos.morphology import BaseMorphologicalAnalyser, MorphologicalTable, HumorAnalyser
from purepos.cli.configuration import Configuration
from purepos.common.analysisqueue import AnalysisQueue
def parse_arguments():
parser = argparse.ArgumentParser("purepos", description="PurePos is an open source hybrid "
"morphological tagger.")
# parser.add_argument("-h", "--help", help="Print this message.")
parser.add_argument("command", help="Mode selection: train for training the "
"tagger, tag for tagging a text with the given model.",
metavar="tag|train", type=str, choices=["tag", "train"])
parser.add_argument("-m", "--model",
                        help="Specifies a path to a model file. If an existing model is given for "
"training, the tool performs incremental training.",
metavar="<modelfile>", required=True, type=str)
parser.add_argument("-t", "--tag-order",
help="Order of tag transition. Second order means "
"trigram tagging. The default is 2. Training only option.",
metavar="<number>", type=int, default=2)
parser.add_argument("-e", "--emission-order",
help="Order of emission. First order means that the given word depends "
"only on its tag. The default is 2. Training only option.",
metavar="<number>", type=int, default=2)
parser.add_argument("-s", "--suffix-length",
help="Use a suffix trie for guessing unknown words tags with the given "
"maximum suffix length. The default is 10. Training only option.",
metavar="<length>", type=int, default=10)
parser.add_argument("-r", "--rare-frequency",
                        help="Add only words to the suffix trie with frequency less than the given"
                             " threshold. The default is 10. Training only option.",
                        metavar="<threshold>", type=int, default=10)
parser.add_argument("-a", "--analyzer",
help="Set the morphological analyzer. <analyzer> can be "
"'none', 'integrated' or a file : <morphologicalTableFile>. The "
"default is to use the integrated one. Tagging only option. ",
metavar="<analyzer>", type=str, default="integrated", dest="morphology")
parser.add_argument("-H", "--pyhumor-path",
help="Set the path of the PyHumor module where the Humor class is defined.",
metavar="<path>", type=str, default="pyhumor/")
parser.add_argument("-L", "--lex-path",
help="Set the path of the lex file used by the Humor analyser. The "
"pyhumor module delivered lex is used.",
metavar="<path>", type=str, default="lex/")
parser.add_argument("--only-pos-tags",
help="Do not perform stemming, output only POS tags. Tagging only option.",
action="store_true", dest="no_stemming")
parser.add_argument("-g", "--max-guessed",
help="Limit the max guessed tags for each token. The default is 10. "
"Tagging only option.",
metavar="<number>", type=int, default=10)
parser.add_argument("-n", "--max-results",
help="Set the expected maximum number of tag sequences (with its score). "
"The default is 1. Tagging only option.",
metavar="<number>", type=int, default=1)
parser.add_argument("-b", "--beam-theta",
help="Set the beam-search limit. "
"The default is 1000. Tagging only option.",
metavar="<theta>", type=int, default=1000)
parser.add_argument("-o", "--output-file",
help="File where the tagging output is redirected. Tagging only option.",
metavar="<file>", type=str, default=None)
parser.add_argument("--color-stdout",
                        help="Use colored console if the stdout is the chosen output.",
action="store_true")
parser.add_argument("-c", "--encoding",
help="Encoding used to read the training set, or write the results. "
"The default is your OS default.",
metavar="<encoding>", type=str, default=sys.getdefaultencoding())
parser.add_argument("--input-separator",
help="Separator characters and tag starting character for annotated input "
"(divided by the first character cf. sed). Eg.: \"#{{#||#}}#[\"",
metavar="<separators>", type=str, default=" {{ || }} [")
parser.add_argument("-S", "--separator",
help="Separator character between word, lemma and tags. Default: '#'",
metavar="<separator>", type=str, default="#")
parser.add_argument("-i", "--input-file",
                        help="File containing the training set (for training) or the text to be tagged"
                             " (for tagging). The default is the standard input.",
metavar="<file>", type=str, default=None)
parser.add_argument("-d", "--beam-decoder",
help="Use Beam Search decoder. The default is to employ the Viterbi "
"algorithm. Tagging only option.", action="store_true")
# todo beam_size
parser.add_argument("-f", "--config-file",
                        help="Configuration file containing tag mappings. "
                             "The default is to not map any tag.",
metavar="<file>", type=str, default=None)
return parser.parse_args()
class PurePos:
"""The main PurePos class. This is the interface for training and tagging.
Using from command line:
Run purepos.py --help
Using as a module:
Use the following static methods without instantiation:
PurePos.train()
PurePos.tag()
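    A minimal illustrative call (the file paths below are placeholders):
        PurePos.train(encoding='utf-8', model_path='model.dat',
                      input_path='corpus.txt', tag_order=2, emission_order=2,
                      suff_length=10, rare_freq=10, separator='#', linesep='\n')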
"""
TAG_OPT = "tag"
TRAIN_OPT = "train"
PRE_MA = "pre"
NONE_MA = "none"
INTEGRATED_MA = "integrated"
@staticmethod
def train(encoding: str,
model_path: str,
input_path: str or None,
tag_order: int,
emission_order: int,
suff_length: int,
rare_freq: int,
separator: str,
              linesep: str): # todo verbose mode
        """Create a language model from an analysed corpus (and optionally improve an existing model).
        It performs on the given input, which can also be the stdin.
        :param encoding: The encoding of the corpus. If None, the Python 3 default will be used.
        :param model_path: Path of the model file. If it exists, it will be improved.
        :param input_path: Path of the analysed corpus. If None, stdin will be used.
        :param tag_order: Order of the tag transition model; 2 means trigram tagging.
        :param emission_order: Order of the emission model; 1 means a word depends only on its tag.
        :param suff_length: Maximum suffix length of the suffix trie used for guessing unknown words.
        :param rare_freq: Only words with frequency below this threshold are added to the suffix trie.
        :param separator: The separator character(s) inside the token. Default/traditionally: '#'.
        :param linesep: The separator character(s) between the sentences. Default: newline.
"""
if input_path is not None:
            source = open(input_path, encoding=encoding) # todo: default encoding? (Python 3 is smart about this)
else:
source = sys.stdin
trainer = Trainer(source, CorpusReader(StemmedTaggedTokenReader(separator, linesep)))
if os.path.isfile(model_path):
print("Reading model... ", file=sys.stderr)
ret_model = StandardSerializer.read_model(model_path)
print("Training model... ", file=sys.stderr)
ret_model = trainer.train_model(ret_model)
else:
print("Training model... ", file=sys.stderr)
ret_model = trainer.train(tag_order, emission_order, suff_length, rare_freq)
print(trainer.stat.stat(ret_model), file=sys.stderr)
print("Writing model... ", file=sys.stderr)
StandardSerializer.write_model(ret_model, model_path)
print("Done!", file=sys.stderr)
@staticmethod
def tag(encoding: str,
model_path: str,
input_path: str,
analyser: str,
no_stemming: bool,
max_guessed: int,
max_resnum: int,
beam_theta: int,
use_beam_search: bool,
out_path: str,
use_colored_stdout: bool,
humor_path: str,
            lex_path: str): # todo: expose the Humor constructor arguments (*args, **kwargs) up to this level
        """Perform tagging on the given input with the given model and properties to the given
        output. The input and output can also be the standard IO.
        :param encoding: The encoding of the input. If None, the Python 3 default will be used.
        :param model_path: Path of the model file. It must exist.
        :param input_path: Path of the source file. If None, stdin will be used.
        :param analyser: "integrated" or "none" if the HUMOR analyser should be used or not.
        Otherwise it can be the path of any morphological table.
        :param no_stemming: Analyse without lemmatization.
        :param max_guessed: Maximum number of guessed tags for each token.
        :param max_resnum: Expected maximum number of returned tag sequences (with their scores).
        :param beam_theta: Beam-search pruning threshold; its logarithm is used internally.
:param use_beam_search: Using Beam Search algorithm instead of Viterbi.
:param out_path: Path of the output file. If None, stdout will be used.
:param use_colored_stdout: Use colored output only if the output is the stdout.
:param humor_path: The path of the pyhumor module file.
:param lex_path: The path of the lex directory for humor.
"""
if not input_path:
source = sys.stdin
if use_colored_stdout:
# HEADER = '\033[95m'
# OKBLUE = '\033[94m'
# OKGREEN = '\033[92m'
# WARNING = '\033[93m'
# FAIL = '\033[91m'
# ENDC = '\033[0m'
# BOLD = '\033[1m'
                # UNDERLINE = '\033[4m' # todo: this should be themeable.
Colors.ENDC = '\033[0m'
Colors.WORD = '\033[93m'
Colors.LEMMA = '\033[91m'
Colors.TAGS = '\033[32m' # '\033[36m'
Colors.SEPARATOR = '\033[90m'
else:
            source = open(input_path, encoding=encoding) # todo: default encoding? (Python 3 is smart about this)
tagger = PurePos.create_tagger(model_path, analyser, no_stemming, max_guessed,
math.log(beam_theta), use_beam_search, util.CONFIGURATION,
humor_path, lex_path)
if not out_path:
output = sys.stdout
else:
output = open(out_path, mode="w", encoding=encoding)
print("Tagging:", file=sys.stderr)
tagger.tag(source, output, max_resnum)
@staticmethod
def load_humor(humor_path: str, lex_path: str) -> HumorAnalyser:
"""Tries to load and instantiate the pyhumor module.
It raises FileNotFoundError if any parameter is invalid.
:param humor_path: The path of the pyhumor module file.
:param lex_path: The path of the lex directory for humor.
:return: A HumorAnalyser object.
"""
humor_module = importlib.machinery.SourceFileLoader("humor", humor_path).load_module()
humor = humor_module.Humor(_lex_path=lex_path)
return HumorAnalyser(humor)
@staticmethod
def create_tagger(model_path: str,
analyser: str,
no_stemming: bool,
max_guessed: int,
beam_log_theta: float,
use_beam_search: bool,
conf: Configuration,
humor_path: str,
lex_path: str) -> POSTagger:
"""Create a tagger object with the given properties.
        :param model_path: Path of the trained model file.
        :param analyser: "integrated", "none" or the path of a morphological table file.
        :param no_stemming: Do not perform stemming, output only POS tags.
        :param max_guessed: Maximum number of guessed tags for each token.
        :param beam_log_theta: Logarithm of the beam-search pruning threshold.
        :param use_beam_search: Use the Beam Search decoder instead of Viterbi.
        :param conf: Configuration containing the tag mappings.
        :param humor_path: The path of the pyhumor module file.
        :param lex_path: The path of the lex directory for Humor.
:return: a tagger object.
"""
if analyser == PurePos.INTEGRATED_MA:
try:
ma = PurePos.load_humor(humor_path+"/bin/pyhumor/__init__.py", lex_path)
except FileNotFoundError:
print("Humor module not found. Not using any morphological analyzer.",
file=sys.stderr)
ma = BaseMorphologicalAnalyser()
elif analyser == PurePos.NONE_MA:
ma = BaseMorphologicalAnalyser()
else:
print("Using morphological table at: {}.".format(analyser), file=sys.stderr)
ma = MorphologicalTable(open(analyser))
print("Reading model... ", file=sys.stderr)
rawmodel = StandardSerializer.read_model(model_path)
print("Compiling model... ", file=sys.stderr)
cmodel = rawmodel.compile(conf)
suff_log_theta = math.log(10)
if no_stemming:
tagger = POSTagger(cmodel, ma, beam_log_theta,
suff_log_theta, max_guessed, use_beam_search)
else:
tagger = MorphTagger(cmodel, ma, beam_log_theta, suff_log_theta,
max_guessed, use_beam_search)
return tagger
def __init__(self, options: dict):
self.options = options
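        # The first character of --input-separator is the field delimiter (cf. sed);
        # the remaining fields are the analysis opening marker, the regex that splits
        # alternative analyses, the closing marker and the tag opening character,
        # e.g. "#{{#||#}}#[".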
seps = options["input_separator"][1:].split(options["input_separator"][0])
AnalysisQueue.ANAL_OPEN = seps[0]
AnalysisQueue.ANAL_SPLIT_RE = seps[1]
AnalysisQueue.ANAL_CLOSE = seps[2]
AnalysisQueue.ANAL_TAG_OPEN = seps[3]
def run(self):
if self.options.get("config_file") is None:
util.CONFIGURATION = Configuration()
else:
util.CONFIGURATION = Configuration.read(self.options["config_file"])
Token.SEP = self.options["separator"]
if self.options["command"] == self.TRAIN_OPT:
self.train(self.options["encoding"],
self.options["model"],
self.options["input_file"],
self.options["tag_order"],
self.options["emission_order"],
self.options["suffix_length"],
self.options["rare_frequency"],
self.options["separator"],
"\n") # todo sor elválasztó?
elif self.options["command"] == self.TAG_OPT:
self.tag(self.options["encoding"],
self.options["model"],
self.options["input_file"],
self.options["morphology"],
self.options.get("no_stemming", False),
self.options["max_guessed"],
self.options["max_results"],
self.options["beam_theta"],
self.options.get("beam_decoder", False),
self.options["output_file"],
self.options.get("color_stdout", False),
self.options["pyhumor_path"],
self.options["lex_path"])
def main():
try:
options = parse_arguments()
PurePos(vars(options)).run()
except KeyboardInterrupt:
print("\nBye!", file=sys.stderr)
if __name__ == '__main__':
main()
| lgpl-3.0 | 4,538,679,995,033,387,500 | 47.960894 | 100 | 0.560646 | false |
KelSolaar/Manager | manager/tests/tests_manager/tests_globals/tests_constants.py | 1 | 5659 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
**tests_constants.py**
**Platform:**
Windows, Linux, Mac Os X.
**Description:**
Defines units tests for :mod:`manager.globals.constants` module.
**Others:**
"""
from __future__ import unicode_literals
import sys
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
from manager.globals.constants import Constants
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = ["TestConstants"]
class TestConstants(unittest.TestCase):
"""
Defines :class:`manager.globals.constants.Constants` class units tests methods.
"""
def test_required_attributes(self):
"""
Tests presence of required attributes.
"""
required_attributes = ("application_name",
"major_version",
"minor_version",
"change_version",
"version",
"logger",
"verbosity_level",
"verbosity_labels",
"logging_default_formatter",
"logging_separators",
"default_codec",
"codec_error",
"application_directory",
"provider_directory",
"null_object")
for attribute in required_attributes:
self.assertIn(attribute, Constants.__dict__)
def test_application_name_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.application_name` attribute.
"""
self.assertRegexpMatches(Constants.application_name, "\w+")
def test_major_version_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.major_version` attribute.
"""
self.assertRegexpMatches(Constants.version, "\d")
def test_minor_version_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.minor_version` attribute.
"""
self.assertRegexpMatches(Constants.version, "\d")
def test_change_version_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.change_version` attribute.
"""
self.assertRegexpMatches(Constants.version, "\d")
def test_version_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.version` attribute.
"""
self.assertRegexpMatches(Constants.version, "\d\.\d\.\d")
def test_logger_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.logger` attribute.
"""
self.assertRegexpMatches(Constants.logger, "\w+")
def test_verbosity_level_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.verbosity_level` attribute.
"""
self.assertIsInstance(Constants.verbosity_level, int)
self.assertGreaterEqual(Constants.verbosity_level, 0)
self.assertLessEqual(Constants.verbosity_level, 4)
def test_verbosity_labels_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.verbosity_labels` attribute.
"""
self.assertIsInstance(Constants.verbosity_labels, tuple)
for label in Constants.verbosity_labels:
self.assertIsInstance(label, unicode)
def test_logging_default_formatter_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.logging_default_formatter` attribute.
"""
self.assertIsInstance(Constants.logging_default_formatter, unicode)
def test_logging_separators_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.logging_separators` attribute.
"""
self.assertIsInstance(Constants.logging_separators, unicode)
def test_default_codec_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.default_codec` attribute.
"""
valid_encodings = ("utf-8",
"cp1252")
self.assertIn(Constants.default_codec, valid_encodings)
def test_encoding_error_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.codec_error` attribute.
"""
valid_encodings_errors = ("strict",
"ignore",
"replace",
"xmlcharrefreplace")
self.assertIn(Constants.codec_error, valid_encodings_errors)
def test_application_directory_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.application_directory` attribute.
"""
self.assertRegexpMatches(Constants.application_directory, "\w+")
def test_provider_directory_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.provider_directory` attribute.
"""
self.assertRegexpMatches(Constants.provider_directory, "\.*\w")
def test_null_object_attribute(self):
"""
Tests :attr:`manager.globals.constants.Constants.null_object` attribute.
"""
self.assertRegexpMatches(Constants.null_object, "\w+")
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | 3,851,480,431,661,635,600 | 29.424731 | 94 | 0.587913 | false |
pwong-mapr/private-hue | apps/useradmin/src/useradmin/tests.py | 1 | 40345 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for "user admin"
"""
import re
import urllib
import ldap
from nose.plugins.attrib import attr
from nose.tools import assert_true, assert_equal, assert_false
import desktop.conf
from desktop.lib.test_utils import grant_access
from desktop.lib.django_test_util import make_logged_in_client
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.utils.encoding import smart_unicode
from django.core.urlresolvers import reverse
from useradmin.models import HuePermission, GroupPermission, LdapGroup, UserProfile
from useradmin.models import get_profile, get_default_user_group
import useradmin.conf
from hadoop import pseudo_hdfs4
from views import sync_ldap_users, sync_ldap_groups, import_ldap_users, import_ldap_groups, \
add_ldap_users, add_ldap_groups, sync_ldap_users_groups
import ldap_access
def reset_all_users():
"""Reset to a clean state by deleting all users"""
for user in User.objects.all():
user.delete()
def reset_all_groups():
"""Reset to a clean state by deleting all groups"""
useradmin.conf.DEFAULT_USER_GROUP.set_for_testing(None)
for grp in Group.objects.all():
grp.delete()
class LdapTestConnection(object):
"""
Test class which mimics the behaviour of LdapConnection (from ldap_access.py).
It also includes functionality to fake modifications to an LDAP server. It is designed
as a singleton, to allow for changes to persist across discrete connections.
This class assumes uid is the user_name_attr.
"""
def __init__(self):
self._instance = LdapTestConnection.Data()
def add_user_group_for_test(self, user, group):
self._instance.groups[group]['members'].append(user)
def remove_user_group_for_test(self, user, group):
self._instance.groups[group]['members'].remove(user)
def add_posix_user_group_for_test(self, user, group):
self._instance.groups[group]['posix_members'].append(user)
def remove_posix_user_group_for_test(self, user, group):
self._instance.groups[group]['posix_members'].remove(user)
def find_users(self, username_pattern, search_attr=None, user_name_attr=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
""" Returns info for a particular user via a case insensitive search """
if find_by_dn:
data = filter(lambda attrs: attrs['dn'] == username_pattern, self._instance.users.values())
else:
username_pattern = "^%s$" % username_pattern.replace('.','\\.').replace('*', '.*')
username_fsm = re.compile(username_pattern, flags=re.I)
usernames = filter(lambda username: username_fsm.match(username), self._instance.users.keys())
data = [self._instance.users.get(username) for username in usernames]
return data
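  # Both find_users() and find_groups() rewrite shell-style wildcards into anchored
  # regexes, e.g. '*r*' becomes '^.*r.*$' (find_users matches case-insensitively via
  # re.I), so the wildcard imports exercised in the tests below match any name
  # containing an 'r'.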
def find_groups(self, groupname_pattern, search_attr=None, group_name_attr=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
""" Return all groups in the system with parents and children """
if find_by_dn:
data = filter(lambda attrs: attrs['dn'] == groupname_pattern, self._instance.groups.values())
# SCOPE_SUBTREE means we return all sub-entries of the desired entry along with the desired entry.
if data and scope == ldap.SCOPE_SUBTREE:
sub_data = filter(lambda attrs: attrs['dn'].endswith(data[0]['dn']), self._instance.groups.values())
data.extend(sub_data)
else:
groupname_pattern = "^%s$" % groupname_pattern.replace('.','\\.').replace('*', '.*')
groupnames = filter(lambda username: re.match(groupname_pattern, username), self._instance.groups.keys())
data = [self._instance.groups.get(groupname) for groupname in groupnames]
return data
class Data:
def __init__(self):
self.users = {'moe': {'dn': 'uid=moe,ou=People,dc=example,dc=com', 'username':'moe', 'first':'Moe', 'email':'[email protected]', 'groups': ['cn=TestUsers,ou=Groups,dc=example,dc=com']},
'lårry': {'dn': 'uid=lårry,ou=People,dc=example,dc=com', 'username':'lårry', 'first':'Larry', 'last':'Stooge', 'email':'[email protected]', 'groups': ['cn=TestUsers,ou=Groups,dc=example,dc=com', 'cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com']},
'curly': {'dn': 'uid=curly,ou=People,dc=example,dc=com', 'username':'curly', 'first':'Curly', 'last':'Stooge', 'email':'[email protected]', 'groups': ['cn=TestUsers,ou=Groups,dc=example,dc=com', 'cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com']},
'rock': {'dn': 'uid=rock,ou=People,dc=example,dc=com', 'username':'rock', 'first':'rock', 'last':'man', 'email':'[email protected]', 'groups': ['cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com']},
'otherguy': {'dn': 'uid=otherguy,ou=People,dc=example,dc=com', 'username':'otherguy', 'first':'Other', 'last':'Guy', 'email':'[email protected]'},
'posix_person': {'dn': 'uid=posix_person,ou=People,dc=example,dc=com', 'username': 'posix_person', 'first': 'pos', 'last': 'ix', 'email': '[email protected]'},
'posix_person2': {'dn': 'uid=posix_person2,ou=People,dc=example,dc=com', 'username': 'posix_person2', 'first': 'pos', 'last': 'ix', 'email': '[email protected]'}}
self.groups = {'TestUsers': {
'dn': 'cn=TestUsers,ou=Groups,dc=example,dc=com',
'name':'TestUsers',
'members':['uid=moe,ou=People,dc=example,dc=com','uid=lårry,ou=People,dc=example,dc=com','uid=curly,ou=People,dc=example,dc=com'],
'posix_members':[]},
'Test Administrators': {
'dn': 'cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com',
'name':'Test Administrators',
'members':['uid=rock,ou=People,dc=example,dc=com','uid=lårry,ou=People,dc=example,dc=com','uid=curly,ou=People,dc=example,dc=com'],
'posix_members':[]},
'OtherGroup': {
'dn': 'cn=OtherGroup,cn=TestUsers,ou=Groups,dc=example,dc=com',
'name':'OtherGroup',
'members':[],
'posix_members':[]},
'PosixGroup': {
'dn': 'cn=PosixGroup,ou=Groups,dc=example,dc=com',
'name':'PosixGroup',
'members':[],
'posix_members':['posix_person','lårry']},
'PosixGroup1': {
'dn': 'cn=PosixGroup1,cn=PosixGroup,ou=Groups,dc=example,dc=com',
'name':'PosixGroup1',
'members':[],
'posix_members':['posix_person2']},
}
def test_invalid_username():
BAD_NAMES = ('-foo', 'foo:o', 'foo o', ' foo')
c = make_logged_in_client(username="test", is_superuser=True)
for bad_name in BAD_NAMES:
assert_true(c.get('/useradmin/users/new'))
response = c.post('/useradmin/users/new', dict(username=bad_name, password1="test", password2="test"))
assert_true('not allowed' in response.context["form"].errors['username'][0])
def test_group_permissions():
reset_all_users()
reset_all_groups()
# Get ourselves set up with a user and a group
c = make_logged_in_client(username="test", is_superuser=True)
Group.objects.create(name="test-group")
test_user = User.objects.get(username="test")
test_user.groups.add(Group.objects.get(name="test-group"))
test_user.save()
# Make sure that a superuser can always access applications
response = c.get('/useradmin/users')
assert_true('Hue Users' in response.content)
assert_true(len(GroupPermission.objects.all()) == 0)
c.post('/useradmin/groups/edit/test-group',
dict(name="test-group",
members=[User.objects.get(username="test").pk],
permissions=[HuePermission.objects.get(app='useradmin',action='access').pk],
save="Save"), follow=True)
assert_true(len(GroupPermission.objects.all()) == 1)
# Now test that we have limited access
c1 = make_logged_in_client(username="nonadmin", is_superuser=False)
response = c1.get('/useradmin/users')
assert_true('You do not have permission to access the Useradmin application.' in response.content)
# Add the non-admin to a group that should grant permissions to the app
test_user = User.objects.get(username="nonadmin")
test_user.groups.add(Group.objects.get(name='test-group'))
test_user.save()
# Check that we have access now
response = c1.get('/useradmin/users')
assert_true(get_profile(test_user).has_hue_permission('access','useradmin'))
assert_true('Hue Users' in response.content)
# Make sure we can't modify permissions
response = c1.get('/useradmin/permissions/edit/useradmin/access')
assert_true('must be a superuser to change permissions' in response.content)
# And revoke access from the group
c.post('/useradmin/permissions/edit/useradmin/access',
dict(app='useradmin',
priv='access',
groups=[],
save="Save"), follow=True)
assert_true(len(GroupPermission.objects.all()) == 0)
assert_false(get_profile(test_user).has_hue_permission('access','useradmin'))
# We should no longer have access to the app
response = c1.get('/useradmin/users')
assert_true('You do not have permission to access the Useradmin application.' in response.content)
def test_default_group():
reset_all_users()
reset_all_groups()
useradmin.conf.DEFAULT_USER_GROUP.set_for_testing('test_default')
get_default_user_group()
c = make_logged_in_client(username='test', is_superuser=True)
# Create default group if it doesn't already exist.
assert_true(Group.objects.filter(name='test_default').exists())
# Try deleting the default group
assert_true(Group.objects.filter(name='test_default').exists())
response = c.post('/useradmin/groups/delete/test_default')
assert_true('default user group may not be deleted' in response.content)
assert_true(Group.objects.filter(name='test_default').exists())
# Change the name of the default group, and try deleting again
useradmin.conf.DEFAULT_USER_GROUP.set_for_testing('new_default')
response = c.post('/useradmin/groups/delete/test_default')
assert_false(Group.objects.filter(name='test_default').exists())
assert_true(Group.objects.filter(name='new_default').exists())
def test_get_profile():
# Ensure profiles are created after get_profile is called.
reset_all_users()
reset_all_groups()
c = make_logged_in_client(username='test', password='test', is_superuser=True)
assert_equal(0, UserProfile.objects.count())
p = get_profile(User.objects.get(username='test'))
assert_equal(1, UserProfile.objects.count())
def test_group_admin():
reset_all_users()
reset_all_groups()
c = make_logged_in_client(username="test", is_superuser=True)
response = c.get('/useradmin/groups')
# No groups just yet
assert_true(len(response.context["groups"]) == 0)
assert_true("Hue Groups" in response.content)
# Create a group
response = c.get('/useradmin/groups/new')
assert_equal('/useradmin/groups/new', response.context['action'])
c.post('/useradmin/groups/new', dict(name="testgroup"))
# We should have an empty group in the DB now
assert_true(len(Group.objects.all()) == 1)
assert_true(Group.objects.filter(name="testgroup").exists())
assert_true(len(Group.objects.get(name="testgroup").user_set.all()) == 0)
# And now, just for kicks, let's try adding a user
response = c.post('/useradmin/groups/edit/testgroup',
dict(name="testgroup",
members=[User.objects.get(username="test").pk],
save="Save"), follow=True)
assert_true(len(Group.objects.get(name="testgroup").user_set.all()) == 1)
assert_true(Group.objects.get(name="testgroup").user_set.filter(username="test").exists())
# Test some permissions
c2 = make_logged_in_client(username="nonadmin", is_superuser=False)
# Need to give access to the user for the rest of the test
group = Group.objects.create(name="access-group")
perm = HuePermission.objects.get(app='useradmin', action='access')
GroupPermission.objects.create(group=group, hue_permission=perm)
test_user = User.objects.get(username="nonadmin")
test_user.groups.add(Group.objects.get(name="access-group"))
test_user.save()
# Make sure non-superusers can't do bad things
response = c2.get('/useradmin/groups/new')
assert_true("You must be a superuser" in response.content)
response = c2.get('/useradmin/groups/edit/testgroup')
assert_true("You must be a superuser" in response.content)
response = c2.post('/useradmin/groups/new', dict(name="nonsuperuser"))
assert_true("You must be a superuser" in response.content)
response = c2.post('/useradmin/groups/edit/testgroup',
dict(name="nonsuperuser",
members=[User.objects.get(username="test").pk],
save="Save"), follow=True)
assert_true("You must be a superuser" in response.content)
# Should be one group left, because we created the other group
response = c.post('/useradmin/groups/delete/testgroup')
assert_true(len(Group.objects.all()) == 1)
group_count = len(Group.objects.all())
response = c.post('/useradmin/groups/new', dict(name="with space"))
assert_equal(len(Group.objects.all()), group_count + 1)
def test_user_admin():
FUNNY_NAME = '~`!@#$%^&*()_-+={}[]|\;"<>?/,.'
FUNNY_NAME_QUOTED = urllib.quote(FUNNY_NAME)
reset_all_users()
reset_all_groups()
useradmin.conf.DEFAULT_USER_GROUP.set_for_testing('test_default')
c = make_logged_in_client('test', is_superuser=True)
user = User.objects.get(username='test')
# Test basic output.
response = c.get('/useradmin/')
assert_true(len(response.context["users"]) > 0)
assert_true("Hue Users" in response.content)
# Test editing a superuser
# Just check that this comes back
response = c.get('/useradmin/users/edit/test')
# Edit it, to add a first and last name
response = c.post('/useradmin/users/edit/test',
dict(username="test",
first_name=u"Inglés",
last_name=u"Español",
is_superuser="True",
is_active="True"),
follow=True)
assert_true("User information updated" in response.content,
"Notification should be displayed in: %s" % response.content)
# Edit it, can't change username
response = c.post('/useradmin/users/edit/test',
dict(username="test2",
first_name=u"Inglés",
last_name=u"Español",
is_superuser="True",
is_active="True"),
follow=True)
assert_true("You cannot change a username" in response.content)
# Now make sure that those were materialized
response = c.get('/useradmin/users/edit/test')
assert_equal(smart_unicode("Inglés"), response.context["form"].instance.first_name)
assert_true("Español" in response.content)
# Shouldn't be able to demote to non-superuser
response = c.post('/useradmin/users/edit/test', dict(username="test",
first_name=u"Inglés", last_name=u"Español",
is_superuser=False, is_active=True))
assert_true("You cannot remove" in response.content,
"Shouldn't be able to remove the last superuser")
# Shouldn't be able to delete oneself
response = c.post('/useradmin/users/delete', {u'user_ids': [user.id]})
assert_true("You cannot remove yourself" in response.content,
"Shouldn't be able to delete the last superuser")
# Let's try changing the password
response = c.post('/useradmin/users/edit/test', dict(username="test", first_name="Tom", last_name="Tester", is_superuser=True, password1="foo", password2="foobar"))
assert_equal(["Passwords do not match."], response.context["form"]["password2"].errors, "Should have complained about mismatched password")
response = c.post('/useradmin/users/edit/test', dict(username="test", first_name="Tom", last_name="Tester", password1="foo", password2="foo", is_active=True, is_superuser=True))
assert_true(User.objects.get(username="test").is_superuser)
assert_true(User.objects.get(username="test").check_password("foo"))
# Change it back!
response = c.post('/useradmin/users/edit/test', dict(username="test", first_name="Tom", last_name="Tester", password1="test", password2="test", is_active="True", is_superuser="True"))
assert_true(User.objects.get(username="test").check_password("test"))
assert_true(make_logged_in_client(username = "test", password = "test"),
"Check that we can still login.")
# Check new user form for default group
group = get_default_user_group()
response = c.get('/useradmin/users/new')
assert_true(response)
assert_true(('<option value="1" selected="selected">%s</option>' % group) in str(response))
# Create a new regular user (duplicate name)
response = c.post('/useradmin/users/new', dict(username="test", password1="test", password2="test"))
assert_equal({ 'username': ["User with this Username already exists."]}, response.context["form"].errors)
# Create a new regular user (for real)
response = c.post('/useradmin/users/new', dict(username=FUNNY_NAME,
password1="test",
password2="test",
is_active="True"))
response = c.get('/useradmin/')
assert_true(FUNNY_NAME_QUOTED in response.content)
assert_true(len(response.context["users"]) > 1)
assert_true("Hue Users" in response.content)
# Validate profile is created.
assert_true(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())
# Need to give access to the user for the rest of the test
group = Group.objects.create(name="test-group")
perm = HuePermission.objects.get(app='useradmin', action='access')
GroupPermission.objects.create(group=group, hue_permission=perm)
# Verify that we can modify user groups through the user admin pages
response = c.post('/useradmin/users/new', dict(username="group_member", password1="test", password2="test", groups=[group.pk]))
User.objects.get(username='group_member')
assert_true(User.objects.get(username='group_member').groups.filter(name='test-group').exists())
response = c.post('/useradmin/users/edit/group_member', dict(username="group_member", password1="test", password2="test", groups=[]))
assert_false(User.objects.get(username='group_member').groups.filter(name='test-group').exists())
# Check permissions by logging in as the new user
c_reg = make_logged_in_client(username=FUNNY_NAME, password="test")
test_user = User.objects.get(username=FUNNY_NAME)
test_user.groups.add(Group.objects.get(name="test-group"))
test_user.save()
# Regular user should be able to modify oneself
response = c_reg.post('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,),
dict(username = FUNNY_NAME,
first_name = "Hello",
is_active = True))
response = c_reg.get('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,))
assert_equal("Hello", response.context["form"].instance.first_name)
funny_user = User.objects.get(username=FUNNY_NAME)
# Can't edit other people.
response = c_reg.post("/useradmin/users/delete", {u'user_ids': [funny_user.id]})
assert_true("You must be a superuser" in response.content,
"Regular user can't edit other people")
# Revert to regular "test" user, that has superuser powers.
c_su = make_logged_in_client()
# Inactivate FUNNY_NAME
c_su.post('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,),
dict(username = FUNNY_NAME,
first_name = "Hello",
is_active = False))
# Now make sure FUNNY_NAME can't log back in
response = c_reg.get('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,))
assert_true(response.status_code == 302 and "login" in response["location"],
"Inactivated user gets redirected to login page")
# Delete that regular user
funny_profile = get_profile(test_user)
response = c_su.post('/useradmin/users/delete', {u'user_ids': [funny_user.id]})
assert_equal(302, response.status_code)
assert_false(User.objects.filter(username=FUNNY_NAME).exists())
assert_false(UserProfile.objects.filter(id=funny_profile.id).exists())
# Bulk delete users
u1 = User.objects.create(username='u1', password="u1")
u2 = User.objects.create(username='u2', password="u2")
assert_equal(User.objects.filter(username__in=['u1', 'u2']).count(), 2)
response = c_su.post('/useradmin/users/delete', {u'user_ids': [u1.id, u2.id]})
assert_equal(User.objects.filter(username__in=['u1', 'u2']).count(), 0)
# Make sure that user deletion works if the user has never performed a request.
funny_user = User.objects.create(username=FUNNY_NAME, password='test')
assert_true(User.objects.filter(username=FUNNY_NAME).exists())
assert_false(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())
response = c_su.post('/useradmin/users/delete', {u'user_ids': [funny_user.id]})
assert_equal(302, response.status_code)
assert_false(User.objects.filter(username=FUNNY_NAME).exists())
assert_false(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())
# You shouldn't be able to create a user without a password
response = c_su.post('/useradmin/users/new', dict(username="test"))
assert_true("You must specify a password when creating a new user." in response.content)
def test_useradmin_ldap_user_group_membership_sync():
reset = [desktop.conf.AUTH.USER_GROUP_MEMBERSHIP_SYNCHRONIZATION_BACKEND.set_for_testing('desktop.auth.backend.LdapSynchronizationBackend')]
settings.MIDDLEWARE_CLASSES.append('desktop.middleware.UserGroupSynchronizationMiddleware')
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
try:
# Import curly who is part of TestUsers and Test Administrators
import_ldap_users('curly', sync_groups=False, import_by_dn=False)
# Set a password so that we can login
user = User.objects.get(username='curly')
user.set_password('test')
user.save()
# Should have 0 groups
assert_equal(0, user.groups.all().count())
# Make an authenticated request as curly so that we can see call middleware.
c = make_logged_in_client('curly', 'test', is_superuser=False)
grant_access("curly", "test", "useradmin")
response = c.get('/useradmin/users')
# Refresh user groups
user = User.objects.get(username='curly')
# Should have 3 groups now. 2 from LDAP and 1 from 'grant_access' call.
assert_equal(3, user.groups.all().count(), user.groups.all())
# Now remove a group and try again.
old_group = ldap_access.CACHED_LDAP_CONN._instance.users['curly']['groups'].pop()
# Make an authenticated request as curly so that we can see call middleware.
response = c.get('/useradmin/users')
# Refresh user groups
user = User.objects.get(username='curly')
# Should have 2 groups now. 1 from LDAP and 1 from 'grant_access' call.
assert_equal(3, user.groups.all().count(), user.groups.all())
finally:
for finish in reset:
finish()
settings.MIDDLEWARE_CLASSES.remove('desktop.middleware.UserGroupSynchronizationMiddleware')
def test_useradmin_ldap_group_integration():
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Import groups only
import_ldap_groups('TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups('TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Should import a group, but will only sync already-imported members
import_ldap_groups('Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 3)
assert_equal(Group.objects.all().count(), 2)
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 2)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups('TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='moe').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups('TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 3)
assert_equal(User.objects.get(username='moe').groups.all().count(), 1)
# Import all members of TestUsers and members of subgroups
import_ldap_groups('TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 4)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups('OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
def test_useradmin_ldap_posix_group_integration():
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Import groups only
import_ldap_groups('PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups('PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Should import a group, but will only sync already-imported members
import_ldap_groups('Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 2, User.objects.all())
assert_equal(Group.objects.all().count(), 2, Group.objects.all())
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 1)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups('PosixGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 1)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups('PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1)
# Import all members of PosixGroup and members of subgroups
import_ldap_groups('PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups('OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
def test_useradmin_ldap_user_integration():
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Try importing a user
import_ldap_users('lårry', sync_groups=False, import_by_dn=False)
larry = User.objects.get(username='lårry')
assert_true(larry.first_name == 'Larry')
assert_true(larry.last_name == 'Stooge')
assert_true(larry.email == '[email protected]')
assert_true(get_profile(larry).creation_method == str(UserProfile.CreationMethod.EXTERNAL))
# Should be a noop
sync_ldap_users()
sync_ldap_groups()
assert_equal(User.objects.all().count(), 1)
assert_equal(Group.objects.all().count(), 0)
# Make sure that if a Hue user already exists with a naming collision, we
# won't overwrite any of that user's information.
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
import_ldap_users('otherguy', sync_groups=False, import_by_dn=False)
hue_user = User.objects.get(username='otherguy')
assert_equal(get_profile(hue_user).creation_method, str(UserProfile.CreationMethod.HUE))
assert_equal(hue_user.first_name, 'Different')
# Try importing a user and sync groups
import_ldap_users('curly', sync_groups=True, import_by_dn=False)
curly = User.objects.get(username='curly')
assert_equal(curly.first_name, 'Curly')
assert_equal(curly.last_name, 'Stooge')
assert_equal(curly.email, '[email protected]')
assert_equal(get_profile(curly).creation_method, str(UserProfile.CreationMethod.EXTERNAL))
assert_equal(2, curly.groups.all().count(), curly.groups.all())
reset_all_users()
reset_all_groups()
# Test import case sensitivity
reset = desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True)
import_ldap_users('Lårry', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Lårry').exists())
assert_true(User.objects.filter(username='lårry').exists())
reset()
def test_add_ldap_users():
URL = reverse(add_ldap_users)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
assert_true(c.get(URL))
response = c.post(URL, dict(username_pattern='moe', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
response = c.post(URL, dict(username_pattern='bad_name', password1='test', password2='test'))
assert_true('Could not' in response.context['form'].errors['username_pattern'][0], response)
# Test wild card
response = c.post(URL, dict(username_pattern='*r*', password1='test', password2='test'))
assert_true('/useradmin/users' in response['Location'], response)
# Test ignore case
reset = desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True)
User.objects.filter(username='moe').delete()
assert_false(User.objects.filter(username='Moe').exists())
assert_false(User.objects.filter(username='moe').exists())
response = c.post(URL, dict(username_pattern='Moe', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
assert_false(User.objects.filter(username='Moe').exists())
assert_true(User.objects.filter(username='moe').exists())
reset()
def test_add_ldap_groups():
URL = reverse(add_ldap_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client(username='test', is_superuser=True)
assert_true(c.get(URL))
response = c.post(URL, dict(groupname_pattern='TestUsers'))
assert_true('Location' in response, response)
assert_true('/useradmin/groups' in response['Location'])
# Test with space
response = c.post(URL, dict(groupname_pattern='Test Administrators'))
assert_true('Location' in response, response)
assert_true('/useradmin/groups' in response['Location'], response)
response = c.post(URL, dict(groupname_pattern='toolongnametoolongnametoolongnametoolongnametoolongnametoolongnametoolongnametoolongname'))
assert_true('Ensure this value has at most 80 characters' in response.context['form'].errors['groupname_pattern'][0], response)
# Test wild card
response = c.post(URL, dict(groupname_pattern='*r*'))
assert_true('/useradmin/groups' in response['Location'], response)
def test_sync_ldap_users_groups():
URL = reverse(sync_ldap_users_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
assert_true(c.get(URL))
assert_true(c.post(URL))
def test_ldap_exception_handling():
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
class LdapTestConnectionError(LdapTestConnection):
def find_users(self, user, find_by_dn=False):
raise ldap.LDAPError('No such object')
ldap_access.CACHED_LDAP_CONN = LdapTestConnectionError()
c = make_logged_in_client('test', is_superuser=True)
response = c.post(reverse(add_ldap_users), dict(username_pattern='moe', password1='test', password2='test'), follow=True)
assert_true('There was an error when communicating with LDAP' in response.content, response)
@attr('requires_hadoop')
def test_ensure_home_directory_add_ldap_users():
URL = reverse(add_ldap_users)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True)
cluster.fs.setuser(cluster.superuser)
assert_true(c.get(URL))
response = c.post(URL, dict(username_pattern='moe', password1='test', password2='test'))
assert_true('/useradmin/users' in response['Location'])
assert_false(cluster.fs.exists('/user/moe'))
# Try same thing with home directory creation.
response = c.post(URL, dict(username_pattern='curly', password1='test', password2='test', ensure_home_directory=True))
assert_true('/useradmin/users' in response['Location'])
assert_true(cluster.fs.exists('/user/curly'))
response = c.post(URL, dict(username_pattern='bad_name', password1='test', password2='test'))
assert_true('Could not' in response.context['form'].errors['username_pattern'][0])
assert_false(cluster.fs.exists('/user/bad_name'))
# See if moe, who did not ask for his home directory, has a home directory.
assert_false(cluster.fs.exists('/user/moe'))
# Try wild card now
response = c.post(URL, dict(username_pattern='*r*', password1='test', password2='test', ensure_home_directory=True))
assert_true('/useradmin/users' in response['Location'])
assert_true(cluster.fs.exists('/user/curly'))
assert_true(cluster.fs.exists(u'/user/lårry'))
assert_true(cluster.fs.exists('/user/otherguy'))
# Clean up
cluster.fs.rmtree('/user/curly')
cluster.fs.rmtree(u'/user/lårry')
cluster.fs.rmtree('/user/otherguy')
@attr('requires_hadoop')
def test_ensure_home_directory_sync_ldap_users_groups():
URL = reverse(sync_ldap_users_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True)
cluster.fs.setuser(cluster.superuser)
response = c.post(reverse(add_ldap_users), dict(username_pattern='curly', password1='test', password2='test'))
assert_false(cluster.fs.exists('/user/curly'))
assert_true(c.post(URL, dict(ensure_home_directory=True)))
assert_true(cluster.fs.exists('/user/curly'))
@attr('requires_hadoop')
def test_ensure_home_directory():
reset_all_users()
reset_all_groups()
# Cluster and client for home directory creation
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True, groupname='test1')
cluster.fs.setuser(cluster.superuser)
# Create a user with a home directory
assert_false(cluster.fs.exists('/user/test1'))
response = c.post('/useradmin/users/new', dict(username="test1", password1='test', password2='test', ensure_home_directory=True))
assert_true(cluster.fs.exists('/user/test1'))
dir_stat = cluster.fs.stats('/user/test1')
assert_equal('test1', dir_stat.user)
assert_equal('test1', dir_stat.group)
assert_equal('40755', '%o' % dir_stat.mode)
# Create a user, then add their home directory
assert_false(cluster.fs.exists('/user/test2'))
response = c.post('/useradmin/users/new', dict(username="test2", password1='test', password2='test'))
assert_false(cluster.fs.exists('/user/test2'))
response = c.post('/useradmin/users/edit/%s' % "test2", dict(username="test2", password1='test', password2='test', ensure_home_directory=True))
assert_true(cluster.fs.exists('/user/test2'))
dir_stat = cluster.fs.stats('/user/test2')
assert_equal('test2', dir_stat.user)
assert_equal('test2', dir_stat.group)
assert_equal('40755', '%o' % dir_stat.mode)
| apache-2.0 | 4,844,797,381,928,160,000 | 45.886047 | 283 | 0.693244 | false |
jakbob/guitarlegend | gamescene.py | 1 | 13650 | # -*- coding: utf-8 -*-
#
# gamescene.py
# Defines the behaviour of the actual game scene
#
# (c) Jakob Florell and Jonne Mickelin 2009
import error
####################
# Standard library #
####################
import math
import heapq
import os
####################
# Required Modules #
####################
import pyglet
from pyglet.gl import *
from pyglet.graphics import vertex_list
####################
# Game modules #
####################
import scene
import options
import tab
import wonderful
import graphics
import particlesystem
from manager import game_manager
the_danger_point = 100 #the point where the notes should be played
def midify(f):
"""
Returns the midi keycode for given frequency.
Could probably be more optimized but this will have to do
for now.
"""
n = round(69.0 + 12.0 * math.log(f / 440.0, 2))
return int(n)
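# Worked example for midify() (illustrative values):
#   midify(440.0) -> 69   (concert A: 69 + 12*log2(440/440))
#   midify(261.6) -> 60   (middle C)
# Doubling the frequency raises the result by 12, i.e. one octave.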
def uniq(iterable):
N = options.DFT_SIZE
SAMPLE_RATE = options.SAMPLE_RATE
rets = {}
for (num, amp) in iterable:
try:
p = midify(num * float(SAMPLE_RATE)/N)
if p < 0:
raise OverflowError("Bajs")
except OverflowError:
continue
if p in rets:
if rets[p] < amp:
rets[p] = amp
else:
rets[p] = amp
return rets
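# Sketch of what uniq() yields, assuming (hypothetically) SAMPLE_RATE = 44100 and
# DFT_SIZE = 4096 in options: FFT bin 41 corresponds to 41 * 44100 / 4096 ~= 441 Hz,
# which midify() maps to MIDI pitch 69; bins that collapse onto the same pitch keep
# only their largest amplitude.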
def get_note_numbers(mag_list):
N = options.DFT_SIZE
MAG_THRESHOLD = options.MAG_THRESHOLD
if mag_list is not None:
assert mag_list[:N/2][0] == mag_list[0]
note_numbers = uniq(enumerate(mag_list[:N/2]))
return [(p, note_numbers[p]) for p in get_largest(note_numbers) if note_numbers[p] > MAG_THRESHOLD and p > 70 * float(N) / options.SAMPLE_RATE]
else:
return None
def get_largest(l):
largest = heapq.nlargest(6, l, key=(lambda key: l[key]))
return largest
def get_sound():
mag_list = wonderful.munch()
return get_note_numbers(mag_list)
#try:
# while True:
# t = time.clock()
# s = get_sound()
# if s is not None:
# print s, "\t\tin", t-lasttime, "seconds"
# lasttime = t
#
#except KeyboardInterrupt:
# pass
class GameScene(scene.TestScene):
"""In this scene we test things. Mostly notes"""
def __init__(self, soundfile, midifile):
self.name = "Ingame"
self._setup_graphics()
self._load_file(soundfile, midifile)
self.particles = particlesystem.ParticleSystem(velfactor=50)
def end(self):
try:
self.music.stop()
except ValueError:
            pass  # it sometimes glitches when the song is stopped twice
def on_resize(self, width, height):
# Perspective
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
## glOrtho(-width/2., width/2., -height/2., height/2., 0, 1000) # I should save this snippet somewhere else
gluPerspective(30, width / float(height), .1, 10000)
glMatrixMode(GL_MODELVIEW)
#self.camera.focus(width, height)
def game_draw(self, window):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # We need to set a default scene clearcolor.
# How about on_switch_to and on_witch_from functions?
glPushMatrix()
glLoadIdentity()
glEnable(GL_DEPTH_TEST)
glTranslatef(-window.width/2.0 + 100,
-self.guitar_neck.height/2.0,
-900.0)# Ugly magic number.
self.pointmeter.draw()
self.pointprocent.draw()
self.stringlabels.draw()
# Draw the notes rotated, as per the user's preferences
glRotatef(options.notes_x_rot, 1.0, 0.0, 0.0)
glRotatef(options.notes_y_rot, 0.0, 1.0, 0.0)
glRotatef(options.notes_z_rot, 0.0, 0.0, 1.0)
# Graphics of guitar neck in background?
self.guitar_neck.blit(0, 0)
glTranslatef(0, 0, 1.0)
self.note_batch.draw()
glTranslatef(0, 0, 1.0)
# The labels are also drawn rotated, which makes them less readable.
# I'll work on improving this, when I have time.
self.label_batch.draw()
glTranslatef(0, 0, 1.0)
self.deathbar.draw()
glDisable(GL_DEPTH_TEST)
#Try to uncomment this and see why it is commented out.
#There is something wrong with the particles, and I don't have
        #the energy to find out what. particlesystem.py is like an Italian
#restaurant.
#glLoadIdentity()
#glEnable(GL_DEPTH_TEST)
#glTranslatef(-window.width/2.0 + 100,
#-self.guitar_neck.height/2.0,
#-880.0) # Ugly magic number.
#self.particles.draw()
glDisable(GL_DEPTH_TEST)
glPopMatrix()
#self.camera.hud_mode(window.width, window.height)
def do_logic(self, dt):
# The progress of the notes is synchronized with the background song.
t = self.music.time
self._check_tempochange(t)
        # music.time may not advance every frame; when it stalls we reuse the last
        # delta and track the drift in self.offsync, then bleed it off gradually
        # below so note movement stays smooth.
if t == self.lasttime:
delta_time = self.lastdelta #move a little
self.offsync += self.lastdelta
else:
delta_time = t - self.lasttime
if self.offsync > delta_time / 3:
delta_time -= self.offsync / 3
self.offsync -= self.offsync / 3
else:
delta_time -= self.offsync
self.offsync = 0.0
try:
self._update_notes(delta_time)
except IndexError:
pass
self.particles.update(delta_time)
self.lasttime = t
self.lastdelta = delta_time
in_notes = get_sound()
if in_notes is not None:
#print in_notes
self._compare_notes([p for (p, mag) in in_notes])
def _setup_graphics(self):
        # ugly gray color:
        #glClearColor(0x4b/255.0, 0x4b/255.0, 0x4b/255.0, 0)
        # black:
glClearColor(0,0,0,0)
glClearDepth(1.0) # Prepare for 3d. Actually, this might as well
# be in on_resize, no? Or maybe not. I don't know.
glDepthFunc(GL_LEQUAL) # Change the z-priority or whatever one should call it
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
glHint(GL_POINT_SMOOTH_HINT, GL_NICEST)
glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST)
def _load_file(self, soundfile, midifile):
print "Loading File. Please wait."
self.tab = tab.Tab(midifile)
self.note_batch = pyglet.graphics.Batch()
self.label_batch = pyglet.graphics.Batch()
#load important images
self.guitar_neck = graphics.create_guitar_texture(3000)
img = pyglet.image.load(
os.path.join(options.data_dir, "thingy.png"))
self.deathbar = pyglet.sprite.Sprite(img,
the_danger_point + img.width / 2, 0)
#make important label
self.stringlabels = pyglet.graphics.Batch()
y = lambda string: (6 - string) / 6.0 * self.guitar_neck.height + 3.5
pyglet.text.Label("E", x=-15, y=y(1), batch=self.stringlabels)
pyglet.text.Label("B", x=-15, y=y(2), batch=self.stringlabels)
pyglet.text.Label("G", x=-15, y=y(3), batch=self.stringlabels)
pyglet.text.Label("D", x=-15, y=y(4), batch=self.stringlabels)
pyglet.text.Label("A", x=-15, y=y(5), batch=self.stringlabels)
pyglet.text.Label("E", x=-15, y=y(6), batch=self.stringlabels)
#do points stuff
self.points = 0
self.nonpoints = 0
self.pointmeter = pyglet.text.Label(str(self.points), font_size = 20,
bold = True, anchor_x = "right", anchor_y = "top",
color = (200, 200, 20, 255),
            x = 560, y = 270)  # ugly magic constants, but I can't be bothered to fix them
self.pointprocent = pyglet.text.Label("100%", font_size = 20,
bold = True, anchor_x = "right", anchor_y = "top",
color = (200, 200, 20, 255),
x = self.pointmeter.x, y = self.pointmeter.y - 40)
# Create the textures for all the notes
self.death_notes = [] # Graphics for all notes, active or inactive
for note in self.tab.all_notes:
x = the_danger_point + (note.start) * graphics.quarterlen / self.tab.ticksPerQuarter
y = (6 - note.string) / 6.0 * self.guitar_neck.height + 3.5 # 2 is calibration
notegraphic = graphics.DeathNote(note, self.tab.ticksPerQuarter,
x=x, y=y, batch=None)
self.death_notes.append(notegraphic)
# Only a fixed number of notes are moved across the screen at once, to
# improve performance
self.notecounter = 20 # Number of notes that will be active
self.active_sprites = self.death_notes[:self.notecounter]
for note in self.active_sprites:
note.sprite.batch = self.note_batch
note.label.begin_update()
note.label.batch = self.label_batch
note.label.end_update()
self.the_edge = [] #notes that are to be played
self.temponr = 0
        self.tempo = self.tab.tempo[self.temponr][1]  # choose the first tempo
music = pyglet.media.load(soundfile)
self.music = pyglet.media.StaticSource(music).play()
self.lasttime = self.music.time # The position in the song in the last frame
self.offsync = 0
self.lastdelta = 0 #holds the last delta_time, used for smooth movement
self.music._old_eos = self.music._on_eos
def on_music_eos():
self.music._old_eos()
game_manager.pop()
        self.music._on_eos = on_music_eos  # suspect a bug in pyglet
def _check_tempochange(self, t):
"""Check if there are more changes in tempo and if
it is time for such a change. In that case, do the change."""
# Tempo change position is in microseconds
        if len(self.tab.tempo) - 1 > self.temponr \
and self.tab.tempo[self.temponr + 1][0] <= t*1000000:
self.temponr += 1
self.tempo = self.tab.tempo[self.temponr][1]
def _update_notes(self, dt):
"""Make sure there are notes to update, and update them"""
if self.active_sprites:
self._update_active_notes(dt)
if self.notecounter < len(self.death_notes):
self._set_active_notes(dt)
        #self.check_whos_playing([])  # something like this?
def _update_active_notes(self, dt):
self.the_edge = [] #
# Update only active notes
for note in self.active_sprites:
# Movement during one second
#vel = graphics.quarterlen * 1000000 / float(self.tempo) # Tempo is in microseconds
note.update(dt, self.tempo)
if note.sprite.x < the_danger_point + 10 and \
note.sprite.x + note.sprite.width > the_danger_point - 10:
self.the_edge.append(note)
def _set_active_notes(self, dt):
# Kill the notes that have travelled far enough. This distance
# used to be the screen width, but this does not apply when it's tilted
if (self.active_sprites[0].sprite.x \
+ self.active_sprites[0].sprite.width) < -100: # A little bit of margin
self.active_sprites[0].die()
self.active_sprites.pop(0)
# At the same time, we add new notes at the end once the last
# currently active note is supposed to appear on screen.
# Again, this is not the same anymore.
if self.active_sprites[-1].sprite.x < (options.window_width + 500):
# Alternatively, one should store the length of the longest notes
# This could cause bugs if the last note is longer than window_width + 200
# Recall that self.notecounter is the index of the next note currently not on screen.
note = self.death_notes[self.notecounter]
# Put it at the correct distance behind the last note
note.sprite.x = self.active_sprites[-1].sprite.x \
+ (note.note.start - self.active_sprites[-1].note.start) \
* graphics.quarterlen / self.tab.ticksPerQuarter
# Add the note and it's label to the batches
note.sprite.batch = self.note_batch
note.label.begin_update()
note.label.batch = self.label_batch
note.label.end_update()
self.active_sprites.append(note)
self.notecounter += 1
def _compare_notes(self, notes_played):
if notes_played is None:
return
for note in self.the_edge:
if note.note.pitch in notes_played:
note.is_played()
#give points
self.points += 1 # This sounds bad
self.pointmeter.text = str(self.points)
self.particles.explode(pos=(note.sprite.x,
note.sprite.y, 0))
else:
note.missed()
self.nonpoints += 1
try:
procent = float(self.points) / (self.points + self.nonpoints)
except ZeroDivisionError:
procent = 1.0
procent = int(procent * 100)
self.pointprocent.text = str(procent) + "%"
| gpl-3.0 | 8,430,830,132,402,125,000 | 35.271277 | 151 | 0.568412 | false |
mediatum/mediatum | workflow/addtofolder.py | 1 | 4475 | # -*- coding: utf-8 -*-
"""
mediatum - a multimedia content repository
Copyright (C) 2011 Arne Seifert <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from .upload import WorkflowStep
from .workflow import registerStep
from core.translation import t, addLabels
from utils.utils import isNumeric
from core import Node
from core import db
import json
from schema.schema import Metafield
from contenttypes.container import Directory
q = db.query
def register():
registerStep("workflowstep_addtofolder")
addLabels(WorkflowStep_AddToFolder.getLabels())
class WorkflowStep_AddToFolder(WorkflowStep):
"""
workflowstep that adds item to selectable subfolder.
attributes:
- destination: list of node ids ;-separated
- subfolder: list of subfolders below destination, if a subfolder exists the item is added and the remaining
subfolders are ignored
subfolders are specified as json-string and can contain metadata from the item, like:
["{faculty}/Prüfungsarbeiten/{type}en/", "{faculty}/Prüfungsarbeiten/Weitere Prüfungsarbeiten/"]
"""
def show_workflow_node(self, node, req):
return self.forwardAndShow(node, True, req)
def getFolder(self, node, destNode, subfolder):
"""
search the subfolder below destNode
:param node: node which should be placed in the subfolder, parts of the node attributes may be specified
in subfolder
:param destNode: destination Node under which the subfolder is searched
:param subfolder: directorypath to the subfolder below destNode like: "{faculty}/Prüfungsarbeiten/{type}en/"
:return: returns the node if the subfolder if found or None
"""
subfolderNode = destNode
for subdir in subfolder.format(**node.attrs).split("/"):
if not subdir:
continue
subfolderNode = subfolderNode.children.filter_by(name=subdir).scalar()
if not subfolderNode:
return None
return subfolderNode
def runAction(self, node, op=""):
subfolders = json.loads(self.get('destination_subfolder'))
for nid in self.get('destination').split(";"):
if not nid:
continue
destNode = q(Node).get(nid)
if not destNode:
continue
for subfolder in subfolders:
subfolderNode = self.getFolder(node, destNode, subfolder)
if not subfolderNode:
continue
subfolderNode.children.append(node)
db.session.commit()
break
def metaFields(self, lang=None):
ret = []
field = Metafield("destination")
field.set("label", t(lang, "admin_wfstep_addtofolder_destination"))
field.set("type", "treeselect")
ret.append(field)
field = Metafield("destination_subfolder")
field.set("label", t(lang, "admin_wfstep_addtofolder_destination_subfolder"))
field.set("type", "text")
ret.append(field)
return ret
@staticmethod
def getLabels():
return {"de":
[
("workflowstep-addtofolder", "Zu einem Verzeichnis hinzufügen"),
("admin_wfstep_addtofolder_destination", "Zielknoten-ID"),
("admin_wfstep_addtofolder_destination_subfolder", "Unterverzeichnis"),
],
"en":
[
("workflowstep-addtofolder", "add to folder"),
("admin_wfstep_addtofolder_destination", "ID of destination node"),
("admin_wfstep_addtofolder_destination_subfolder", "sub folder"),
]
}
| gpl-3.0 | 92,644,555,041,265,800 | 36.25 | 121 | 0.625056 | false |
greggian/TapdIn | django/core/handlers/base.py | 1 | 9267 | import sys
from django import http
from django.core import signals
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.fix_location_header,
http.conditional_content_removal,
http.fix_IE_for_attach,
http.fix_IE_for_vary,
]
def __init__(self):
self._request_middleware = self._view_middleware = self._response_middleware = self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__).
"""
from django.conf import settings
from django.core import exceptions
self._view_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
try:
dot = middleware_path.rindex('.')
except ValueError:
raise exceptions.ImproperlyConfigured, '%s isn\'t a middleware module' % middleware_path
mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]
try:
mod = import_module(mw_module)
except ImportError, e:
raise exceptions.ImproperlyConfigured, 'Error importing middleware %s: "%s"' % (mw_module, e)
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured, 'Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname)
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
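    # A minimal middleware class that load_middleware() above would accept
    # (illustrative sketch; only the hook names checked with hasattr() matter):
    #
    #   class ExampleMiddleware(object):
    #       def process_request(self, request):
    #           return None               # None means "continue processing"
    #       def process_response(self, request, response):
    #           return response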
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
from django.core import exceptions, urlresolvers
from django.conf import settings
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
return response
# Get urlconf from request object, if available. Otherwise use default.
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
try:
callback, callback_args, callback_kwargs = resolver.resolve(
request.path_info)
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
return response
try:
response = callback(request, *callback_args, **callback_kwargs)
except Exception, e:
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
# response, use that. Otherwise, reraise the exception.
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
return response
raise
# Complain if the view returned None (a common error).
if response is None:
try:
view_name = callback.func_name # If it's a function
except AttributeError:
view_name = callback.__class__.__name__ + '.__call__' # If it's a class
raise ValueError, "The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name)
return response
except http.Http404, e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
else:
try:
callback, param_dict = resolver.resolve404()
return callback(request, **param_dict)
except:
try:
return self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
except exceptions.PermissionDenied:
return http.HttpResponseForbidden('<h1>Permission denied</h1>')
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except: # Handle everything else, including SuspiciousOperation, etc.
# Get the exception info now, in case another exception is thrown later.
exc_info = sys.exc_info()
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
return self.handle_uncaught_exception(request, resolver, exc_info)
def handle_uncaught_exception(self, request, resolver, exc_info):
"""
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
"""
from django.conf import settings
from django.core.mail import mail_admins
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
if settings.DEBUG:
from django.views import debug
return debug.technical_500_response(request, *exc_info)
# When DEBUG is False, send an error message to the admins.
subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
try:
request_repr = repr(request)
except:
request_repr = "Request repr() unavailable"
message = "%s\n\n%s" % (self._get_traceback(exc_info), request_repr)
mail_admins(subject, message, fail_silently=True)
# Return an HttpResponse that displays a friendly error message.
callback, param_dict = resolver.resolve500()
return callback(request, **param_dict)
def _get_traceback(self, exc_info=None):
"Helper function to return the traceback as a string"
import traceback
return '\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless DJANGO_USE_POST_REWRITE is set (to
anything).
"""
from django.conf import settings
if settings.FORCE_SCRIPT_NAME is not None:
return force_unicode(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every webserver (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = environ.get('SCRIPT_URL', u'')
if not script_url:
script_url = environ.get('REDIRECT_URL', u'')
if script_url:
return force_unicode(script_url[:-len(environ.get('PATH_INFO', ''))])
return force_unicode(environ.get('SCRIPT_NAME', u''))
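# Illustrative (hypothetical) request: with FORCE_SCRIPT_NAME unset,
# SCRIPT_URL='/app/admin/login/' and PATH_INFO='/admin/login/', the slice above
# strips the trailing PATH_INFO and the function returns u'/app'.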
| apache-2.0 | -8,968,178,709,805,447,000 | 42.985437 | 143 | 0.604726 | false |
luksan/kodos | modules/urlDialog.py | 1 | 1874 | # -*- coding: utf-8; mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; truncate-lines: 0 -*-
# vi: set fileencoding=utf-8 filetype=python expandtab tabstop=4 shiftwidth=4 softtabstop=4 cindent:
# :mode=python:indentSize=4:tabSize=4:noTabs=true:
#-----------------------------------------------------------------------------#
# Built-in modules
import urllib
#-----------------------------------------------------------------------------#
# Installed modules
from PyQt4 import QtGui, QtCore
#-----------------------------------------------------------------------------#
# Kodos modules
from .urlDialogBA import Ui_URLDialogBA
from . import help
#-----------------------------------------------------------------------------#
class URLDialog(QtGui.QDialog, Ui_URLDialogBA):
urlImported = QtCore.pyqtSignal(str, str)
def __init__(self, url=None, parent=None, f=QtCore.Qt.WindowFlags()):
QtGui.QDialog.__init__(self, parent, f)
self.setupUi(self)
if url:
self.URLTextEdit.setPlainText(url)
self.show()
return
def help_slot(self):
self.helpWindow = help.Help(self, "importURL.html")
return
def ok_slot(self):
url = str(self.URLTextEdit.toPlainText())
try:
fp = urllib.urlopen(url)
lines = fp.readlines()
except Exception as e:
QtGui.QMessageBox.information(
None,
"Failed to open URL",
"Could not open the specified URL. Please check to ensure \
that you have entered the correct URL.\n\n{0}".format(str(e))
)
return
html = ''.join(lines)
self.urlImported.emit(html, url)
self.accept()
return
#-----------------------------------------------------------------------------#
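# Illustrative sketch of how a parent widget might use this dialog; the slot body and the
# example URL are assumptions, Kodos itself wires the signal up elsewhere.
def _example_usage(parent=None):
    def on_url_imported(html, url):
        print("imported %d characters from %s" % (len(html), url))
    dialog = URLDialog(url="http://example.com", parent=parent)
    dialog.urlImported.connect(on_url_imported)
    return dialog
#-----------------------------------------------------------------------------#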
| gpl-2.0 | -2,844,676,749,710,994,000 | 28.28125 | 112 | 0.47492 | false |
luzhuomi/collamine-client-python | scrapybot/scrapybot/spiders/hwz.py | 1 | 1627 | from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from scrapybot.items import ScrapybotItem
from scrapybot.utils import normalizeFriendlyDate
import datetime
from dateutil.parser import parse
from django.utils import timezone
import django.db.utils
class HwzSpider(CrawlSpider):
name = "hwz"
allowed_domains = ["hardwarezone.com.sg"]
domain = "www.hardwarezone.com" # for collamine upload
start_urls = [
"http://forums.hardwarezone.com.sg/current-affairs-lounge-17/",
"http://forums.hardwarezone.com.sg/money-mind-210/"
]
rules = (
Rule(SgmlLinkExtractor(allow=('current\-affairs\-lounge\-17/.*\.html', )), callback='parse_item', follow=True),
Rule(SgmlLinkExtractor(allow=('money\-mind\-210/.*\.html', )), callback='parse_item', follow=True),
)
"""
When writing crawl spider rules, avoid using parse as callback, since the CrawlSpider uses the parse method itself to implement its logic. So if you override the parse method, the crawl spider will no longer work.
"""
def parse_item(self, response):
source="original"
if ((response.flags) and ("collamine" in response.flags)):
source="collamine"
i = ScrapybotItem(url=response.url,
domain=self.allowed_domains[0],
source=source,
content=response.body.decode(response.encoding),
crawled_date=timezone.now())
try:
i.save()
except django.db.utils.IntegrityError:
print "url exists"
| apache-2.0 | -3,216,291,927,608,126,000 | 35.977273 | 217 | 0.690227 | false |
oudalab/phyllo | phyllo/extractors/germanicusDB.py | 1 | 3002 | import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
def main():
# The collection URL below.
collURL = 'http://www.thelatinlibrary.com/germanicus.html'
collOpen = urllib.request.urlopen(collURL)
collSOUP = BeautifulSoup(collOpen, 'html5lib')
author = collSOUP.title.string.split(":")[0].strip()
colltitle = collSOUP.p.string.split(":")[0].strip()
date = "no date found"
textsURL = [collURL]
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author = 'Germanicus'")
for url in textsURL:
chapter = "-1"
verse = 0
title = collSOUP.title.string.split(":")[1].strip()
openurl = urllib.request.urlopen(url)
textsoup = BeautifulSoup(openurl, 'html5lib')
getp = textsoup.find_all('p')
for p in getp:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
                                                 'internal_navigation']: # these are not part of the main text
continue
except:
pass
txtstr = p.get_text().strip()
if txtstr.startswith("The"):
continue
brtags = p.findAll('br')
verses = []
try:
firstline = brtags[0].previous_sibling.previous_sibling.strip()
except:
firstline = brtags[0].previous_sibling.strip()
verses.append(firstline)
for br in brtags:
try:
text = br.next_sibling.next_sibling.strip()
except:
text = br.next_sibling.strip()
if text is None or text == '' or text.isspace():
continue
                    # remove trailing in-text line numbers (e.g. "... 15")
                    if re.search(r'[0-9]+$', text):
                        text = re.sub(r'\s*[0-9]+$', '', text).strip()
verses.append(text)
for v in verses:
# verse number assignment.
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v, url, 'poetry'))
if __name__ == '__main__':
main()
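# Illustrative helper (not called by main()) for inspecting what the scraper stored; the
# table and column names come from the CREATE TABLE statement above, the LIMIT is arbitrary.
def show_sample(db_path='texts.db', limit=5):
    with sqlite3.connect(db_path) as db:
        c = db.cursor()
        c.execute("SELECT chapter, verse, passage FROM texts "
                  "WHERE author = 'Germanicus' ORDER BY verse LIMIT ?", (limit,))
        for chapter, verse, passage in c.fetchall():
            print(chapter, verse, passage[:60])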
| apache-2.0 | -921,874,890,846,832,400 | 34.738095 | 111 | 0.476682 | false |
bird-house/esgf-pyclient | pyesgf/test/test_context.py | 1 | 7082 | """
Test the SearchContext class
"""
from pyesgf.search import SearchConnection, not_equals
from unittest import TestCase
import os
class TestContext(TestCase):
def setUp(self):
self.test_service = 'http://esgf-index1.ceda.ac.uk/esg-search'
self.cache = os.path.join(os.path.dirname(__file__), 'url_cache')
def test_context_freetext(self):
conn = SearchConnection(self.test_service, cache=self.cache)
context = conn.new_context(query="temperature")
assert context.freetext_constraint == "temperature"
def test_context_facets2(self):
conn = SearchConnection(self.test_service, cache=self.cache)
context = conn.new_context(project='CMIP5')
context2 = context.constrain(model="IPSL-CM5A-LR")
assert context2.facet_constraints['project'] == 'CMIP5'
assert context2.facet_constraints['model'] == 'IPSL-CM5A-LR'
def test_context_facets_multivalue(self):
conn = SearchConnection(self.test_service, cache=self.cache)
context = conn.new_context(project='CMIP5')
context2 = context.constrain(model=['IPSL-CM5A-LR', 'IPSL-CM5A-MR'])
assert context2.hit_count > 0
assert context2.facet_constraints['project'] == 'CMIP5'
assert sorted(context2
.facet_constraints
.getall('model')) == ['IPSL-CM5A-LR', 'IPSL-CM5A-MR']
def test_context_facet_multivalue2(self):
conn = SearchConnection(self.test_service, cache=self.cache)
context = conn.new_context(project='CMIP5', model='IPSL-CM5A-MR')
assert context.facet_constraints.getall('model') == ['IPSL-CM5A-MR']
context2 = context.constrain(model=['IPSL-CM5A-MR', 'IPSL-CM5A-LR'])
assert sorted(context2
.facet_constraints
.getall('model')) == ['IPSL-CM5A-LR', 'IPSL-CM5A-MR']
def test_context_facet_multivalue3(self):
conn = SearchConnection(self.test_service, cache=self.cache)
ctx = conn.new_context(project='CMIP5', query='humidity',
experiment='rcp45')
hits1 = ctx.hit_count
assert hits1 > 0
ctx2 = conn.new_context(project='CMIP5', query='humidity',
experiment=['rcp45', 'rcp85'])
hits2 = ctx2.hit_count
assert hits2 > hits1
def test_context_facet_options(self):
conn = SearchConnection(self.test_service, cache=self.cache)
context = conn.new_context(project='CMIP5', model='IPSL-CM5A-LR',
ensemble='r1i1p1', experiment='rcp60',
realm='seaIce')
expected = sorted([u'access', u'index_node', u'data_node', u'format',
u'cf_standard_name', u'variable_long_name',
u'cmor_table', u'time_frequency', u'variable'])
assert sorted(context.get_facet_options().keys()) == expected
def test_context_facets3(self):
conn = SearchConnection(self.test_service, cache=self.cache)
context = conn.new_context(project='CMIP5')
context2 = context.constrain(model="IPSL-CM5A-LR")
results = context2.search()
result = results[0]
assert result.json['project'] == ['CMIP5']
assert result.json['model'] == ['IPSL-CM5A-LR']
def test_facet_count(self):
conn = SearchConnection(self.test_service, cache=self.cache)
context = conn.new_context(project='CMIP5')
context2 = context.constrain(model="IPSL-CM5A-LR")
counts = context2.facet_counts
assert list(counts['model'].keys()) == ['IPSL-CM5A-LR']
assert list(counts['project'].keys()) == ['CMIP5']
def test_distrib(self):
conn = SearchConnection(self.test_service, cache=self.cache,
distrib=False)
context = conn.new_context(project='CMIP5')
count1 = context.hit_count
conn2 = SearchConnection(self.test_service, cache=self.cache,
distrib=True)
context = conn2.new_context(project='CMIP5')
count2 = context.hit_count
assert count1 < count2
def test_constrain(self):
conn = SearchConnection(self.test_service, cache=self.cache)
context = conn.new_context(project='CMIP5')
count1 = context.hit_count
context = context.constrain(model="IPSL-CM5A-LR")
count2 = context.hit_count
assert count1 > count2
def test_constrain_freetext(self):
conn = SearchConnection(self.test_service, cache=self.cache)
context = conn.new_context(project='CMIP5', query='humidity')
assert context.freetext_constraint == 'humidity'
context = context.constrain(experiment='historical')
assert context.freetext_constraint == 'humidity'
def test_constrain_regression1(self):
conn = SearchConnection(self.test_service, cache=self.cache)
context = conn.new_context(project='CMIP5', model='IPSL-CM5A-LR')
assert 'experiment' not in context.facet_constraints
context2 = context.constrain(experiment='historical')
assert 'experiment' in context2.facet_constraints
def test_negative_facet(self):
conn = SearchConnection(self.test_service, cache=self.cache)
context = conn.new_context(project='CMIP5', model='IPSL-CM5A-LR')
hits1 = context.hit_count
print(context.facet_counts['experiment'])
context2 = context.constrain(experiment='historical')
hits2 = context2.hit_count
context3 = context.constrain(experiment=not_equals('historical'))
hits3 = context3.hit_count
assert hits1 == hits2 + hits3
def test_replica(self):
# Test that we can exclude replicas
        # This test assumes the test dataset is replicated
conn = SearchConnection(self.test_service, cache=self.cache)
qry = 'id:cmip5.output1.MOHC.HadGEM2-ES.rcp45.mon.atmos.Amon.r1i1p1.*'
version = '20111128'
# Search for all replicas
context = conn.new_context(query=qry, version=version)
assert context.hit_count == 2
# Search for only one replicant
context = conn.new_context(query=qry, replica=False, version=version)
assert context.hit_count == 1
def test_response_from_bad_parameter(self):
# Test that a bad parameter name raises a useful exception
# NOTE::: !!! This would fail because urllib2 HTTP query is overridden
# !!! with
# !!! cache handler instead of usual response.
# !!! So catch other error instead
conn = SearchConnection(self.test_service, cache=self.cache)
context = conn.new_context(project='CMIP5', rubbish='nonsense')
try:
context.hit_count
except Exception as err:
assert str(err).strip() in ("Invalid query parameter(s): rubbish",
"No JSON object could be decoded")
| bsd-3-clause | 8,450,543,210,295,021,000 | 37.48913 | 78 | 0.617905 | false |
kubernetes-client/python | kubernetes/client/models/v1_job_spec.py | 1 | 13937 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1JobSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'active_deadline_seconds': 'int',
'backoff_limit': 'int',
'completions': 'int',
'manual_selector': 'bool',
'parallelism': 'int',
'selector': 'V1LabelSelector',
'template': 'V1PodTemplateSpec',
'ttl_seconds_after_finished': 'int'
}
attribute_map = {
'active_deadline_seconds': 'activeDeadlineSeconds',
'backoff_limit': 'backoffLimit',
'completions': 'completions',
'manual_selector': 'manualSelector',
'parallelism': 'parallelism',
'selector': 'selector',
'template': 'template',
'ttl_seconds_after_finished': 'ttlSecondsAfterFinished'
}
def __init__(self, active_deadline_seconds=None, backoff_limit=None, completions=None, manual_selector=None, parallelism=None, selector=None, template=None, ttl_seconds_after_finished=None, local_vars_configuration=None): # noqa: E501
"""V1JobSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._active_deadline_seconds = None
self._backoff_limit = None
self._completions = None
self._manual_selector = None
self._parallelism = None
self._selector = None
self._template = None
self._ttl_seconds_after_finished = None
self.discriminator = None
if active_deadline_seconds is not None:
self.active_deadline_seconds = active_deadline_seconds
if backoff_limit is not None:
self.backoff_limit = backoff_limit
if completions is not None:
self.completions = completions
if manual_selector is not None:
self.manual_selector = manual_selector
if parallelism is not None:
self.parallelism = parallelism
if selector is not None:
self.selector = selector
self.template = template
if ttl_seconds_after_finished is not None:
self.ttl_seconds_after_finished = ttl_seconds_after_finished
@property
def active_deadline_seconds(self):
"""Gets the active_deadline_seconds of this V1JobSpec. # noqa: E501
Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer # noqa: E501
:return: The active_deadline_seconds of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._active_deadline_seconds
@active_deadline_seconds.setter
def active_deadline_seconds(self, active_deadline_seconds):
"""Sets the active_deadline_seconds of this V1JobSpec.
Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer # noqa: E501
:param active_deadline_seconds: The active_deadline_seconds of this V1JobSpec. # noqa: E501
:type: int
"""
self._active_deadline_seconds = active_deadline_seconds
@property
def backoff_limit(self):
"""Gets the backoff_limit of this V1JobSpec. # noqa: E501
Specifies the number of retries before marking this job failed. Defaults to 6 # noqa: E501
:return: The backoff_limit of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._backoff_limit
@backoff_limit.setter
def backoff_limit(self, backoff_limit):
"""Sets the backoff_limit of this V1JobSpec.
Specifies the number of retries before marking this job failed. Defaults to 6 # noqa: E501
:param backoff_limit: The backoff_limit of this V1JobSpec. # noqa: E501
:type: int
"""
self._backoff_limit = backoff_limit
@property
def completions(self):
"""Gets the completions of this V1JobSpec. # noqa: E501
Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
:return: The completions of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._completions
@completions.setter
def completions(self, completions):
"""Sets the completions of this V1JobSpec.
Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
:param completions: The completions of this V1JobSpec. # noqa: E501
:type: int
"""
self._completions = completions
@property
def manual_selector(self):
"""Gets the manual_selector of this V1JobSpec. # noqa: E501
manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector # noqa: E501
:return: The manual_selector of this V1JobSpec. # noqa: E501
:rtype: bool
"""
return self._manual_selector
@manual_selector.setter
def manual_selector(self, manual_selector):
"""Sets the manual_selector of this V1JobSpec.
manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector # noqa: E501
:param manual_selector: The manual_selector of this V1JobSpec. # noqa: E501
:type: bool
"""
self._manual_selector = manual_selector
@property
def parallelism(self):
"""Gets the parallelism of this V1JobSpec. # noqa: E501
Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
:return: The parallelism of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._parallelism
@parallelism.setter
def parallelism(self, parallelism):
"""Sets the parallelism of this V1JobSpec.
Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
:param parallelism: The parallelism of this V1JobSpec. # noqa: E501
:type: int
"""
self._parallelism = parallelism
@property
def selector(self):
"""Gets the selector of this V1JobSpec. # noqa: E501
:return: The selector of this V1JobSpec. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1JobSpec.
:param selector: The selector of this V1JobSpec. # noqa: E501
:type: V1LabelSelector
"""
self._selector = selector
@property
def template(self):
"""Gets the template of this V1JobSpec. # noqa: E501
:return: The template of this V1JobSpec. # noqa: E501
:rtype: V1PodTemplateSpec
"""
return self._template
@template.setter
def template(self, template):
"""Sets the template of this V1JobSpec.
:param template: The template of this V1JobSpec. # noqa: E501
:type: V1PodTemplateSpec
"""
if self.local_vars_configuration.client_side_validation and template is None: # noqa: E501
raise ValueError("Invalid value for `template`, must not be `None`") # noqa: E501
self._template = template
@property
def ttl_seconds_after_finished(self):
"""Gets the ttl_seconds_after_finished of this V1JobSpec. # noqa: E501
ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature. # noqa: E501
:return: The ttl_seconds_after_finished of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._ttl_seconds_after_finished
@ttl_seconds_after_finished.setter
def ttl_seconds_after_finished(self, ttl_seconds_after_finished):
"""Sets the ttl_seconds_after_finished of this V1JobSpec.
ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature. # noqa: E501
:param ttl_seconds_after_finished: The ttl_seconds_after_finished of this V1JobSpec. # noqa: E501
:type: int
"""
self._ttl_seconds_after_finished = ttl_seconds_after_finished
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1JobSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1JobSpec):
return True
return self.to_dict() != other.to_dict()
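# Illustrative usage sketch (not part of the generated client): building a V1JobSpec by
# hand. The pod template mirrors the classic "compute pi" Job example; image, names and
# labels are placeholder values.
if __name__ == "__main__":
    from kubernetes import client
    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "pi"}),
        spec=client.V1PodSpec(
            restart_policy="Never",
            containers=[client.V1Container(
                name="pi",
                image="perl",
                command=["perl", "-Mbignum=bpi", "-wle", "print bpi(100)"])]))
    job_spec = V1JobSpec(template=pod_template, backoff_limit=4, completions=1)
    print(job_spec.backoff_limit)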
| apache-2.0 | 6,172,444,438,612,771,000 | 43.244444 | 685 | 0.664705 | false |
andrewjpage/gubbins | python/scripts/gubbins_drawer.py | 1 | 26242 | #!/usr/bin/env python3
#################################
# Import some necessary modules #
#################################
import argparse
import pkg_resources
from Bio.Nexus import Trees, Nodes
from Bio.Graphics.GenomeDiagram._Colors import ColorTranslator
from Bio.GenBank import Scanner
from Bio.GenBank import _FeatureConsumer
from Bio.GenBank.utils import FeatureValueCleaner
from reportlab.lib.units import inch
from reportlab.lib import pagesizes
from reportlab.graphics.shapes import *
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics import renderPDF
################################
# Get the command line options #
################################
def main():
parser = argparse.ArgumentParser(description='Gubbins Drawer creates a PDF with a tree on one side and the recombination regions plotted on the reference space on the other side. An interactive version can be found at https://sanger-pathogens.github.io/phandango/', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('tree', help='Tree in Newick format, such as XXXX.final_tree.tre')
parser.add_argument('embl_file', help='EMBL file, such as XXXX.recombination_predictions.embl')
parser.add_argument( '-o', '--outputfile', help='Output PDF filename', default = 'gubbins_recombinations.pdf')
return parser.parse_args()
##########################################################
# Function to read an alignment whichever format it's in #
##########################################################
def tab_parser(handle, quiet=False):
def Drawer_parse_tab_features(object, skip=False):
features = []
line = object.line
while True:
if not line:
break
raise ValueError("Premature end of line during features table")
if line[:object.HEADER_WIDTH].rstrip() in object.SEQUENCE_HEADERS:
if object.debug : print("Found start of sequence")
break
line = line.rstrip()
if line == "//":
raise ValueError("Premature end of features table, marker '//' found")
if line in object.FEATURE_END_MARKERS:
if object.debug : print("Found end of features")
line = object.handle.readline()
break
if line[2:object.FEATURE_QUALIFIER_INDENT].strip() == "":
print(line[2:object.FEATURE_QUALIFIER_INDENT].strip())
raise ValueError("Expected a feature qualifier in line '%s'" % line)
if skip:
line = object.handle.readline()
while line[:object.FEATURE_QUALIFIER_INDENT] == object.FEATURE_QUALIFIER_SPACER:
line = object.handle.readline()
else:
#Build up a list of the lines making up this feature:
feature_key = line[2:object.FEATURE_QUALIFIER_INDENT].strip()
feature_lines = [line[object.FEATURE_QUALIFIER_INDENT:]]
line = object.handle.readline()
while line[:object.FEATURE_QUALIFIER_INDENT] == object.FEATURE_QUALIFIER_SPACER or line.rstrip() == "" : # cope with blank lines in the midst of a feature
feature_lines.append(line[object.FEATURE_QUALIFIER_INDENT:].rstrip())
line = object.handle.readline()
if len(line)==0:
break#EOF
feature_lines.append('/seq="N"')
sys.stdout.flush()
features.append(object.parse_feature(feature_key, feature_lines))
object.line = line
return features
def Drawer_feed(object, handle, consumer, do_features=True):
if do_features:
object._feed_feature_table(consumer, Drawer_parse_tab_features(object,skip=False))
else:
Drawer_parse_tab_features(object,skip=True) # ignore the data
sequence_string="N"
consumer.sequence(sequence_string)
consumer.record_end("//")
length=0
for record in consumer.data.features:
if record.location.nofuzzy_end>length:
length=record.location.nofuzzy_end
consumer.data.seq="N"*length
return True
myscanner=Scanner.InsdcScanner()
myscanner.set_handle(handle)
myscanner.line=myscanner.handle.readline()
myscanner.FEATURE_QUALIFIER_INDENT=21
myscanner.FEATURE_QUALIFIER_SPACER = "FT" + " " * (myscanner.FEATURE_QUALIFIER_INDENT-2)
myscanner.debug=True
consumer = _FeatureConsumer(use_fuzziness = 1, feature_cleaner = FeatureValueCleaner())
Drawer_feed(myscanner, handle, consumer)
return consumer.data
####################################################
# Function to round floats to n significant digits #
####################################################
def round_to_n(x, n):
if n < 1:
raise ValueError("number of significant digits must be >= 1")
# Use %e format to get the n most significant digits, as a string.
format = "%." + str(n-1) + "e"
as_string = format % x
if x>=10 or x<=-10:
return int(float(as_string))
else:
return float(as_string)
##############################################################################################################
# Function to convert features with subfeatures (e.g. pseudogenes) to a list of locations of the subfeatures #
##############################################################################################################
def iterate_subfeatures(feature, locations):
if len(feature.sub_features)>0:
for subfeature in feature.sub_features:
locations=iterate_subfeatures(subfeature, locations)
else:
locations.append((feature.location.start.position, feature.location.end.position))
return locations
####################################################
# Function to get the pixel width of a text string #
####################################################
def get_text_width(font, size, text):
c = Canvas(test,pagesize=pagesize)
length= c.stringWidth(str(text),font,size)
return length
#####################################################################################
# Function to add an embl file to multiple tracks split using the qualifiers option #
#####################################################################################
def add_ordered_embl_to_diagram(record, incfeatures=["CDS", "feature"], emblfile=True):
incfeatures= [x.lower() for x in incfeatures]
new_tracks={}
print(len(record.features), "features found for", record.name)
if len(record.seq)>500000:
scale_largetick_interval=int(round((len(record.seq)/10),-5))
scale_smalltick_interval=int(round((len(record.seq)/10),-5)/5)
else:
scale_largetick_interval=len(record.seq)
scale_smalltick_interval=len(record.seq)/5
for x, feature in enumerate(record.features):
if feature.type.lower() not in incfeatures or feature.location.nofuzzy_end<0 or (feature.location.nofuzzy_start>-1 and -1!=-1):
continue
if "colour" in feature.qualifiers:
colourline=feature.qualifiers["colour"][0]
elif "color" in feature.qualifiers:
colourline=feature.qualifiers["color"][0]
else:
colourline = "5"
if len(colourline.split())==1:
colour=translator.artemis_color(colourline)
elif len(colourline.split())==3:
colour=translator.int255_color((int(colourline.split()[0]),int(colourline.split()[1]),int(colourline.split()[2])))
else:
print("Can't understand colour code!")
print(colourline)
sys.exit()
locations=[]
locations.append((feature.location.start, feature.location.end))
if "taxa" in feature.qualifiers:
qualifiernames=feature.qualifiers["taxa"][0].replace(", "," ").split()
for taxonname in qualifiernames:
taxonname=taxonname.strip()
if not taxonname in new_tracks:
newtrack = Track()
newtrack.name=taxonname
new_tracks[taxonname]=newtrack
arrows=0
new_tracks[taxonname].add_feature(locations, fillcolour=colour, strokecolour=colour, arrows=arrows)
else:
if not record.name in new_tracks:
newtrack = Track()
newtrack.name=record.name
new_tracks[record.name]=newtrack
arrows=0
new_tracks[record.name].add_feature(locations, fillcolour=colour, strokecolour=colour, arrows=arrows)
if len(new_tracks)>1 and record.name in new_tracks:
del new_tracks[record.name]
return new_tracks
###################################################################################
# Function to add a tab file to multiple tracks split using the qualifiers option #
###################################################################################
def add_ordered_tab_to_diagram(filename):
features={"":[]}
featurename=""
names_to_add_feature_to=[]
try:
record=tab_parser(open(filename,"r"))
except IOError:
print("Cannot find file", filename)
sys.exit()
record.name=filename
new_tracks=add_ordered_embl_to_diagram(record, incfeatures=["i", "d", "li", "del", "snp", "misc_feature", "core", "cds", "insertion", "deletion", "recombination", "feature", "blastn_hit", "fasta_record", "variation"], emblfile=False)
return new_tracks
def add_empty_track(existing_tracks, track_name):
newtrack = Track()
newtrack.name=track_name
newtrack.beginning=0
newtrack.track_height=1
existing_tracks[track_name] = newtrack
existing_tracks[track_name].add_feature(locations=[(0,0)], fillcolour=translator.artemis_color(2), strokecolour=translator.artemis_color(2), arrows=0)
return existing_tracks
#############################
# Function to draw the tree #
#############################
def drawtree(treeObject, treeheight, treewidth, xoffset, yoffset, name_offset=5):
def get_max_branch_depth():
terminals=treeObject.get_terminals()
maxbrlen=0.0
for terminal in terminals:
if treeObject.sum_branchlength(node=terminal)>maxbrlen:
maxbrlen=treeObject.sum_branchlength(node=terminal)
return maxbrlen
def draw_scale():
if vertical_scaling_factor<5:
linewidth=0.5
else:
linewidth=1.0
branchlength=round_to_n(max_branch_depth/10, 2)*horizontal_scaling_factor
horizontalpos=xoffset+round_to_n(max_branch_depth/10, 2)*horizontal_scaling_factor
vertpos=treebase-fontsize
scalestring = str(round_to_n(max_branch_depth/10, 2))
scalefontsize=fontsize
if scalefontsize<6:
scalefontsize=6
d.add(Line(horizontalpos, vertpos, horizontalpos+branchlength, vertpos, strokeWidth=linewidth))
d.add(String(horizontalpos+(float(branchlength)/2), vertpos-(scalefontsize+1), scalestring, textAnchor='middle', fontSize=scalefontsize, fontName='Helvetica'))
def get_node_vertical_positions():
def get_node_vertical_position(node):
for daughter in treeObject.node(node).succ:
get_node_vertical_position(daughter)
if not treeObject.is_terminal(node):
daughters=treeObject.node(node).succ
if treeObject.node(node).data.comment==None:
treeObject.node(node).data.comment={}
treeObject.node(node).data.comment["vertpos"]=float(treeObject.node(daughters[0]).data.comment["vertpos"]+treeObject.node(daughters[-1]).data.comment["vertpos"])/2
node=treeObject.root
get_node_vertical_position(node)
def drawbranch(node,horizontalpos):
vertpos=treeObject.node(node).data.comment["vertpos"]+yoffset
horizontalpos+=xoffset
branchlength=treeObject.node(node).data.branchlength*horizontal_scaling_factor
if vertical_scaling_factor<5:
linewidth=0.5
else:
linewidth=1.0
if treeObject.node(node).data.comment and "branch_colour" in treeObject.node(node).data.comment:
r,g,b=treeObject.node(node).data.comment["branch_colour"]
branch_colour=colors.Color(float(r)/255,float(g)/255,float(b)/255)
else:
branch_colour=colors.black
if branchlength<linewidth:
branchlength=linewidth
d.add(Line(horizontalpos-(linewidth/2), vertpos, (horizontalpos-(linewidth/2))+branchlength, vertpos, strokeWidth=linewidth, strokeColor=branch_colour))
if node!=treeObject.root:
parentnode=treeObject.node(node).prev
sisters=treeObject.node(parentnode).succ
parentvertpos=treeObject.node(parentnode).data.comment["vertpos"]+yoffset
d.add(Line(horizontalpos, vertpos, horizontalpos, parentvertpos, strokeWidth=linewidth, strokeColor=branch_colour))
if treeObject.is_terminal(node):
if treeObject.node(node).data.comment and "name_colour" in treeObject.node(node).data.comment:
name_colours=[]
for x in range(0,len(treeObject.node(node).data.comment["name_colour"])):
r,g,b= treeObject.node(node).data.comment["name_colour"][x]
name_colours.append(colors.Color(float(r)/255,float(g)/255,float(b)/255))
else:
name_colours=[colors.black]
gubbins_length=0.0
colpos=0
namewidth=get_text_width('Helvetica', fontsize, treeObject.node(node).data.taxon)+name_offset
gubbins_length += namewidth
colpos=1
for x in range(colpos,len(name_colours)):
gubbins_length += block_length
if x!=0:
gubbins_length += vertical_scaling_factor
#Add the taxon names
d.add(String(treewidth+xoffset+(max_name_width-gubbins_length)+(fontsize/2), vertpos-(fontsize/3), treeObject.node(node).data.taxon, textAnchor='start', fontSize=fontsize, fillColor=name_colours[0], fontName='Helvetica'))
block_xpos=treewidth+xoffset+(max_name_width-gubbins_length)+(fontsize/2)+namewidth
# draw dashed lines
d.add(Line(horizontalpos+branchlength, vertpos, treewidth+xoffset+(max_name_width-gubbins_length), vertpos, strokeDashArray=[1, 2], strokeWidth=linewidth/2, strokeColor=name_colours[0]))
def recurse_subtree(node, horizontalpos):
daughters=treeObject.node(node).succ
daughterhorizontalpos=horizontalpos+(treeObject.node(node).data.branchlength*horizontal_scaling_factor)
drawbranch(node,horizontalpos)
for daughter in daughters:
recurse_subtree(daughter,daughterhorizontalpos)
def get_max_name_width(name_offset, fontsize):
max_width=0.0
for taxon in treeObject.get_terminals():
curwidth= get_text_width("Helvetica", fontsize, treeObject.node(taxon).data.taxon)
if curwidth>max_width:
max_width=curwidth
return max_width
fontsize=vertical_scaling_factor
if fontsize>12:
fontsize=12
while get_max_name_width(name_offset, fontsize)+name_offset>treewidth/3:
fontsize-=0.2
max_name_width=get_max_name_width(name_offset, fontsize)+name_offset
colblockstart=1
block_length=0
treewidth-=(max_name_width+(fontsize/2))
max_branch_depth=get_max_branch_depth()
horizontal_scaling_factor=float(treewidth)/max_branch_depth
get_node_vertical_positions()
recurse_subtree(treeObject.root, 0)
treebase=treeObject.node(treeObject.get_terminals()[-1]).data.comment["vertpos"]+yoffset
draw_scale()
return
#################
# Drawing class #
#################
class Figure:
def __init__(self, beginning, end):
self.begnining=0
self.end=-1
###############
# Track class #
###############
class Track:
def __init__(self, track_position=[-1,-1], track_height=0, track_length=0, track_draw_proportion=0.75, scale=False, tick_marks=True, tick_mark_number=5, tick_mark_labels=True, minor_tick_marks=True, minor_tick_mark_number=3, features=[], beginning=0, end=-1):
self.track_position=track_position#horizontal and vertical position of centre of track
self.track_height=track_height#height of space allocated for track
self.track_length=track_length
self.track_draw_proportion=track_draw_proportion#proportion of the track that should be used for drawing
self.scale=scale
self.scale_position="middle"
self.tick_marks=tick_marks
self.tick_mark_number=tick_mark_number
self.tick_mark_labels=tick_mark_labels
self.tick_mark_label_font="Helvetica"
self.tick_mark_label_size=8
self.tick_mark_label_angle=45
self.minor_tick_marks=minor_tick_marks
self.minor_tick_mark_number=minor_tick_mark_number
self.features=features[:]
self.scaled_features=features[:]
self.draw_feature_labels=False
self.feature_label_size=8
self.feature_label_angle=0
self.feature_label_font="Helvetica"
self.greytrack=False
self.grey_track_colour=colors.Color(0.25,0.25,0.25)
self.grey_track_opacity_percent=10
self.max_feature_length=-1
self.beginning=0
self.end=-1
self.track_number=-1
self.plots=[]
self.fragments=1
self.name=""
self.show_name=False
self.name_font="Helvetica"
self.name_size=10
self.name_length=0
self.is_key=False
self.key_data=[]
def get_max_feature_length(self):
max_feature_length=0
for feature in self.features:
for location in feature.feature_locations:
if location[0]>max_feature_length:
max_feature_length=location[0]
if location[1]>max_feature_length:
max_feature_length=location[1]
return max_feature_length
def scale_feature_positions(self):
self.scaled_features=[]
if self.end!=-1:
length=float(self.end-self.beginning)
else:
length=float(self.max_feature_length-self.beginning)
for feature in self.features:
newfeature=Feature()
newfeature.fillcolour=feature.fillcolour
newfeature.strokecolour=feature.strokecolour
newfeature.strokeweight=feature.strokeweight
newfeature.strand=feature.strand
newfeature.label=feature.label
newfeature.arrows=feature.arrows
scaledlocations=[]
for location in feature.feature_locations:
start=location[0]
finish=location[1]
if self.beginning!=0:
if start<self.beginning and finish>self.beginning:
start=self.beginning
if self.end!=-1:
if start<self.end and finish>self.end:
finish=self.end
start-=self.beginning
finish-=self.beginning
scaledlocations.append(((float(start)/length)*self.track_length,(float(finish)/length)*self.track_length))
newfeature.feature_locations=scaledlocations
self.scaled_features.append(newfeature)
def draw_features(self):
if self.max_feature_length==-1:
return
else:
self.scale_feature_positions()
featuresort=[]
for x, feature in enumerate(self.scaled_features):
featuresort.append([feature.feature_locations[0][0], x])
joins=[]
for featurenum in featuresort[::-1]:
feature=self.scaled_features[featurenum[1]]
#if the feature is white, outline it in black so we can see it
if feature.strokecolour==colors.Color(1,1,1,1):
feature.strokecolour=colors.Color(0,0,0,1)
subfeaturesort=[]
for x, subfeature in enumerate(feature.feature_locations):
subfeaturesort.append([subfeature[0], x])
subfeaturesort.sort()
subfeature_locations=[]
for subfeaturenum in subfeaturesort:
subfeature_locations.append(feature.feature_locations[subfeaturenum[1]])
for x, location in enumerate(subfeature_locations):
if (location[0]>0 and location[0]<=self.track_length) or (location[1]>0 and location[1]<=self.track_length):
y=self.track_position[1]-((float(self.track_height)/4)*self.track_draw_proportion)
height=(float(self.track_height)*self.track_draw_proportion)/2
y1=self.track_position[1]
y2=self.track_position[1]+((float(self.track_height)/8)*self.track_draw_proportion)
if feature.arrows==0:
d.add(Rect(self.track_position[0]+location[0], y, location[1]-location[0], height, fillColor=feature.fillcolour, strokeColor=feature.strokecolour, strokeWidth=feature.strokeweight))
if len(subfeature_locations)>x+1 and subfeature_locations[x+1][0]<=self.track_length:
if subfeature_locations[x+1][0]<location[1]:
joinheight=y1
elif y2>y1:
if (y2-y1)>(float(subfeature_locations[x+1][0]-location[1])/2):
joinheight=y1+(float(subfeature_locations[x+1][0]-location[1])/2)
else:
joinheight=y2
else:
if (y1-y2)>(float(subfeature_locations[x+1][0]-location[1])/2):
joinheight=y1-(float(subfeature_locations[x+1][0]-location[1])/2)
else:
joinheight=y2
joins.append(Line(self.track_position[0]+location[1], y1, self.track_position[0]+location[1]+(float(subfeature_locations[x+1][0]-location[1])/2), joinheight, strokeDashArray=[0.5, 1], strokeWidth=0.5))
joins.append(Line(self.track_position[0]+((location[1]+subfeature_locations[x+1][0])/2), joinheight, self.track_position[0]+location[1]+(float(subfeature_locations[x+1][0]-location[1])), y1, strokeDashArray=[0.5, 1], strokeWidth=0.5))
for join in joins:
d.add(join)
self.scaled_features=[]
def draw_track(self):
self.draw_features()
def add_feature(self,locations=[(-1,-1)], fillcolour=colors.white, strokecolour=colors.black, strokeweight=0, label="", strand=0, arrows=0):
newfeature=Feature()
feature_locations=[]
for location in locations:
if location[0]>location[1]:
feature_locations.append((location[1],location[0]))
else:
feature_locations.append((location[0],location[1]))
newfeature.feature_locations=feature_locations
newfeature.fillcolour=fillcolour
newfeature.strokecolour=strokecolour
newfeature.strokeweight=strokeweight
newfeature.strand=strand
newfeature.label=label
newfeature.arrows=arrows
self.features.append(newfeature)
def sort_features_by_length(self):
featurelist=[]
ordered_features=[]
for x, feature in enumerate(self.features):
featurelist.append([feature.feature_locations[-1][1]-feature.feature_locations[0][0], x])
featurelist.sort()
#featurelist.reverse()
for feature in featurelist:
ordered_features.append(self.features[feature[1]])
self.features=ordered_features[:]
#################
# Feature class #
#################
class Feature:
def __init__(self):
self.feature_locations=[(-1,-1)]
self.strand=0
self.arrows=0
self.label=""
self.fillcolour=colors.blue
self.strokecolour=colors.black
self.strokeweight=0
################
# Main program #
################
if __name__ == "__main__":
options = main()
pagesize=pagesizes.A4
height, width = pagesize
if len(options.embl_file)==0:
print("Found nothing to draw")
sys.exit()
d = Drawing(width, height)
margin=0.5*inch
metadatanames={}
namecolours={}
colour_dict=[]
my_tracks={}
#create translator object for translating artemis colours to GenomeDiagram colours
translator = ColorTranslator()
track_count=0
tree_positions=[]
track_names={}
input_order=[]
new_tracks=add_ordered_tab_to_diagram(options.embl_file)
for track in new_tracks:
newtrack=new_tracks[track]
newtrack.beginning=0
newtrack.name=new_tracks[track].name
name=newtrack.name
x=1
while name in my_tracks:
name=newtrack.name+"_"+str(x)
x+=1
if not newtrack.name in track_names:
track_names[newtrack.name]=[]
input_order.append(name)
track_names[newtrack.name].append(name)
track_count+=1
newtrack.track_height=1
my_tracks[name]=newtrack
treenames=[]
tree_name_to_node={}
listnames=[]
if options.tree!="":
if not os.path.isfile(options.tree):
print("Cannot find file:", options.tree)
options.tree=""
else:
treestring=open(options.tree,"rU").read().strip()
tree=Trees.Tree(treestring, rooted=True)
tree.root
treeterminals=tree.get_terminals()
totalbr=0.0
for terminal_node in treeterminals:
terminal=tree.node(terminal_node).data.taxon
treenames.append(terminal)
if not terminal in track_names:
track_count+=1
tree_name_to_node[terminal]=terminal_node
tree.node(terminal_node).data.comment={}
tree.node(terminal_node).data.comment["name_colour"]=[(0,0,0)]
#from this we can work out a constant for the height of a track which takes into account the height of the page and margin sizes
vertical_scaling_factor=float(height-(margin*2))/(track_count)
#to make sure names can be printed in the space of a track, we can scale the name to the same size as the vertical scaling factor, but limit it to 12pt so it doesn't get crazily big
name_font_size=vertical_scaling_factor
if name_font_size>12:
name_font_size=12
left_proportion=0.3
treetrack=0
output_order=treenames[::-1]
for name in input_order[::-1]:
if not name in treenames:
output_order.append(name)
track_number=0
for track in output_order:
if(track not in my_tracks):
my_tracks = add_empty_track(my_tracks, track)
track_height=my_tracks[track].track_height
my_tracks[track].track_draw_proportion=0.8
my_tracks[track].track_height=track_height*vertical_scaling_factor
if left_proportion==1:
my_tracks[track].track_length=(width-margin)-((width-(margin*2))*0.2+margin)
my_tracks[track].track_position=[(width-(margin*2))*0.2+margin, margin+((track_number)*vertical_scaling_factor)+float((my_tracks[track].track_height)/2)]
else:
my_tracks[track].track_length=(width-margin)-((width-(margin*2))*left_proportion+margin)
my_tracks[track].track_position=[(width-(margin*2))*left_proportion+margin, margin+((track_number)*vertical_scaling_factor)+float((my_tracks[track].track_height)/2)]
my_tracks[track].track_number=track_number
if track in treenames:
tree.node(tree_name_to_node[track]).data.comment["vertpos"]=margin+((track_number)*vertical_scaling_factor)+float((my_tracks[track].track_height)/2)
my_tracks[track].grey_track_colour=colors.Color(0,0,0)
track_number+=track_height
#find the maximum feature endpoint to scale by
max_feature_length=0
for track in my_tracks:
max_track_feature_length=my_tracks[track].get_max_feature_length()
if max_track_feature_length>max_feature_length:
max_feature_length=max_track_feature_length
for plot in my_tracks[track].plots:
for data in plot.xdata:
if data[-1]>max_feature_length:
max_feature_length=data[-1]
#tell each track what the max feature length is
for track in my_tracks:
if my_tracks[track].max_feature_length<max_feature_length:
my_tracks[track].max_feature_length=max_feature_length
beginning=0
end=max_feature_length
for track in output_order:
if not track in my_tracks or (my_tracks[track].is_key and fragment!=1) or my_tracks[track].track_length==0:
continue
my_tracks[track].beginning=beginning
my_tracks[track].end=end
my_tracks[track].track_position[1]=margin+(((my_tracks[track].track_number)*vertical_scaling_factor)+(my_tracks[track].track_height)/2)
my_tracks[track].sort_features_by_length()
my_tracks[track].draw_track()
if options.tree!="":
drawtree(tree, height-(margin*2), (width-(margin*2))*left_proportion, margin, 0, 5)
renderPDF.drawToFile(d, options.outputfile)
class DrawerError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
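# Usage note (file names are illustrative): the script is driven by the argparse options
# defined in main() above, e.g.
#
#   gubbins_drawer.py -o recombinations.pdf XXXX.final_tree.tre XXXX.recombination_predictions.embl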
| gpl-2.0 | -7,902,293,239,757,884,000 | 31.198773 | 323 | 0.685657 | false |
jupyter/jupyterlab | examples/cell/main.py | 4 | 2644 | """
An example demonstrating a stand-alone "notebook".
Copyright (c) Jupyter Development Team.
Distributed under the terms of the Modified BSD License.
Example
-------
To run the example, see the instructions in the README to build it. Then
run ``python main.py``.
"""
import os
import json
from jupyterlab_server import LabServerApp
from jupyter_server.base.handlers import JupyterHandler
from jupyter_server.extension.handler import ExtensionHandlerMixin, ExtensionHandlerJinjaMixin
from jupyter_server.utils import url_path_join as ujoin
HERE = os.path.dirname(__file__)
with open(os.path.join(HERE, 'package.json')) as fid:
version = json.load(fid)['version']
def _jupyter_server_extension_points():
return [
{
'module': __name__,
'app': ExampleApp
}
]
class ExampleHandler(
ExtensionHandlerJinjaMixin,
ExtensionHandlerMixin,
JupyterHandler
):
"""Handle requests between the main app page and notebook server."""
def get(self):
"""Get the main page for the application's interface."""
config_data = {
# Use camelCase here, since that's what the lab components expect
"appVersion": version,
'baseUrl': self.base_url,
'token': self.settings['token'],
'fullStaticUrl': ujoin(self.base_url, 'static', self.name),
'frontendUrl': ujoin(self.base_url, 'example/'),
}
return self.write(
self.render_template(
'index.html',
static=self.static_url,
base_url=self.base_url,
token=self.settings['token'],
page_config=config_data
)
)
class ExampleApp(LabServerApp):
extension_url = '/example'
default_url = '/example'
app_url = "/example"
name = __name__
load_other_extensions = False
app_name = 'JupyterLab Example Cell'
static_dir = os.path.join(HERE, 'build')
templates_dir = os.path.join(HERE, 'templates')
app_version = version
app_settings_dir = os.path.join(HERE, 'build', 'application_settings')
schemas_dir = os.path.join(HERE, 'build', 'schemas')
themes_dir = os.path.join(HERE, 'build', 'themes')
user_settings_dir = os.path.join(HERE, 'build', 'user_settings')
workspaces_dir = os.path.join(HERE, 'build', 'workspaces')
def initialize_handlers(self):
"""Add example handler to Lab Server's handler list.
"""
self.handlers.append(
('/example', ExampleHandler)
)
if __name__ == '__main__':
ExampleApp.launch_instance()
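# Usage note: running ``python main.py`` starts a Jupyter server with only this extension
# loaded; the page rendered by ExampleHandler is then served under the extension URL
# ('/example') on whatever host, port and token the server reports at startup.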
| bsd-3-clause | -5,810,901,252,969,317,000 | 28.707865 | 94 | 0.621785 | false |
cjvogl/finite_volume_seismic_model | 3d/setplot_pwaves.py | 1 | 3701 |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
import numpy as np
import os, shutil
from mapping import Mapping
import dtopotools_horiz_okada_and_1d as dtopotools
length_scale = 1.0e-3 # m to km
xlimits = [-150.0e3*length_scale,200.0e3*length_scale]
zlimits = [-175.0e3*length_scale,0.0]
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of clawpack.visclaw.data.ClawPlotData.
Output: a modified version of plotdata.
"""
slice_number = 3
tmpdir = os.path.abspath(os.curdir)
os.chdir(plotdata.outdir)
for filename in os.listdir('.'):
if (filename.startswith('slice_%d' % slice_number)):
shutil.copyfile(filename,filename.replace('slice_%d' % slice_number,'fort',1))
fault = dtopotools.Fault()
fault.read('fault.data')
os.chdir(tmpdir)
mapping = Mapping(fault)
xp1 = mapping.xp1*length_scale
xp2 = mapping.xp2*length_scale
zp1 = mapping.zp1*length_scale
zp2 = mapping.zp2*length_scale
xcenter = mapping.xcenter
ycenter = mapping.ycenter
def mapc2p(xc,yc):
xp,yp = mapping.mapc2p_xz(xc,yc)
return xp*length_scale,yp*length_scale
def plot_fault(current_data):
from pylab import linspace, plot, xlabel, ylabel, tick_params
xl = linspace(xp1,xp2,100)
zl = linspace(zp1,zp2,100)
plot(xl,zl,'g',linewidth=3)
tick_params(labelsize=25)
xlabel('kilometers',fontsize=25)
ylabel('kilometers',fontsize=25)
from clawpack.visclaw import colormaps
plotdata.clearfigures() # clear any old figures,axes,items data
#plotdata.format = 'binary'
def sigmatr(current_data):
# return -trace(sigma)
q = current_data.q
return -(q[0,:,:] + q[1,:,:] + q[2,:,:])
# Figure for trace(sigma)
plotfigure = plotdata.new_plotfigure(name='fault', figno=1)
plotfigure.kwargs = {'figsize':(11,6)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = xlimits
plotaxes.ylimits = zlimits
plotaxes.title_with_t = False
plotaxes.title = ''
plotaxes.scaled = True
plotaxes.afteraxes = plot_fault
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = sigmatr
plotitem.pcolor_cmap = colormaps.blue_white_red
plotitem.pcolor_cmin = -1e6
plotitem.pcolor_cmax = 1e6
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0]
plotitem.amr_patchedges_show = [0]
plotitem.MappedGrid = True
plotitem.mapc2p = mapc2p
# Parameters used only when creating html and/or latex hardcopy
# e.g., via clawpack.visclaw.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
# plotdata.parallel = True
return plotdata
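# Illustrative sketch: this module is normally consumed by VisClaw rather than run
# directly, along these lines (the output directory name is an assumption):
#
#   from clawpack.visclaw.data import ClawPlotData
#   plotdata = ClawPlotData()
#   plotdata.outdir = '_output'
#   plotdata = setplot(plotdata)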
| gpl-3.0 | -4,647,494,930,394,782,000 | 31.464912 | 90 | 0.640367 | false |
VitorHugoAguiar/ProBot | ProBot_BeagleBone/PIDControllersFile.py | 1 | 4310 | #!/usr/bin/python
# Python Standard Library Imports
import time
# Local files
import ProBotConstantsFile
# Initialization of classes from local files
Pconst = ProBotConstantsFile.Constants()
# PID functions
class PIDControllersClass():
# Build a constructor
def __init__(self):
self.error = 0
self.SaberTooth_KpV = 280
self.SaberTooth_KiV = 0.6
self.SaberTooth_KdV = 12
self.SaberTooth_KpA = 18
self.SaberTooth_KiA = 2.2
self.SaberTooth_KdA = -2
self.PWM_KpV = 75
self.PWM_KiV = 0.6
self.PWM_KdV = 0.2
self.PWM_KpA = 9
self.PWM_KiA = 3
self.PWM_KdA = -0.001
self.limitV = 800
self.limitA = 1000
self.integrated_error_V1 = 0
self.integrated_error_V2 = 0
self.integrated_error_A1 = 0
self.integrated_error_A2 = 0
self.last_error_V1 = 0
self.last_error_V2 = 0
self.last_error_A1 = 0
self.last_error_A2 = 0
def standardPID(self, reference, measured, type, userChoice):
self.error = float(reference - measured)
# Load the right values for the controllers, depending on if we are using Sabertooth of PWM controller
if userChoice=='1':
KpV = self.SaberTooth_KpV
KiV = self.SaberTooth_KiV
KdV = self.SaberTooth_KdV
KpA = self.SaberTooth_KpA
KiA = self.SaberTooth_KiA
KdA = self.SaberTooth_KdA
if userChoice=='2':
KpV = self.PWM_KpV
KiV = self.PWM_KiV
KdV = self.PWM_KdV
KpA = self.PWM_KpA
KiA = self.PWM_KiA
KdA = self.PWM_KdA
# Loading the variables for the controllers
typeController = {
'Velocity1': [KpV, KiV, KdV, self.limitV, self.integrated_error_V1, self.last_error_V1],
'Velocity2': [KpV, KiV, KdV, self.limitV, self.integrated_error_V2, self.last_error_V2],
'Angle1': [KpA, KiA, KdA, self.limitA, self.integrated_error_A1, self.last_error_A1],
'Angle2': [KpA, KiA, KdA, self.limitA, self.integrated_error_A2, self.last_error_A2]}
controllerVar = typeController[type]
# Code for the PID controllers
pTerm = float(controllerVar[0] * self.error)
controllerVar[4] += float(self.error)
# Limiting the integrated error, avoiding windup
controllerVar[4] = max(-controllerVar[3], min(controllerVar[4], controllerVar[3]))
iTerm = float(controllerVar[1] * controllerVar[4])
dTerm = float(controllerVar[2] * (self.error - controllerVar[5]))
controllerVar[5] = self.error
PID_result = float(pTerm + iTerm + dTerm)
# Updating the integrated error and the last error for the next loop
if(type is 'Velocity1'):
self.integrated_error_V1 = controllerVar[4]
self.last_error_V1 = controllerVar[5]
if(type is 'Velocity2'):
self.integrated_error_V2 = controllerVar[4]
self.last_error_V2 = controllerVar[5]
if(type is 'Angle1'):
self.integrated_error_A1 = controllerVar[4]
self.last_error_A1 = controllerVar[5]
if userChoice == '1':
PID_result = max(-127, min(PID_result, 127)) #Limiting the PID values because of the Sabertooth range (-127, 127)
if userChoice == '2':
PID_result = max(-100, min(PID_result, 100)) #Limiting the percentage of the PWM
if(type is 'Angle2'):
self.integrated_error_A2 = controllerVar[4]
self.last_error_A2 = controllerVar[5]
if userChoice=='1': #Limiting the PID values because of the Sabertooth range (-127, 127)
PID_result = max(-127, min(PID_result, 127))
if userChoice=='2':
PID_result = max(-100, min(PID_result, 100)) #Limiting the percentage of the PWM
return -PID_result
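# Illustrative sketch of a single velocity-control step; the reference/measured values and
# the '2' (PWM) controller choice are made-up inputs.
if __name__ == "__main__":
    pid = PIDControllersClass()
    command = pid.standardPID(reference=1.0, measured=0.8, type='Velocity1', userChoice='2')
    print(command)  # negated PID output, clamped to the PWM percentage range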
| agpl-3.0 | -3,683,972,053,555,202,600 | 39.280374 | 163 | 0.547796 | false |
FarzanHajian/CreateSwap | src/createswap2.py | 1 | 4347 | #!/usr/bin/env python
# encoding: utf-8
'''
create_swap.py
A Python 2 script for creating and removing Linux swap files.
Copyright (C) 2016 Farzan Hajian
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
@author: Farzan Hajian
@copyright: 2016. All rights reserved.
@license: GPL3
@contact: [email protected]
NOTE:
THIS SCRIPT WORKS ONLY WITH PYTHON VERSION 2.
FOR PYTHON 3, USE "createswap.py".
'''
import sys
import os
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', help='the file full name on which the swap space is going to be built (must be used with --size option)')
parser.add_argument('-s', '--size', help='size of the swap space in megabytes (must be used with --file option)', type=int)
parser.add_argument('-o', '--off', help='removes the swap file and disables its swap space', metavar='FILE')
parser.add_argument('--verbose', help='executes in the verbose mode (useful for tracking errors)', action='store_true')
args = parser.parse_args()
try:
if not args.file and not args.size and not args.off:
if not args.verbose:
parser.print_help()
raise Exception()
else:
raise Exception("--verbose option cannot be used alone")
if(args.file and not args.size) or (not args.file and args.size):
raise Exception("--file and --size options must be used together")
if args.off and (args.file or args.size):
raise Exception("--off option cannot be used with other options")
except Exception as ex:
show_error(ex, 3)
return args
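# --- Hedged usage examples (not part of the original script) ---
# Typical invocations once the options above are parsed; the file name and
# sizes below are purely illustrative:
#   python2 createswap2.py --file /swapfile --size 1024
#   python2 createswap2.py --off /swapfile
#   python2 createswap2.py --file /swapfile --size 512 --verbose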
def is_verbose():
return args.verbose
def print_header():
os.system('clear')
print('-'*50)
print('createswap.py v 2.0 (Python 2)\n')
print('This program is published under GPL v3 license')
print('You can contact me at [email protected]')
print('-'*50)
def print_step(message):
if is_verbose():
print ("")
print '%-40.40s'%message
else:
print '%-40.40s'%message,
def print_status(is_failed=False):
status = ('Failed' if is_failed else 'OK')
print('[%s]'%status)
def show_error(exception, exit_code):
print('\n%s'%exception)
sys.exit(exit_code)
def sudo():
os.system('sudo id > /dev/null')
def exec_step(message, command, arg_tuple=None):
print_step(message)
command = 'sudo ' + command
if not is_verbose(): command += ' > /dev/null 2>&1'
if arg_tuple != None:
exit_code = os.system(command.format(*arg_tuple))
else:
exit_code = os.system(command)
if exit_code == 0:
print_status()
else:
print_status(True)
def create_swap(filename, size):
try:
tuple1 = (filename, size)
tuple2 = (filename,)
exec_step('Creating the file', 'dd if=/dev/zero of={} bs=1M count={}', tuple1)
exec_step('Setting the file access mode', 'chmod 600 {}', tuple2)
exec_step('Setting up the swap space', 'mkswap {}', tuple2)
exec_step('Enabling the swap space', 'swapon {}', tuple2)
except Exception as ex:
show_error(ex, 2)
def drop_swap(filename):
try:
tuple1 = (filename,)
exec_step('Disabling the swap space', 'swapoff {}', tuple1)
exec_step('Removing the file', 'rm {}', tuple1)
except Exception as ex:
show_error(ex, 2)
# program entry point
print_header()
args = parse_args()
sudo()
if args.file:
create_swap(args.file, args.size)
elif args.off:
drop_swap(args.off)
print("")
| gpl-3.0 | -7,813,304,307,696,668,000 | 30.5 | 145 | 0.635381 | false |
Ghini/ghini.desktop | bauble/prefs.py | 1 | 10011 | # -*- coding: utf-8 -*-
#
# Copyright 2008-2010 Brett Adams
# Copyright 2015 Mario Frasca <[email protected]>.
#
# This file is part of ghini.desktop.
#
# ghini.desktop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ghini.desktop is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ghini.desktop. If not, see <http://www.gnu.org/licenses/>.
import os
from gi.repository import Gtk
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
import bauble
import bauble.db as db
import bauble.paths as paths
import bauble.pluginmgr as pluginmgr
testing = False # set this to True when testing
"""
The prefs module exposes an API for getting and setting user
preferences in the Ghini config file.
To use the preferences import bauble.prefs and access the prefs object
using a dictionary like interface. e.g. ::
import bauble.prefs
prefs.prefs[key] = value
"""
# TODO: maybe we should have a create method that creates the preferences
# as a one-time thing if the file doesn't exist
# TODO: Consider using ConfigObj since it does validation, type
# conversion and unicode automatically...the cons are that it adds
# another dependency and we would have to change the prefs interface
# throughout bauble
default_filename = 'config'
default_prefs_file = os.path.join(paths.appdata_dir(), default_filename)
"""
The default file for the preference settings file.
"""
config_version_pref = 'bauble.config.version'
"""
The preferences key for the bauble version of the preferences file.
"""
config_version = bauble.version_tuple[0], bauble.version_tuple[1]
date_format_pref = 'bauble.default_date_format'
"""
The preferences key for the default data format.
"""
picture_root_pref = 'bauble.picture_root'
"""
The preferences key for the root directory where pictures are stored.
"""
ask_timeout_pref = 'bauble.network_timeout'
"""
The preferences key for remote server querying timeout.
"""
parse_dayfirst_pref = 'bauble.parse_dayfirst'
"""
The preferences key used to determine whether the day should come
first when parsing a date string. For more information see the
:meth:`dateutil.parser.parse` method.
Values: True, False
"""
parse_yearfirst_pref = 'bauble.parse_yearfirst'
"""
The preferences key used to determine whether the year should come
first when parsing a date string. For more information see the
:meth:`dateutil.parser.parse` method.
Values: True, False
"""
units_pref = 'bauble.units'
"""
The preferences key for the default units for Ghini.
Values: metric, imperial
"""
use_sentry_client_pref = 'bauble.use_sentry_client'
"""
During normal usage, Ghini produces a log file which contains
invaluable information for tracking down errors. This information is
normally saved in a file on the local workstation.
This preference key controls the option of sending exceptional
conditions (WARNING and ERROR, normally related to software problems)
to a central logging server, and developers will be notified by email
of the fact that you encountered a problem.
Logging messages at the levels Warning and Error do not contain personal
information. If you have completed the registration steps, a developer
might contact you to ask for further details, as it could be the
complete content of your log file.
Values: True, False (Default: False)
"""
from configparser import RawConfigParser
class _prefs(dict):
def __init__(self, filename=default_prefs_file):
self._filename = filename
def init(self):
'''
initialize the preferences, should only be called from app.main
'''
# create directory tree of filename if it doesn't yet exist
head, tail = os.path.split(self._filename)
if not os.path.exists(head):
os.makedirs(head)
# also make sure the templates and resources directories exists
if not os.path.exists(os.path.join(head, 'res', 'templates')):
os.makedirs(os.path.join(head, 'res', 'templates'))
self.config = RawConfigParser()
# set the version if the file doesn't exist
if not os.path.exists(self._filename):
self[config_version_pref] = config_version
else:
self.config.read(self._filename)
version = self[config_version_pref]
if version is None:
logger.warning('%s has no config version pref' % self._filename)
logger.warning('setting the config version to %s.%s'
% (config_version))
self[config_version_pref] = config_version
# set some defaults if they don't exist
self.setdefault(use_sentry_client_pref, False)
self.setdefault(picture_root_pref, '')
self.setdefault(date_format_pref, '%d-%m-%Y')
self.setdefault(units_pref, 'metric')
self.setdefault(ask_timeout_pref, 4)
if parse_dayfirst_pref not in self:
format = self[date_format_pref]
if format.find('%d') < format.find('%m'):
self[parse_dayfirst_pref] = True
else:
self[parse_dayfirst_pref] = False
if parse_yearfirst_pref not in self:
format = self[date_format_pref]
if format.find('%Y') == 0 or format.find('%y') == 0:
self[parse_yearfirst_pref] = True
else:
self[parse_yearfirst_pref] = False
@staticmethod
def _parse_key(name):
index = name.rfind(".")
return name[:index], name[index+1:]
def get(self, key, default):
'''
get value for key else return default
'''
value = self[key]
if value is None:
return default
return value
def __getitem__(self, key):
section, option = _prefs._parse_key(key)
# this doesn't allow None values for preferences
if not self.config.has_section(section) or \
not self.config.has_option(section, option):
return None
else:
i = self.config.get(section, option)
eval_chars = '{[('
if i == '':
return i
elif i[0] in eval_chars: # then the value is a dict, list or tuple
return eval(i)
elif i == 'True' or i == 'False':
return eval(i)
return i
def items(self):
return [('%s.%s' % (section, name), value)
for section in sorted(prefs.config.sections())
for name, value in prefs.config.items(section)]
def setdefault(self, key, default=None):
if key not in self:
self.__setitem__(key, default)
return self[key]
def __setitem__(self, key, value):
section, option = _prefs._parse_key(key)
if not self.config.has_section(section):
self.config.add_section(section)
self.config.set(section, option, str(value))
def __contains__(self, key):
section, option = _prefs._parse_key(key)
if self.config.has_section(section) and \
self.config.has_option(section, option):
return True
return False
def save(self, force=False):
if testing and not force:
return
try:
f = open(self._filename, "w+")
self.config.write(f)
f.close()
except Exception:
msg = _("Ghini can't save your user preferences. \n\nPlease "
"check the file permissions of your config file:\n %s") \
% self._filename
if bauble.gui is not None and bauble.gui.window is not None:
import bauble.utils as utils
utils.message_dialog(msg, type=Gtk.MessageType.ERROR,
parent=bauble.gui.window)
else:
logger.error(msg)
class PrefsView(pluginmgr.View):
"""
The PrefsView displays the values in the preferences and the plugin registry.
"""
pane_size_pref = 'bauble.prefs.pane_position'
def __init__(self):
logger.debug('PrefsView::__init__')
super().__init__(
filename=os.path.join(paths.lib_dir(), 'bauble.glade'),
root_widget_name='prefs_window')
self.view.connect_signals(self)
self.prefs_ls = self.view.widgets.prefs_prefs_ls
self.plugins_ls = self.view.widgets.prefs_plugins_ls
self.update()
def on_prefs_prefs_tv_row_activated(self, tv, path, column):
global prefs
key, repr_str, type_str = self.prefs_ls[path]
if type_str == 'bool':
prefs[key] = not prefs[key]
self.prefs_ls[path][1] = str(prefs[key])
prefs.save()
def update(self):
self.prefs_ls.clear()
global prefs
for key, value in sorted(prefs.items()):
self.prefs_ls.append(
(key, value, prefs[key].__class__.__name__))
self.plugins_ls.clear()
from bauble.pluginmgr import PluginRegistry
session = db.Session()
plugins = session.query(PluginRegistry.name, PluginRegistry.version)
for name, version in plugins:
self.plugins_ls.append((name, version))
session.close()
pass
class PrefsCommandHandler(pluginmgr.CommandHandler):
command = ('prefs', 'config')
view = None
def __call__(self, cmd, arg):
pass
def get_view(self):
if self.view is None:
self.__class__.view = PrefsView()
return self.view
pluginmgr.register_command(PrefsCommandHandler)
prefs = _prefs()
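# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the dictionary-like API described in the module docstring on
# the singleton created above. Note that init() and save() touch the real
# config file under paths.appdata_dir(), so this is illustrative only.
def _example_prefs_usage():
    prefs.init()                                    # load or create the config file
    fmt = prefs.get(date_format_pref, '%d-%m-%Y')   # read with a fallback
    prefs[date_format_pref] = fmt                   # write a value back
    prefs.save()
    return fmt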
| gpl-2.0 | 2,704,010,954,225,511,000 | 30.984026 | 79 | 0.636899 | false |
hbldh/sudokuextract | sudokuextract/imgproc/binary.py | 1 | 1736 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`binary`
==================
Created by hbldh <[email protected]>
Created on 2016-01-26
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
from skimage.transform import resize
from skimage.filters import threshold_otsu
from skimage.filters import gaussian_filter, threshold_adaptive
def to_binary_otsu(img, invert=False):
if img.dtype == np.bool:
img = np.array(img, 'uint8')
if img.max() == img.min():
if img.min() == 1:
return np.array(img * 255, 'uint8')
else:
return np.array(img, 'uint8')
else:
t = threshold_otsu(img)
img[img <= t] = 255 if invert else 0
img[img > t] = 0 if invert else 255
return np.array(img, 'uint8')
def to_binary_adaptive(img):
sigma = 1.0
m = max(img.shape)
if m > 2000:
block_size = 80
elif m > 1500:
block_size = 50
elif m > 1000:
block_size = 35
else:
block_size = 20
bimg = gaussian_filter(img, sigma=sigma)
bimg = threshold_adaptive(bimg, block_size, offset=2 / 255)
bimg = np.array(bimg, 'uint8') * 255
return bimg
def add_border(img, size=(28, 28), border_size=0, background_value=255):
img = resize(img, (size[0] - border_size * 2,
size[1] - border_size * 2))
img = np.array(img * 255, 'uint8')
output_img = np.ones(size, 'uint8') * background_value
if border_size == 0:
output_img[:, :] = img
else:
output_img[border_size:-border_size, border_size:-border_size] = img
return output_img
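# --- Hedged usage sketch (not part of the original module) ---
# Chains the helpers above on a synthetic greyscale image; the 100x100
# gradient stands in for a real photograph of a sudoku cell.
def _example_binarise():
    img = np.tile(np.arange(100, dtype='uint8'), (100, 1))
    bw = to_binary_otsu(img)                        # global Otsu threshold
    bw_inv = to_binary_otsu(img, invert=True)       # inverted variant
    padded = add_border(bw, size=(28, 28), border_size=2)
    return bw, bw_inv, padded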
| mit | 2,918,771,141,372,004,400 | 24.910448 | 76 | 0.597926 | false |
GerbenJavado/LinkFinder | linkfinder.py | 1 | 13951 | #!/usr/bin/env python
# Python 3
# LinkFinder
# By Gerben_Javado
# Fix webbrowser bug for MacOS
import os
os.environ["BROWSER"] = "open"
# Import libraries
import re, sys, glob, html, argparse, jsbeautifier, webbrowser, subprocess, base64, ssl, xml.etree.ElementTree
from gzip import GzipFile
from string import Template
try:
from StringIO import StringIO
readBytesCustom = StringIO
except ImportError:
from io import BytesIO
readBytesCustom = BytesIO
try:
from urllib.request import Request, urlopen
except ImportError:
from urllib2 import Request, urlopen
# Regex used
regex_str = r"""
(?:"|') # Start newline delimiter
(
((?:[a-zA-Z]{1,10}://|//) # Match a scheme [a-Z]*1-10 or //
[^"'/]{1,}\. # Match a domainname (any character + dot)
[a-zA-Z]{2,}[^"']{0,}) # The domainextension and/or path
|
((?:/|\.\./|\./) # Start with /,../,./
[^"'><,;| *()(%%$^/\\\[\]] # Next character can't be...
[^"'><,;|()]{1,}) # Rest of the characters can't be
|
([a-zA-Z0-9_\-/]{1,}/ # Relative endpoint with /
[a-zA-Z0-9_\-/]{1,} # Resource name
\.(?:[a-zA-Z]{1,4}|action) # Rest + extension (length 1-4 or action)
(?:[\?|#][^"|']{0,}|)) # ? or # mark with parameters
|
([a-zA-Z0-9_\-/]{1,}/ # REST API (no extension) with /
[a-zA-Z0-9_\-/]{3,} # Proper REST endpoints usually have 3+ chars
(?:[\?|#][^"|']{0,}|)) # ? or # mark with parameters
|
([a-zA-Z0-9_\-]{1,} # filename
\.(?:php|asp|aspx|jsp|json|
action|html|js|txt|xml) # . + extension
(?:[\?|#][^"|']{0,}|)) # ? or # mark with parameters
)
(?:"|') # End newline delimiter
"""
context_delimiter_str = "\n"
def parser_error(errmsg):
'''
Error Messages
'''
print("Usage: python %s [Options] use -h for help" % sys.argv[0])
print("Error: %s" % errmsg)
sys.exit()
def parser_input(input):
'''
Parse Input
'''
# Method 1 - URL
if input.startswith(('http://', 'https://',
'file://', 'ftp://', 'ftps://')):
return [input]
# Method 2 - URL Inspector Firefox
if input.startswith('view-source:'):
return [input[12:]]
# Method 3 - Burp file
if args.burp:
jsfiles = []
items = xml.etree.ElementTree.fromstring(open(args.input, "r").read())
for item in items:
jsfiles.append({"js":base64.b64decode(item.find('response').text).decode('utf-8',"replace"), "url":item.find('url').text})
return jsfiles
# Method 4 - Folder with a wildcard
if "*" in input:
paths = glob.glob(os.path.abspath(input))
for index, path in enumerate(paths):
paths[index] = "file://%s" % path
return (paths if len(paths) > 0 else parser_error('Input with wildcard does \
not match any files.'))
# Method 5 - Local file
path = "file://%s" % os.path.abspath(input)
return [path if os.path.exists(input) else parser_error("file could not \
be found (maybe you forgot to add http/https).")]
def send_request(url):
'''
Send requests with Requests
'''
q = Request(url)
q.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36')
q.add_header('Accept', 'text/html,\
application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
q.add_header('Accept-Language', 'en-US,en;q=0.8')
q.add_header('Accept-Encoding', 'gzip')
q.add_header('Cookie', args.cookies)
try:
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
response = urlopen(q, timeout=args.timeout, context=sslcontext)
except:
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
response = urlopen(q, timeout=args.timeout, context=sslcontext)
if response.info().get('Content-Encoding') == 'gzip':
data = GzipFile(fileobj=readBytesCustom(response.read())).read()
elif response.info().get('Content-Encoding') == 'deflate':
data = response.read().read()
else:
data = response.read()
return data.decode('utf-8', 'replace')
def getContext(list_matches, content, include_delimiter=0, context_delimiter_str="\n"):
'''
Parse Input
list_matches: list of tuple (link, start_index, end_index)
content: content to search for the context
include_delimiter Set 1 to include delimiter in context
'''
items = []
for m in list_matches:
match_str = m[0]
match_start = m[1]
match_end = m[2]
context_start_index = match_start
context_end_index = match_end
delimiter_len = len(context_delimiter_str)
content_max_index = len(content) - 1
while content[context_start_index] != context_delimiter_str and context_start_index > 0:
context_start_index = context_start_index - 1
while content[context_end_index] != context_delimiter_str and context_end_index < content_max_index:
context_end_index = context_end_index + 1
if include_delimiter:
context = content[context_start_index: context_end_index]
else:
context = content[context_start_index + delimiter_len: context_end_index]
item = {
"link": match_str,
"context": context
}
items.append(item)
return items
def parser_file(content, regex_str, mode=1, more_regex=None, no_dup=1):
'''
Parse Input
content: string of content to be searched
regex_str: string of regex (The link should be in the group(1))
mode: mode of parsing. Set 1 to include surrounding contexts in the result
more_regex: string of regex to filter the result
no_dup: remove duplicated link (context is NOT counted)
Return the list of ["link": link, "context": context]
The context is optional if mode=1 is provided.
'''
global context_delimiter_str
if mode == 1:
# Beautify
if len(content) > 1000000:
content = content.replace(";",";\r\n").replace(",",",\r\n")
else:
content = jsbeautifier.beautify(content)
regex = re.compile(regex_str, re.VERBOSE)
if mode == 1:
all_matches = [(m.group(1), m.start(0), m.end(0)) for m in re.finditer(regex, content)]
items = getContext(all_matches, content, context_delimiter_str=context_delimiter_str)
else:
items = [{"link": m.group(1)} for m in re.finditer(regex, content)]
if no_dup:
# Remove duplication
all_links = set()
no_dup_items = []
for item in items:
if item["link"] not in all_links:
all_links.add(item["link"])
no_dup_items.append(item)
items = no_dup_items
# Match Regex
filtered_items = []
for item in items:
# Remove other capture groups from regex results
if more_regex:
if re.search(more_regex, item["link"]):
filtered_items.append(item)
else:
filtered_items.append(item)
return filtered_items
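# --- Hedged usage sketch (not part of the original script) ---
# parser_file() can also be called directly on an in-memory JavaScript
# snippet together with the module-level regex_str; the sample content is
# made up and should yield both the REST path and the script name.
def _example_parse_snippet():
    sample_js = 'var api = "/api/v1/users"; var s = "main.js";'
    found = parser_file(sample_js, regex_str, mode=0)
    return [item["link"] for item in found]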
def cli_output(endpoints):
'''
Output to CLI
'''
for endpoint in endpoints:
print(html.escape(endpoint["link"]).encode(
'ascii', 'ignore').decode('utf8'))
def html_save(html):
'''
Save as HTML file and open in the browser
'''
hide = os.dup(1)
os.close(1)
os.open(os.devnull, os.O_RDWR)
try:
s = Template(open('%s/template.html' % sys.path[0], 'r').read())
text_file = open(args.output, "wb")
text_file.write(s.substitute(content=html).encode('utf8'))
text_file.close()
print("URL to access output: file://%s" % os.path.abspath(args.output))
file = "file:///%s" % os.path.abspath(args.output)
if sys.platform == 'linux' or sys.platform == 'linux2':
subprocess.call(["xdg-open", file])
else:
webbrowser.open(file)
except Exception as e:
print("Output can't be saved in %s \
due to exception: %s" % (args.output, e))
finally:
os.dup2(hide, 1)
def check_url(url):
nopelist = ["node_modules", "jquery.js"]
if url[-3:] == ".js":
words = url.split("/")
for word in words:
if word in nopelist:
return False
if url[:2] == "//":
url = "https:" + url
if url[:4] != "http":
if url[:1] == "/":
url = args.input + url
else:
url = args.input + "/" + url
return url
else:
return False
if __name__ == "__main__":
# Parse command line
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--domain",
help="Input a domain to recursively parse all javascript located in a page",
action="store_true")
parser.add_argument("-i", "--input",
help="Input a: URL, file or folder. \
For folders a wildcard can be used (e.g. '/*.js').",
required="True", action="store")
parser.add_argument("-o", "--output",
help="Where to save the file, \
including file name. Default: output.html",
action="store", default="output.html")
parser.add_argument("-r", "--regex",
help="RegEx for filtering purposes \
against found endpoint (e.g. ^/api/)",
action="store")
parser.add_argument("-b", "--burp",
help="",
action="store_true")
parser.add_argument("-c", "--cookies",
help="Add cookies for authenticated JS files",
action="store", default="")
default_timeout = 10
parser.add_argument("-t", "--timeout",
help="How many seconds to wait for the server to send data before giving up (default: " + str(default_timeout) + " seconds)",
default=default_timeout, type=int, metavar="<seconds>")
args = parser.parse_args()
if args.input[-1:] == "/":
args.input = args.input[:-1]
mode = 1
if args.output == "cli":
mode = 0
# Convert input to URLs or JS files
urls = parser_input(args.input)
# Convert URLs to JS
output = ''
for url in urls:
if not args.burp:
try:
file = send_request(url)
except Exception as e:
parser_error("invalid input defined or SSL error: %s" % e)
else:
file = url['js']
url = url['url']
endpoints = parser_file(file, regex_str, mode, args.regex)
if args.domain:
for endpoint in endpoints:
endpoint = html.escape(endpoint["link"]).encode('ascii', 'ignore').decode('utf8')
endpoint = check_url(endpoint)
if endpoint is False:
continue
print("Running against: " + endpoint)
print("")
try:
file = send_request(endpoint)
new_endpoints = parser_file(file, regex_str, mode, args.regex)
if args.output == 'cli':
cli_output(new_endpoints)
else:
output += '''
<h1>File: <a href="%s" target="_blank" rel="nofollow noopener noreferrer">%s</a></h1>
''' % (html.escape(endpoint), html.escape(endpoint))
for endpoint2 in new_endpoints:
url = html.escape(endpoint2["link"])
header = "<div><a href='%s' class='text'>%s" % (
html.escape(url),
html.escape(url)
)
body = "</a><div class='container'>%s</div></div>" % html.escape(
endpoint2["context"]
)
body = body.replace(
html.escape(endpoint2["link"]),
"<span style='background-color:yellow'>%s</span>" %
html.escape(endpoint2["link"])
)
output += header + body
except Exception as e:
print("Invalid input defined or SSL error for: " + endpoint)
continue
if args.output == 'cli':
cli_output(endpoints)
else:
output += '''
<h1>File: <a href="%s" target="_blank" rel="nofollow noopener noreferrer">%s</a></h1>
''' % (html.escape(url), html.escape(url))
for endpoint in endpoints:
url = html.escape(endpoint["link"])
header = "<div><a href='%s' class='text'>%s" % (
html.escape(url),
html.escape(url)
)
body = "</a><div class='container'>%s</div></div>" % html.escape(
endpoint["context"]
)
body = body.replace(
html.escape(endpoint["link"]),
"<span style='background-color:yellow'>%s</span>" %
html.escape(endpoint["link"])
)
output += header + body
if args.output != 'cli':
html_save(output)
| mit | -3,957,937,958,894,511,600 | 33.70398 | 149 | 0.514587 | false |
ownport/ansiblite | src/ansiblite/utils/path.py | 1 | 2926 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from errno import EEXIST
from ansiblite.errors import AnsibleError
from ansiblite.utils._text import to_bytes, to_native, to_text
__all__ = ['unfrackpath', 'makedirs_safe']
def unfrackpath(path, follow=True):
'''
Returns a path that is free of symlinks (if follow=True), environment variables, relative path traversals and symbols (~)
:arg path: A byte or text string representing a path to be canonicalized
:arg follow: A boolean to indicate of symlinks should be resolved or not
:raises UnicodeDecodeError: If the canonicalized version of the path
contains non-utf8 byte sequences.
:rtype: A text string (unicode on python2, str on python3).
:returns: An absolute path with symlinks, environment variables, and tilde
expanded. Note that this does not check whether a path exists.
example::
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
if follow:
final_path = os.path.normpath(os.path.realpath(os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))))
else:
final_path = os.path.normpath(os.path.abspath(os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))))
return to_text(final_path, errors='surrogate_or_strict')
def makedirs_safe(path, mode=None):
'''Safe way to create dirs in multiprocess/thread environments.
:arg path: A byte or text string representing a directory to be created
:kwarg mode: If given, the mode to set the directory to
:raises AnsibleError: If the directory cannot be created and does not already exists.
:raises UnicodeDecodeError: if the path is not decodable in the utf-8 encoding.
'''
rpath = unfrackpath(path)
b_rpath = to_bytes(rpath)
if not os.path.exists(b_rpath):
try:
if mode:
os.makedirs(b_rpath, mode)
else:
os.makedirs(b_rpath)
except OSError as e:
if e.errno != EEXIST:
raise AnsibleError("Unable to create local directories(%s): %s" % (to_native(rpath), to_native(e)))
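# --- Hedged usage sketch (not part of the original module) ---
# Expands a tilde path without resolving symlinks and then creates it; the
# directory name and mode are purely illustrative.
def _example_ensure_dir():
    target = unfrackpath('~/.ansiblite/tmp', follow=False)
    makedirs_safe(target, mode=0o700)
    return target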
| gpl-3.0 | -6,000,778,393,953,600,000 | 40.211268 | 141 | 0.698906 | false |
rigdenlab/SIMBAD | simbad/command_line/simbad_full.py | 1 | 6513 | #!/usr/bin/env python
__author__ = "Adam Simpkin, and Felix Simkovic"
__contributing_authors__ = "Jens Thomas, and Ronan Keegan"
__credits__ = "Daniel Rigden, William Shepard, Charles Ballard, Villi Uski, and Andrey Lebedev"
__date__ = "05 May 2017"
__email__ = "[email protected]"
__version__ = "0.1"
import argparse
import os
import sys
from pyjob.stopwatch import StopWatch
import simbad.command_line
import simbad.exit
import simbad.util
import simbad.util.logging_util
import simbad.util.pyrvapi_results
logger = None
def simbad_argparse():
"""Create the argparse options"""
p = argparse.ArgumentParser(
description="SIMBAD: Sequence Independent Molecular replacement Based on Available Database",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
simbad.command_line._argparse_core_options(p)
simbad.command_line._argparse_job_submission_options(p)
simbad.command_line._argparse_contaminant_options(p)
simbad.command_line._argparse_morda_options(p)
simbad.command_line._argparse_lattice_options(p)
simbad.command_line._argparse_rot_options(p)
simbad.command_line._argparse_mr_options(p)
simbad.command_line._argparse_mtz_options(p)
p.add_argument('mtz', help="The path to the input mtz file")
return p
def main():
"""Main SIMBAD routine"""
args = simbad_argparse().parse_args()
args.work_dir = simbad.command_line.get_work_dir(
args.run_dir, work_dir=args.work_dir, ccp4_jobid=args.ccp4_jobid, ccp4i2_xml=args.ccp4i2_xml
)
log_file = os.path.join(args.work_dir, 'simbad.log')
debug_log_file = os.path.join(args.work_dir, 'debug.log')
global logger
logger = simbad.util.logging_util.setup_logging(args.debug_lvl, logfile=log_file, debugfile=debug_log_file)
if not os.path.isfile(args.amore_exe):
raise OSError("amore executable not found")
gui = simbad.util.pyrvapi_results.SimbadOutput(
args.rvapi_document, args.webserver_uri, args.display_gui, log_file, args.work_dir, ccp4i2_xml=args.ccp4i2_xml, tab_prefix=args.tab_prefix
)
simbad.command_line.print_header()
logger.info("Running in directory: %s\n", args.work_dir)
stopwatch = StopWatch()
stopwatch.start()
end_of_cycle, solution_found, all_results = False, False, {}
while not (solution_found or end_of_cycle):
# =====================================================================================
# Perform the lattice search
solution_found = simbad.command_line._simbad_lattice_search(args)
logger.info("Lattice search completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.lap.time_pretty)
if solution_found and not args.process_all:
logger.info(
"Lucky you! SIMBAD worked its charm and found a lattice match for you.")
continue
elif solution_found and args.process_all:
logger.info(
"SIMBAD thinks it has found a solution however process_all is set, continuing to contaminant search")
else:
logger.info("No results found - lattice search was unsuccessful")
if args.output_pdb and args.output_mtz:
csv = os.path.join(args.work_dir, 'latt/lattice_mr.csv')
all_results['latt'] = simbad.util.result_by_score_from_csv(csv, 'final_r_free', ascending=True)
gui.display_results(False, args.results_to_display)
# =====================================================================================
# Perform the contaminant search
solution_found = simbad.command_line._simbad_contaminant_search(args)
logger.info("Contaminant search completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.lap.time_pretty)
if solution_found and not args.process_all:
logger.info(
"Check you out, crystallizing contaminants! But don't worry, SIMBAD figured it out and found a solution.")
continue
elif solution_found and args.process_all:
logger.info(
"SIMBAD thinks it has found a solution however process_all is set, continuing to morda search")
else:
logger.info(
"No results found - contaminant search was unsuccessful")
if args.output_pdb and args.output_mtz:
csv = os.path.join(args.work_dir, 'cont/cont_mr.csv')
all_results['cont'] = simbad.util.result_by_score_from_csv(csv, 'final_r_free', ascending=True)
gui.display_results(False, args.results_to_display)
# =====================================================================================
# Perform the morda search
solution_found = simbad.command_line._simbad_morda_search(args)
logger.info("Full MoRDa domain search completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.lap.time_pretty)
if solution_found:
logger.info("... and SIMBAD worked once again. Get in!")
continue
else:
logger.info("No results found - full search was unsuccessful")
if args.output_pdb and args.output_mtz:
csv = os.path.join(args.work_dir, 'morda/morda_mr.csv')
all_results['morda'] = simbad.util.result_by_score_from_csv(csv, 'final_r_free', ascending=True)
gui.display_results(False, args.results_to_display)
# =====================================================================================
# Make sure we only run the loop once for now
end_of_cycle = True
if len(all_results) >= 1:
if sys.version_info.major == 3:
sorted_results = sorted(all_results.items(), key=lambda kv: (kv[1], kv))
else:
sorted_results = sorted(all_results.iteritems(), key=lambda kv: (kv[1], kv))
result = sorted_results[0][1]
simbad.util.output_files(args.work_dir, result, args.output_pdb, args.output_mtz)
stopwatch.stop()
logger.info("All processing completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.time_pretty)
gui.display_results(True, args.results_to_display)
if args.rvapi_document:
gui.save_document()
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.NOTSET)
try:
main()
except Exception:
simbad.exit.exit_error(*sys.exc_info())
| bsd-3-clause | 6,700,612,757,377,132,000 | 39.70625 | 146 | 0.614617 | false |
mdavidsaver/spicetools | spicetools/bench/fileframe.py | 1 | 2613 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2014 Michael Davidsaver
License is GPL3+, see file LICENSE for details
"""
import logging
_log=logging.getLogger(__name__)
import os, os.path
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
from .fileframe_ui import Ui_FileFrame
class FileFrame(QtGui.QFrame):
fileChanged = QtCore.pyqtSignal(QtCore.QString)
typeChanged = QtCore.pyqtSignal(bool)
def __init__(self, parent):
super(FileFrame, self).__init__(parent)
self.ui = Ui_FileFrame()
self.ui.setupUi(self)
self.dia = QtGui.QFileDialog(self, "Select Net or Schem.",
os.getcwd(),
"Net/Schem. (*.net *.sch);;All (*)")
self.dia.fileSelected.connect(self.setFile)
self.ui.fileBox.activated.connect(self._fileChange)
self.ui.typeBox.currentIndexChanged.connect(self._typeChanged)
self.ui.fileBtn.clicked.connect(self._select_existing)
A = QtGui.QAction("&Create file", self.ui.fileBtn)
self.ui.fileBtn.addAction(A)
A.activated.connect(self._select_new)
A = QtGui.QAction("S&elect file", self.ui.fileBtn)
A.activated.connect(self._select_existing)
self.ui.fileBtn.addAction(A)
def _select_existing(self):
self.dia.setFileMode(self.dia.ExistingFile)
self.dia.setAcceptMode(self.dia.AcceptOpen)
self.dia.exec_()
def _select_new(self):
self.dia.setFileMode(self.dia.AnyFile)
self.dia.setAcceptMode(self.dia.AcceptSave)
R = self.dia.exec_()
if not R:
return
F = str(self.dia.selectedFiles()[0])
_log.info("Create %s", F)
with open(F, 'w') as F:
pass # create empty file
def clear(self):
self.setFile('')
self.setType(True)
def _fileChange(self):
self.fileChanged.emit(self.ui.fileBox.currentText())
def _typeChanged(self, i):
self.typeChanged.emit(i==1)
def setFile(self, fname):
self.dia.selectFile(fname)
self.ui.fileBox.setEditText(fname)
self.fileChanged.emit(fname)
def setType(self, B):
self.ui.typeBox.setCurrentIndex(1 if B else 0)
def file(self):
return self.ui.fileBox.currentText()
def type(self):
return self.ui.typeBox.currentIndex()==1
file = QtCore.pyqtProperty(QtCore.QString, file, setFile,
notify=fileChanged)
type = QtCore.pyqtProperty(bool, type, setType,
notify=typeChanged)
| gpl-3.0 | -8,681,363,298,104,596,000 | 28.359551 | 73 | 0.606965 | false |
zathras777/pywind | pywind/ofgem/objects.py | 1 | 8218 | # coding=utf-8
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
from datetime import datetime
from pprint import pprint
from pywind.utils import map_xml_to_dict
class OfgemObjectBase(object):
XML_MAPPING = None
def __init__(self, node):
""" Extract information from the supplied XML node.
The factor figure is MWh per certificate.
"""
if self.XML_MAPPING is None:
raise NotImplementedError("Child classes should define their XML_MAPPING")
self.attrs = map_xml_to_dict(node, self.XML_MAPPING)
# pprint(self.attrs)
def __getattr__(self, item):
if item in self.attrs:
return self.attrs[item]
raise AttributeError(item)
def as_row(self):
"""
Return the information in correct format for :func:`rows()` usage
:returns: Formatted attribute dict
:rtype: dict
"""
return {'@{}'.format(key): self.attrs[key] for key in self.attrs.keys()}
class Certificates(OfgemObjectBase):
""" Certificate Number Fact Sheet
https://www.ofgem.gov.uk/sites/default/files/docs/roc_identifier_fact_sheet_dec_2015.pdf
"""
XML_MAPPING = (
('textbox4', 'generator_id'),
('textbox13', 'name'),
('textbox5', 'scheme'),
('textbox19', 'capacity', 'float', 0.0),
('textbox12', 'country'),
('textbox15', 'technology'),
('textbox31', 'generation_type'),
('textbox18', 'period'),
('textbox21', 'certs', 'int', 0),
('textbox24', 'start_no'),
('textbox27', 'finish_no'),
('textbox37', 'factor', 'float', 0.0),
('textbox30', 'issue_dt', 'date'),
('textbox33', 'status'),
('textbox36', 'status_dt', 'date'),
('textbox39', 'current_holder'),
('textbox45', 'reg_no')
)
def __init__(self, node):
OfgemObjectBase.__init__(self, node)
if self.attrs['period'].startswith("01"):
dt = datetime.strptime(self.attrs['period'][:10], '%d/%m/%Y')
self.attrs['period'] = dt.strftime("%b-%Y")
def __str__(self):
return " {} {} {:5d} {}".format(self.issue_dt.strftime("%Y %b %d"), self.start_no,
self.certs, self.current_holder)
@property
def digits(self):
""" Number of digits that store the certificate number.
:rtype: int
"""
return 10 if self.scheme == 'REGO' else 6
@property
def certificates(self):
""" Number of certificates covered by this object.
:rtype: int
"""
return self.finish - self.start + 1
@property
def start(self):
""" Return the numeric start number for the certificates.
Each certificate number contains the station, period and the number of the certificate,
so this function extracts the numeric part.
:returns: Start number of the certificates referenced
:rtype: int
"""
return int(self.start_no[10:10 + self.digits])
@property
def finish(self):
""" Return the numeric finish number for the certificates.
Each certificate number contains the station, period and the number of the certificate,
so this function extracts the numeric part.
:returns: Finish number of the certificates referenced
:rtype: integer
"""
return int(self.finish_no[10:10 + self.digits])
def output_summary(self):
""" Return a string with the output for the certificates.
:rtype: str
"""
perc = (float(self.certs) / self.capacity) * 100
return "%s: %s %s vs %s => %.02f%%" % (self.period, self.name, self.certs,
self.capacity, perc)
def station_details(self):
""" Get a dict object with the station information for these certificates.
:returns: Dict with just information relevant to identifying the station
:rtype: dict
"""
rv_dict = {fld: self.attrs[fld] for fld in ['generator_id',
'name',
'scheme',
'capacity',
'country',
'technology']}
rv_dict['output'] = self.output
return rv_dict
@property
def output(self):
""" Calculate the output based on the number of certs issued and factor.
:returns: Numeric output or 0
:rtype: float
"""
return self.certs / self.factor
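# --- Hedged worked example (not part of the original module) ---
# Illustrates the slicing performed by Certificates.start/.finish: the first
# ten characters of a certificate number identify the station and period,
# the remaining digits (10 for REGO, 6 otherwise) are the running number.
# The identifiers below are fabricated, not real Ofgem certificate numbers.
def _example_certificate_range():
    start_no = 'G01234WXYZ000001'
    finish_no = 'G01234WXYZ000250'
    digits = 6                              # non-REGO scheme assumed
    start = int(start_no[10:10 + digits])
    finish = int(finish_no[10:10 + digits])
    return finish - start + 1               # 250 certificates covered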
class Station(OfgemObjectBase):
"""
Store details of a single station using data from Ofgem.
The exposed object makes the individual pieces of data available by \
acting as a dict, i.e.
.. :code::
name = station['name']
The convenience function :func:`as_string` will return a full list of the data \
formatted for display in a terminal.
"""
XML_MAPPING = (
('GeneratorID', 'generator_id'),
('StatusName', 'status'),
('GeneratorName', 'name'),
('SchemeName', 'scheme'),
('Capacity', '', 'float'),
('Country',),
('TechnologyName', 'technology'),
('OutputType', 'output'),
('AccreditationDate', 'accreditation_dt', 'date'),
('CommissionDate', 'commission_dt', 'date'),
('textbox6', 'developer'),
('textbox61', 'developer_address', 'address'),
('textbox65', 'address', 'address'),
('FaxNumber', 'fax')
)
def __init__(self, node):
OfgemObjectBase.__init__(self, node)
# catch/correct some odd results I have observed...
if self.attrs['technology'] is not None and '\n' in self.attrs['technology']:
self.attrs['technology'] = self.attrs['technology'].split('\n')[0]
class CertificateStation(object):
""" We are normally interested in knowing about certificates issued to
a station, so this class attempts to simplify this process.
Once issued all certificates will be accounted for, but the final
owner and status may change. This class attempts to take a bunch of
Certificate objects and simplify them into a final set, with ownership
and status correctly attributed.
"""
def __init__(self, name, g_id, capacity, scheme):
self.name = name
self.generator_id = g_id
self.scheme = scheme
self.capacity = capacity
self.certs = []
def __len__(self):
return len(self.certs)
def __iter__(self):
for c in self.certs:
yield c
def add_cert(self, cert):
self.certs.append(cert)
def as_row(self):
return [cert.as_row() for cert in self.certs]
| unlicense | 3,872,703,660,219,905,000 | 33.970213 | 100 | 0.586152 | false |
robertnishihara/ray | python/ray/autoscaler/_private/aws/utils.py | 1 | 4590 | from collections import defaultdict
from ray.autoscaler._private.cli_logger import cli_logger
import colorful as cf
class LazyDefaultDict(defaultdict):
"""
LazyDefaultDict(default_factory[, ...]) --> dict with default factory
The default factory is call with the key argument to produce
a new value when a key is not present, in __getitem__ only.
A LazyDefaultDict compares equal to a dict with the same items.
All remaining arguments are treated the same as if they were
passed to the dict constructor, including keyword arguments.
"""
def __missing__(self, key):
"""
__missing__(key) # Called by __getitem__ for missing key; pseudo-code:
if self.default_factory is None: raise KeyError((key,))
self[key] = value = self.default_factory(key)
return value
"""
self[key] = self.default_factory(key)
return self[key]
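# A brief, hedged illustration (not part of the original module): unlike
# collections.defaultdict, the factory receives the missing key itself.
# The tag prefix below is invented.
def _example_lazy_default_dict():
    tags = LazyDefaultDict(lambda key: {"Name": "ray-node-" + key})
    return tags["head"]     # -> {"Name": "ray-node-head"}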
def handle_boto_error(exc, msg, *args, **kwargs):
if cli_logger.old_style:
# old-style logging doesn't do anything here
# so we exit early
return
error_code = None
error_info = None
# todo: not sure if these exceptions always have response
if hasattr(exc, "response"):
error_info = exc.response.get("Error", None)
if error_info is not None:
error_code = error_info.get("Code", None)
generic_message_args = [
"{}\n"
"Error code: {}",
msg.format(*args, **kwargs),
cf.bold(error_code)
]
# apparently
# ExpiredTokenException
# ExpiredToken
# RequestExpired
# are all the same pretty much
credentials_expiration_codes = [
"ExpiredTokenException", "ExpiredToken", "RequestExpired"
]
if error_code in credentials_expiration_codes:
# "An error occurred (ExpiredToken) when calling the
# GetInstanceProfile operation: The security token
# included in the request is expired"
# "An error occurred (RequestExpired) when calling the
# DescribeKeyPairs operation: Request has expired."
token_command = (
"aws sts get-session-token "
"--serial-number arn:aws:iam::" + cf.underlined("ROOT_ACCOUNT_ID")
+ ":mfa/" + cf.underlined("AWS_USERNAME") + " --token-code " +
cf.underlined("TWO_FACTOR_AUTH_CODE"))
secret_key_var = (
"export AWS_SECRET_ACCESS_KEY = " + cf.underlined("REPLACE_ME") +
" # found at Credentials.SecretAccessKey")
session_token_var = (
"export AWS_SESSION_TOKEN = " + cf.underlined("REPLACE_ME") +
" # found at Credentials.SessionToken")
access_key_id_var = (
"export AWS_ACCESS_KEY_ID = " + cf.underlined("REPLACE_ME") +
" # found at Credentials.AccessKeyId")
# fixme: replace with a Github URL that points
# to our repo
aws_session_script_url = ("https://gist.github.com/maximsmol/"
"a0284e1d97b25d417bd9ae02e5f450cf")
cli_logger.verbose_error(*generic_message_args)
cli_logger.verbose(vars(exc))
cli_logger.panic("Your AWS session has expired.")
cli_logger.newline()
cli_logger.panic("You can request a new one using")
cli_logger.panic(cf.bold(token_command))
cli_logger.panic("then expose it to Ray by setting")
cli_logger.panic(cf.bold(secret_key_var))
cli_logger.panic(cf.bold(session_token_var))
cli_logger.panic(cf.bold(access_key_id_var))
cli_logger.newline()
cli_logger.panic("You can find a script that automates this at:")
cli_logger.panic(cf.underlined(aws_session_script_url))
# Do not re-raise the exception here because it looks awful
# and we already print all the info in verbose
cli_logger.abort()
# todo: any other errors that we should catch separately?
cli_logger.panic(*generic_message_args)
cli_logger.newline()
with cli_logger.verbatim_error_ctx("Boto3 error:"):
cli_logger.verbose("{}", str(vars(exc)))
cli_logger.panic("{}", str(exc))
cli_logger.abort()
def boto_exception_handler(msg, *args, **kwargs):
# todo: implement timer
class ExceptionHandlerContextManager():
def __enter__(self):
pass
def __exit__(self, type, value, tb):
import botocore
if type is botocore.exceptions.ClientError:
handle_boto_error(value, msg, *args, **kwargs)
return ExceptionHandlerContextManager()
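# --- Hedged usage sketch (not part of the original module) ---
# Wraps an AWS call so that a botocore ClientError is routed through
# handle_boto_error(); the boto3 call and region are illustrative only.
def _example_describe_instances(region="us-west-2"):
    import boto3
    ec2 = boto3.client("ec2", region_name=region)
    with boto_exception_handler("Failed to describe instances in {}", region):
        return ec2.describe_instances()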
| apache-2.0 | 369,375,581,428,346,560 | 34.859375 | 78 | 0.621133 | false |
zaneveld/picrust | picrust/util.py | 1 | 16630 | #!/usr/bin/env python
# File created on 23 Nov 2011
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2015, The PICRUSt Project"
__credits__ = ["Greg Caporaso", "Morgan Langille", "Daniel McDonald"]
__license__ = "GPL"
__version__ = "1.1.0"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
__status__ = "Development"
from json import dumps
from os.path import abspath, dirname, isdir
from os import makedirs
from cogent.core.tree import PhyloNode, TreeError
from numpy import array, asarray, atleast_1d
from biom import Table, parse_table
from biom.table import vlen_list_of_str_formatter
from biom.util import biom_open, HAVE_H5PY
from subprocess import Popen, PIPE
import StringIO
def make_sample_transformer(scaling_factors):
def transform_sample(sample_value,sample_id,sample_metadata):
scaling_factor = scaling_factors[sample_id]
new_val = sample_value * scaling_factor
return new_val
return transform_sample
def scale_metagenomes(metagenome_table,scaling_factors):
""" scale metagenomes from metagenome table and scaling factors
"""
transform_sample_f = make_sample_transformer(scaling_factors)
new_metagenome_table = metagenome_table.transform(transform_sample_f)
return new_metagenome_table
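# --- Hedged usage sketch (not part of the original module) ---
# Shows the per-sample scaling closure on its own; scale_metagenomes() simply
# applies the same closure to every sample of a BIOM table via transform().
# The sample ids and factors are invented.
def _example_sample_scaling():
    transform = make_sample_transformer({'Sample1': 0.5, 'Sample2': 2.0})
    return transform(10.0, 'Sample1', None)     # -> 5.0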
def convert_precalc_to_biom(precalc_in, ids_to_load=None,transpose=True,md_prefix='metadata_'):
"""Loads PICRUSTs tab-delimited version of the precalc file and outputs a BIOM object"""
#if given a string convert to a filehandle
if type(precalc_in) ==str or type(precalc_in) == unicode:
fh = StringIO.StringIO(precalc_in)
else:
fh=precalc_in
#first line has to be header
header_ids=fh.readline().strip().split('\t')
col_meta_locs={}
for idx,col_id in enumerate(header_ids):
if col_id.startswith(md_prefix):
col_meta_locs[col_id[len(md_prefix):]]=idx
end_of_data=len(header_ids)-len(col_meta_locs)
trait_ids = header_ids[1:end_of_data]
col_meta=[]
row_meta=[{} for i in trait_ids]
if ids_to_load is not None and len(ids_to_load) > 0:
ids_to_load=set(ids_to_load)
load_all_ids=False
else:
load_all_ids=True
matching=[]
otu_ids=[]
for line in fh:
fields = line.strip().split('\t')
row_id=fields[0]
if(row_id.startswith(md_prefix)):
#handle metadata
#determine type of metadata (this may not be perfect)
metadata_type=determine_metadata_type(line)
for idx,trait_name in enumerate(trait_ids):
row_meta[idx][row_id[len(md_prefix):]]=parse_metadata_field(fields[idx+1],metadata_type)
elif load_all_ids or (row_id in set(ids_to_load)):
otu_ids.append(row_id)
matching.append(map(float,fields[1:end_of_data]))
#add metadata
col_meta_dict={}
for meta_name in col_meta_locs:
col_meta_dict[meta_name]=fields[col_meta_locs[meta_name]]
col_meta.append(col_meta_dict)
if not load_all_ids:
ids_to_load.remove(row_id)
if not otu_ids:
raise ValueError,"No OTUs match identifiers in precalculated file. PICRUSt requires an OTU table that was reference/closed-reference picked against GreenGenes.\nExample of the first 5 OTU ids from your table: {0}".format(', '.join(list(ids_to_load)[:5]))
if ids_to_load:
raise ValueError,"One or more OTU ids were not found in the precalculated file!\nAre you using the correct --gg_version?\nExample of (the {0}) unknown OTU ids: {1}".format(len(ids_to_load),', '.join(list(ids_to_load)[:5]))
#note that we transpose the data before making biom obj
matching = asarray(matching)
if transpose:
return Table(matching.T, trait_ids, otu_ids, row_meta, col_meta,
type='Gene table')
else:
return Table(matching, otu_ids, trait_ids, col_meta, row_meta,
type='Gene table')
def convert_biom_to_precalc(biom_table):
"""Converts a biom table into a PICRUSt precalculated tab-delimited file """
col_ids = biom_table.ids(axis='observation')
row_ids = biom_table.ids()
lines = []
header = ['#OTU_IDs'] + list(col_ids)
col_metadata_names = []
# peak at metadata for Samples (e.g. NSTI) so we can set the header
if biom_table.metadata():
col_metadata_names = biom_table.metadata()[0].keys()
#add the metadata names to the header
for col_metadata_name in col_metadata_names:
header.append('metadata_' + col_metadata_name)
lines.append(map(str, header))
row_metadata_names = []
# peak at metadata for observations (e.g. KEGG_Pathways)
if biom_table.metadata(axis='observation'):
row_metadata_names = biom_table.metadata(axis='observation')[0].keys()
for metadata_name in row_metadata_names:
metadata_line = ['metadata_' + metadata_name]
# do the observation metadata now
for col_id in col_ids:
metadata = biom_table.metadata(axis='observation')[biom_table.index(col_id, axis='observation')]
metadata_line.append(biom_meta_to_string(metadata[metadata_name]))
lines.append(map(str, metadata_line))
# transpose the actual count data
transposed_table = biom_table._data.T
for idx, count in enumerate(transposed_table.toarray()):
line = [row_ids[idx]] + map(str, count)
# add the metadata values to the end of the row now
for meta_name in col_metadata_names:
line.append(biom_table.metadata()[idx][meta_name])
lines.append(line)
return "\n".join("\t".join(map(str, x)) for x in lines)
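# --- Hedged round-trip sketch (not part of the original module) ---
# Converts a tiny in-memory precalculated table to BIOM and back; the OTU ids
# and KO identifiers are invented and carry no metadata columns.
def _example_precalc_round_trip():
    precalc = ("#OTU_IDs\tK00001\tK00002\n"
               "1234\t1.0\t0.0\n"
               "5678\t2.0\t3.0\n")
    table = convert_precalc_to_biom(precalc, ids_to_load=['1234', '5678'])
    return convert_biom_to_precalc(table)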
def determine_metadata_type(line):
if ';' in line:
if '|' in line:
return 'list_of_lists'
else:
return 'list'
else:
return 'string'
def parse_metadata_field(metadata_str,metadata_format='string'):
if metadata_format == 'string':
return metadata_str
elif metadata_format == 'list':
return [e.strip() for e in metadata_str.split(';')]
elif metadata_format == 'list_of_lists':
return [[e.strip() for e in y.split(';')] for y in metadata_str.split('|')]
def biom_meta_to_string(metadata):
""" Determine which format the metadata is and then convert to a string"""
#Note that since ';' and '|' are used as seperators we must replace them if they exist
if type(metadata) ==str or type(metadata)==unicode:
return metadata.replace(';',':')
elif type(metadata) == list:
if type(metadata[0]) == list:
return "|".join(";".join([y.replace(';',':').replace('|',':') for y in x]) for x in metadata)
else:
return ";".join(x.replace(';',':') for x in metadata)
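# --- Hedged round-trip example (not part of the original module) ---
# Shows how the 'list_of_lists' metadata encoding maps to Python values and
# back; the pathway strings are illustrative.
def _example_metadata_round_trip():
    raw = 'Metabolism; Energy Metabolism|Metabolism; Carbon Fixation'
    as_lists = parse_metadata_field(raw, determine_metadata_type(raw))
    # -> [['Metabolism', 'Energy Metabolism'], ['Metabolism', 'Carbon Fixation']]
    return biom_meta_to_string(as_lists)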
def system_call(cmd, shell=True):
"""Call cmd and return (stdout, stderr, return_value).
cmd can be either a string containing the command to be run, or a sequence
of strings that are the tokens of the command.
Please see Python's subprocess.Popen for a description of the shell
parameter and how cmd is interpreted differently based on its value.
This code was copied from QIIME's qiime_system_call() (util.py) function on June 3rd, 2013.
"""
proc = Popen(cmd, shell=shell, universal_newlines=True, stdout=PIPE,
stderr=PIPE)
# communicate pulls all stdout/stderr from the PIPEs to
# avoid blocking -- don't remove this line!
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
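# --- Hedged usage sketch (not part of the original module) ---
# Runs a trivial shell command through system_call(); 'echo' stands in for
# the external tools PICRUSt normally shells out to.
def _example_system_call():
    stdout, stderr, return_value = system_call("echo picrust")
    return return_value == 0 and stdout.strip() == "picrust"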
def file_contains_nulls(file):
"""Checks given file for null characters. These are sometimes created on SGE clusters when system IO is overloaded."""
return '\x00' in open(file,'rb').read()
def parse_table_to_biom(table_lines, table_format="tab-delimited",\
biom_format = 'otu table'):
"""Read the lines of an open trait table file, and output a .biom table object
The trait table must be either a biom file, or a picrust tab-delimited file
table_format -- must be either 'tab-delimited' or 'biom'
"""
return parse_table(table_lines)
def get_picrust_project_dir():
""" Returns the top-level PICRUST directory
"""
# Get the full path of util.py
current_file_path = abspath(__file__)
# Get the directory containing util.py
current_dir_path = dirname(current_file_path)
# Return the directory containing the directory containing util.py
return dirname(current_dir_path)
def transpose_trait_table_fields(data_fields,header,id_row_idx=0,\
input_header_delimiter="\t",output_delimiter="\t"):
"""Transpose the fields of a trait table, returning new data_fields,header
data_fields: list of lists for data fields
header: a string describing the header_line
id_row_idx: index of row labels. Almost always 0 but included for completeness
input_header_delimiter: delimiter for fields in the header string
output_delimiter: use this delimiter to join header fields
NOTE: typically the header and data fields are generated
by parse_trait_table in picrust.parse
"""
header_fields = header.split(input_header_delimiter)
# ensure no trailing newlines
old_header_fields = [h.strip() for h in header_fields]
new_header_fields = [old_header_fields[0]] + \
[df[id_row_idx].strip() for df in data_fields]
non_label_data_fields = []
for row in data_fields:
non_label_fields = [e for i, e in enumerate(row) if i != id_row_idx]
non_label_data_fields.append(non_label_fields)
data_array = array(non_label_data_fields)
new_data_array = data_array.T
new_rows = []
for i,row in enumerate(new_data_array):
label = old_header_fields[i+1]
# this is i+1 not i because i is the blank/meaningless
# upper left corner entry.
new_row = [label] + list(row)
new_rows.append(new_row)
new_header = output_delimiter.join(new_header_fields)
return new_header + "\n", new_rows
def make_output_dir_for_file(filepath):
"""Create sub-directories for a new file if they don't already exist"""
dirpath = dirname(filepath)
if not isdir(dirpath) and not dirpath == '':
makedirs(dirpath)
def write_biom_table(biom_table, biom_table_fp, compress=True,
write_hdf5=HAVE_H5PY, format_fs=None):
"""Writes a BIOM table to the specified filepath
Parameters
----------
biom_table : biom.Table
The table object to write out
biom_table_fp : str
The path to the output file
compress : bool, optional
Defaults to ``True``. If True, built-in compression on the output HDF5
file will be enabled. This option is only relevant if ``write_hdf5`` is
``True``.
write_hdf5 : bool, optional
Defaults to ``True`` if H5PY is installed and to ``False`` if H5PY is
not installed. If ``True`` the output biom table will be written as an
HDF5 binary file, otherwise it will be a JSON string.
format_fs : dict, optional
Formatting functions to be passed to `Table.to_hdf5`
Notes
-----
This code was adapted from QIIME 1.9
"""
generated_by = "PICRUSt " + __version__
if write_hdf5:
with biom_open(biom_table_fp, 'w') as biom_file:
biom_table.to_hdf5(biom_file, generated_by, compress,
format_fs=format_fs)
else:
with open(biom_table_fp, 'w') as biom_file:
biom_table.to_json(generated_by, biom_file)
def make_output_dir(dirpath, strict=False):
"""Make an output directory if it doesn't exist
Returns the path to the directory
dirpath -- a string describing the path to the directory
strict -- if True, raise an exception if dir already
exists
"""
dirpath = abspath(dirpath)
#Check if directory already exists
if isdir(dirpath):
if strict == True:
err_str = "Directory '%s' already exists" % dirpath
raise IOError(err_str)
return dirpath
try:
makedirs(dirpath)
except IOError,e:
        err_str = "Could not create directory '%s'. Are permissions set correctly? Got error: '%s'" % (dirpath, e)
raise IOError(err_str)
return dirpath
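# Illustrative usage sketch (paths are made up): make_output_dir is safe to
# call repeatedly unless strict=True is requested.
#   out_dir = make_output_dir('./results/run1')      # created if missing
#   out_dir = make_output_dir('./results/run1')      # fine, already exists
#   make_output_dir('./results/run1', strict=True)   # raises IOError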
class PicrustNode(PhyloNode):
    def multifurcating(self, num, eps=None, constructor=None):
        """Return a new tree with every node having at most num children
        num : the maximum number of children a node may have
eps : default branch length to set if self or constructor is of
PhyloNode type
constructor : a TreeNode or subclass constructor. If None, uses self
"""
if num < 2:
raise TreeError, "Minimum number of children must be >= 2"
if eps is None:
eps = 0.0
if constructor is None:
constructor = self.__class__
if hasattr(constructor, 'Length'):
set_branchlength = True
else:
set_branchlength = False
new_tree = self.copy()
for n in new_tree.preorder(include_self=True):
while len(n.Children) > num:
new_node = constructor(Children=n.Children[-num:])
if set_branchlength:
new_node.Length = eps
n.append(new_node)
return new_tree
def bifurcating(self, eps=None, constructor=None):
"""Wrap multifurcating with a num of 2"""
return self.multifurcating(2, eps, constructor)
def nameUnnamedNodes(self):
"""sets the Data property of unnamed nodes to an arbitrary value
Internal nodes are often unnamed and so this function assigns a
value for referencing.
        Note: This method is faster than pycogent's nameUnnamedNodes()
        because it uses a dict instead of an array. Also, we traverse
        only internal nodes (tips are not included).
"""
#make a list of the names that are already in the tree
names_in_use = {}
for node in self.iterNontips(include_self=True):
if node.Name:
names_in_use[node.Name]=1
#assign unique names to the Data property of nodes where Data = None
name_index = 1
for node in self.iterNontips(include_self=True):
#if (not node.Name) or re.match('edge',node.Name):
if not node.Name:
new_name = 'node' + str(name_index)
#choose a new name if name is already in tree
while new_name in names_in_use:
name_index += 1
new_name = 'node' + str(name_index)
node.Name = new_name
names_in_use[node.Name]=1
name_index += 1
def getSubTree(self,names):
"""return a new subtree with just the tips in names
assumes names is a set
assumes all names in names are present as tips in tree
"""
tcopy = self.deepcopy()
while len(tcopy.tips()) != len(names):
# for each tip, remove it if we do not want to keep it
for n in tcopy.tips():
if n.Name not in names:
n.Parent.removeNode(n)
# reduce single-child nodes
tcopy.prune()
return tcopy
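# Illustrative usage sketch (tip names are made up; assumes `tree` is a
# PicrustNode instance, e.g. parsed from a newick string with pycogent's
# DndParser using PicrustNode as the constructor):
#   tree.nameUnnamedNodes()                    # give internal nodes stable names
#   bifurcating_tree = tree.bifurcating()      # at most two children per node
#   subtree = tree.getSubTree({'tipA', 'tipB'})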
def list_of_list_of_str_formatter(grp, header, md, compression):
"""Serialize [[str]] into a BIOM hdf5 compatible form
Parameters
----------
grp : h5py.Group
This is ignored. Provided for passthrough
header : str
The key in each dict to pull out
md : list of dict
The axis metadata
compression : bool
Whether to enable dataset compression. This is ignored, provided for
passthrough
Returns
-------
grp : h5py.Group
The h5py.Group
header : str
The key in each dict to pull out
md : list of dict
The modified metadata that can be formatted in hdf5
compression : bool
Whether to enable dataset compression.
Notes
-----
This method is intended to be a "passthrough" to BIOM's
vlen_list_of_str_formatter method. It is a transform method.
"""
new_md = [{header: atleast_1d(asarray(dumps(m[header])))} for m in md]
return (grp, header, new_md, compression)
def picrust_formatter(*args):
"""Transform, and format"""
return vlen_list_of_str_formatter(*list_of_list_of_str_formatter(*args))
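# Illustrative wiring sketch (the 'KEGG_Pathways' key is hypothetical): these
# formatters are intended to be handed to write_biom_table via format_fs so
# that list-of-list string metadata can be serialized into HDF5.
#   write_biom_table(table, 'out.biom',
#                    format_fs={'KEGG_Pathways': picrust_formatter})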
| gpl-3.0 | 4,356,464,400,882,689,000 | 33.936975 | 243 | 0.632111 | false |
certik/chess | common/appenginepatch/appenginepatcher/patch.py | 1 | 9983 | # -*- coding: utf-8 -*-
from google.appengine.ext import db
import logging, os, sys
def patch_all():
patch_python()
patch_app_engine()
patch_django()
setup_logging()
def patch_python():
# Remove modules that we want to override
for module in ('httplib', 'urllib', 'urllib2', 'memcache',):
if module in sys.modules:
del sys.modules[module]
# For some reason the imp module can't be replaced via sys.path
from appenginepatcher import have_appserver
if have_appserver:
from appenginepatcher import imp
sys.modules['imp'] = imp
# Add fake error and gaierror to socket module. Required for boto support.
import socket
class error(Exception):
pass
class gaierror(Exception):
pass
socket.error = error
socket.gaierror = gaierror
if have_appserver:
def unlink(_):
raise NotImplementedError('App Engine does not support FS writes!')
os.unlink = unlink
def patch_app_engine():
# This allows for using Paginator on a Query object. We limit the number
# of results to 301, so there won't be any timeouts (301 because you can
# say "more than 300 results").
def __len__(self):
return self.count(301)
db.Query.__len__ = __len__
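    # Illustrative sketch (not part of the original patch; SomeModel is
    # hypothetical): with __len__ installed, Django's Paginator can consume
    # a Query directly.
    #   from django.core.paginator import Paginator
    #   paginator = Paginator(SomeModel.all(), 25)
    #   first_page = paginator.page(1)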
# Add "model" property to Query (needed by generic views)
class ModelProperty(object):
def __get__(self, query, unused):
try:
return query._Query__model_class
except:
return query._model_class
db.Query.model = ModelProperty()
# Add a few Model methods that are needed for serialization
def _get_pk_val(self):
return unicode(self.key())
db.Model._get_pk_val = _get_pk_val
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self._get_pk_val() == other._get_pk_val()
db.Model.__eq__ = __eq__
def __ne__(self, other):
return not self.__eq__(other)
db.Model.__ne__ = __ne__
# Make Property more Django-like (needed for serialization)
db.Property.serialize = True
db.Property.rel = None
class Relation(object):
field_name = 'key_name'
db.ReferenceProperty.rel = Relation
# Add repr to make debugging a little bit easier
def __repr__(self):
d = dict([(k, getattr(self, k)) for k in self.properties()])
return '%s(**%s)' % (self.__class__.__name__, repr(d))
db.Model.__repr__ = __repr__
# Replace save() method with one that calls put(), so a monkey-patched
# put() will also work if someone uses save()
def save(self):
return self.put()
db.Model.save = save
# Add _meta to Model, so porting code becomes easier (generic views,
# xheaders, and serialization depend on it).
class _meta(object):
many_to_many = []
class pk:
name = 'key_name'
def __init__(self, model):
self.app_label = model.__module__.split('.')[-2]
self.object_name = model.__name__
self.module_name = self.object_name.lower()
self.verbose_name = self.object_name.lower()
self.verbose_name_plural = None
self.abstract = False
self.model = model
def __str__(self):
return '%s.%s' % (self.app_label, self.module_name)
@property
def local_fields(self):
return self.model.properties().values()
# Register models with Django
old_init = db.PropertiedClass.__init__
def __init__(cls, name, bases, attrs):
"""Creates a combined appengine and Django model.
The resulting model will be known to both the appengine libraries and
Django.
"""
cls._meta = _meta(cls)
cls._default_manager = cls
old_init(cls, name, bases, attrs)
from django.db.models.loading import register_models
register_models(cls._meta.app_label, cls)
db.PropertiedClass.__init__ = __init__
def log_exception(*args, **kwargs):
logging.exception('Exception in request:')
def patch_django():
    # In order to speed things up and consume less memory we lazily replace
# modules if possible. This requires some __path__ magic. :)
# Add fake 'appengine' DB backend
# This also creates a separate datastore for each project.
from appenginepatcher.db_backends import appengine
sys.modules['django.db.backends.appengine'] = appengine
base_path = os.path.abspath(os.path.dirname(__file__))
# Replace generic views
from django.views import generic
generic.__path__.insert(0, os.path.join(base_path, 'generic_views'))
# Replace db session backend and tests
from django.contrib import sessions
sessions.__path__.insert(0, os.path.join(base_path, 'sessions'))
from django.contrib.sessions import backends
backends.__path__.insert(0, os.path.join(base_path, 'session_backends'))
# Replace the dispatchers.
from django.core import signals
# Log errors.
signals.got_request_exception.connect(log_exception)
# Unregister the rollback event handler.
import django.db
signals.got_request_exception.disconnect(django.db._rollback_on_exception)
# Replace auth models
# This MUST happen before any other modules import User or they'll
# get Django's original User model!!!
from appenginepatcher.auth import models
sys.modules['django.contrib.auth.models'] = models
# Replace rest of auth app
from django.contrib import auth
auth.__path__.insert(0, os.path.join(base_path, 'auth'))
# Replace ModelForm
# This MUST happen as early as possible, but after User got replaced!
from google.appengine.ext.db import djangoforms as aeforms
from django import forms
from django.forms import models as modelforms
forms.ModelForm = modelforms.ModelForm = aeforms.ModelForm
forms.ModelFormMetaclass = aeforms.ModelFormMetaclass
modelforms.ModelFormMetaclass = aeforms.ModelFormMetaclass
# Fix handling of verbose_name. Google resolves lazy translation objects
    # immediately, which of course breaks translation support.
from django.utils.text import capfirst
def get_form_field(self, form_class=forms.CharField, **kwargs):
defaults = {'required': self.required}
if self.verbose_name:
defaults['label'] = capfirst(self.verbose_name)
if self.choices:
choices = []
if not self.required or (self.default is None and
'initial' not in kwargs):
choices.append(('', '---------'))
for choice in self.choices:
choices.append((str(choice), unicode(choice)))
defaults['widget'] = forms.Select(choices=choices)
if self.default is not None:
defaults['initial'] = self.default
defaults.update(kwargs)
return form_class(**defaults)
db.Property.get_form_field = get_form_field
# Extend ModelForm with support for EmailProperty
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for an email property."""
defaults = {'form_class': forms.EmailField}
defaults.update(kwargs)
return super(db.EmailProperty, self).get_form_field(**defaults)
db.EmailProperty.get_form_field = get_form_field
# Fix default value of UserProperty (Google resolves the user too early)
def get_form_field(self, **kwargs):
from django.contrib.auth.models import User
from django.utils.functional import lazy
from google.appengine.api import users
defaults = {'initial': lazy(users.GetCurrentUser, User)}
defaults.update(kwargs)
return super(db.UserProperty, self).get_form_field(**defaults)
db.UserProperty.get_form_field = get_form_field
# Replace mail backend
from appenginepatcher import mail as gmail
from django.core import mail
mail.SMTPConnection = gmail.GoogleSMTPConnection
mail.mail_admins = gmail.mail_admins
mail.mail_managers = gmail.mail_managers
# Fix translation support if we're in a zip file. We change the path
# of the django.conf module, so the translation code tries to load
# Django's translations from the common/django-locale/locale folder.
from django import conf
from aecmd import COMMON_DIR
if '.zip' + os.sep in conf.__file__:
conf.__file__ = os.path.join(COMMON_DIR, 'django-locale', 'fake.py')
# Patch login_required if using Google Accounts
from django.conf import settings
if 'ragendja.auth.middleware.GoogleAuthenticationMiddleware' in \
settings.MIDDLEWARE_CLASSES:
from ragendja.auth.decorators import google_login_required, \
redirect_to_google_login
from django.contrib.auth import decorators, views
decorators.login_required = google_login_required
views.redirect_to_login = redirect_to_google_login
# Activate ragendja's GLOBALTAGS support (automatically done on import)
from ragendja import template
# Patch auth forms
from appenginepatcher import auth_forms_patch
# Add XML serializer
if not hasattr(settings, 'SERIALIZATION_MODULES'):
settings.SERIALIZATION_MODULES = {}
for name in ('xml', 'python', 'json', 'yaml'):
settings.SERIALIZATION_MODULES[name] = 'appenginepatcher.serializers.' \
+ name
# Patch DeserializedObject
from django.core.serializers import base
class DeserializedObject(base.DeserializedObject):
def save(self, save_m2m=True):
self.object.save()
self.object._parent = None
base.DeserializedObject = DeserializedObject
def setup_logging():
from django.conf import settings
if settings.DEBUG:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
| mit | 7,800,223,992,368,336,000 | 36.389513 | 80 | 0.650005 | false |
ronin13/pyvolume | pyvolume/sshfs.py | 1 | 4118 | # -*- coding: utf-8 -*-
""" Module providing SSHFileSystem implementation."""
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import os
import os.path
from plumbum import ProcessExecutionError
from plumbum.cmd import sshfs
from plumbum.cmd import sudo
from plumbum.cmd import umount
from pyvolume.exceptions import NeedOptionsException
log = logging.getLogger(__name__)
class SSHFileSystem(object):
"""
    Mounts an external directory pointed to by `remote_path`
onto `base` (/mnt by default) and passes it to Docker
to use as a volume. Uses vol_dict to keep track of
different volumes.
"""
def __init__(self, base):
self.base = base
self.sshfs_options = [
"-o",
"reconnect,cache_timeout=60,allow_other,uid=1000,gid=1000,intr",
]
self.vol_dict = {}
    def create(self, volname, options):
        """ Creates the local directory but does not mount it yet."""
if "remote_path" not in options:
raise NeedOptionsException("remote_path is a required option for sshfs")
remote_path = options["remote_path"]
local_path = os.path.join(self.base, volname)
log.info("Creating directory " + local_path)
os.mkdir(local_path)
cmdline = []
if "ssh_config" in options:
cmdline += ["-F", options["ssh_config"]]
if "sshfs_options" in options:
sshfs_options = [options["sshfs_options"]]
else:
sshfs_options = self.sshfs_options
cmdline += [remote_path]
cmdline += [local_path]
cmdline += sshfs_options
self.vol_dict[volname] = {
"Local": local_path,
"Remote": remote_path,
"cmdline": cmdline,
"mounted": False,
}
def list(self):
""" Lists the existing volumes being managed."""
vol_list = []
for volumes in self.vol_dict:
vol_list += [volumes]
return vol_list
def mount_check(self, volname):
"""Check if the volume is already mounted.
If mounted, return its path.
"""
if not self.vol_dict[volname]["mounted"]:
log.error("Volume {0} is not mounted".format(volname))
return None
return self.vol_dict[volname]["Local"]
    def path(self, volname):
        """Return the local path of the volume.
        Returns None if the volume is not mounted.
        """
if not self.mount_check(volname):
return None
return self.vol_dict[volname]["Local"]
def remove(self, volname):
"""
Removes the volume.
        It unmounts the remote if necessary and tolerates
        a volume that is already unmounted.
        Afterwards, it removes the mounted directory.
"""
local_path = self.vol_dict[volname]["Local"]
try:
self.umount(volname)
except ProcessExecutionError as e:
if e.retcode != 1:
raise
log.info("Removing local path " + local_path)
if os.path.exists(local_path):
os.rmdir(local_path)
return True
def mount(self, volname):
""" Mount the remote onto local for volname. """
check = self.mount_check(volname)
if check:
return check
cmdline = self.vol_dict[volname]["cmdline"]
mount_cmd = sshfs[cmdline]
mount_cmd()
self.vol_dict[volname]["mounted"] = True
return self.vol_dict[volname]["Local"]
def umount(self, volname):
if not self.mount_check(volname):
return None
local_path = self.vol_dict[volname]["Local"]
umount_cmd = sudo[umount[local_path]]
umount_cmd()
self.vol_dict[volname]["mounted"] = False
return True
def cleanup(self):
""" Unmounts and removes mount paths when shutting down."""
for volume in self.vol_dict:
self.remove(volume)
def scope(self):
""" Returns scope of this - global."""
return "global"
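# Illustrative usage sketch (host, paths and volume name are made up):
#   fs = SSHFileSystem('/mnt')
#   fs.create('datavol', {'remote_path': 'user@host:/srv/data'})
#   local_path = fs.mount('datavol')    # e.g. '/mnt/datavol'
#   fs.umount('datavol')
#   fs.remove('datavol')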
| mit | -1,407,974,807,998,446,800 | 29.279412 | 84 | 0.578679 | false |
PhilLidar-DAD/geonode | geonode/eula/models.py | 1 | 2090 | from django.db import models
from geonode.layers.models import Layer
from geonode.documents.models import Document
from datetime import datetime
from django.utils.translation import ugettext_lazy as _
from geonode.base.models import ResourceBase
from geonode.people.models import OrganizationType
from django_enumfield import enum
try:
from django.conf import settings
User = settings.AUTH_USER_MODEL
except ImportError:
from django.contrib.auth.models import User
from geonode.datarequests.models import LipadOrgType
# Create your models here.
class EULALayerDownload(models.Model):
date_time = models.DateTimeField(default=datetime.now)
user = models.ForeignKey(User, null=False, blank=False)
layer = models.ForeignKey(Layer, null=False, blank=False)
def __unicode__(self):
return "{0}:{1}".format(self.user.username, self.layer.title)
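# Illustrative sketch (user and layer objects are assumed to exist): a
# download acceptance can be recorded with the default manager, e.g.
#   EULALayerDownload.objects.create(user=some_user, layer=some_layer)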
class AnonDownloader(models.Model):
ORG_TYPE_CHOICES = LipadOrgType.objects.values_list('val', 'display_val')
date = models.DateTimeField(auto_now=True)
anon_first_name = models.CharField(_('First Name'), max_length=100)
anon_last_name = models.CharField(_('Last Name'), max_length=100)
anon_email = models.EmailField(_('Email'), max_length=50)
anon_organization = models.CharField(_('Organization'), max_length=100)
anon_purpose = models.CharField(_('Purpose'), max_length=100)
anon_layer = models.CharField(_('Layer Name'), max_length=100, null=True, blank=True,)
anon_orgtype = models.CharField(
_('Organization Type'),
max_length=100,
choices=ORG_TYPE_CHOICES,
default="Other",
help_text='Organization type based on Phil-LiDAR1 Data Distribution Policy'
)
anon_orgother = models.CharField(
_('If Other, please specify'),
max_length=255,
blank=True,
null=True,
)
# anon_resourcebase = models.ForeignKey(ResourceBase, null=True, blank=True, related_name='anon_resourcebase')
anon_document = models.CharField(_('Document Name'), max_length=100, null=True, blank=True,)
| gpl-3.0 | 5,668,980,210,110,807,000 | 42.541667 | 114 | 0.710526 | false |
wikimedia/pywikibot-core | tests/textlib_tests.py | 1 | 76796 | # -*- coding: utf-8 -*-
"""Test textlib module."""
#
# (C) Pywikibot team, 2011-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import codecs
from collections import OrderedDict
import functools
import os
import re
import pywikibot
import pywikibot.textlib as textlib
from pywikibot.textlib import _MultiTemplateMatchBuilder, extract_sections
from pywikibot import config, UnknownSite
from pywikibot.site import _IWEntry
from pywikibot.tools import suppress_warnings
from tests.aspects import (
unittest, require_modules, TestCase, DefaultDrySiteTestCase,
PatchingTestCase, SiteAttributeTestCase,
)
from tests import mock
files = {}
dirname = os.path.join(os.path.dirname(__file__), 'pages')
for f in ['enwiki_help_editing']:
with codecs.open(os.path.join(dirname, f + '.page'),
'r', 'utf-8') as content:
files[f] = content.read()
class TestSectionFunctions(TestCase):
"""Test wikitext section handling function."""
net = False
def setUp(self):
"""Setup tests."""
self.catresult1 = ('[[Category:Cat1]]%(LS)s[[Category:Cat2]]%(LS)s'
% {'LS': config.LS})
super(TestSectionFunctions, self).setUp()
def contains(self, fn, sn):
"""Invoke does_text_contain_section()."""
return textlib.does_text_contain_section(
files[fn], sn)
def assertContains(self, fn, sn, *args, **kwargs):
"""Test that files[fn] contains sn."""
self.assertEqual(self.contains(fn, sn), True, *args, **kwargs)
def assertNotContains(self, fn, sn, *args, **kwargs):
"""Test that files[fn] does not contain sn."""
self.assertEqual(self.contains(fn, sn), False, *args, **kwargs)
def testCurrentBehaviour(self):
"""Test that 'Editing' is found."""
self.assertContains('enwiki_help_editing', 'Editing')
def testSpacesInSection(self):
"""Test with spaces in section."""
self.assertContains('enwiki_help_editing', 'Minor_edits')
self.assertNotContains('enwiki_help_editing', '#Minor edits',
"Incorrect, '#Minor edits' does not work")
self.assertNotContains('enwiki_help_editing', 'Minor Edits',
'section hashes are case-sensitive')
self.assertNotContains('enwiki_help_editing', 'Minor_Edits',
'section hashes are case-sensitive')
@unittest.expectedFailure # TODO: T133276
def test_encoded_chars_in_section(self):
"""Test encoded chars in section."""
self.assertContains(
'enwiki_help_editing', 'Talk_.28discussion.29_pages',
'As used in the TOC')
def test_underline_characters_in_section(self):
"""Test with underline chars in section."""
self.assertContains('enwiki_help_editing', 'Talk_(discussion)_pages',
'Understood by mediawiki')
def test_spaces_outside_section(self):
"""Test with spaces around section."""
self.assertContains('enwiki_help_editing', 'Naming and_moving')
self.assertContains('enwiki_help_editing', ' Naming and_moving ')
self.assertContains('enwiki_help_editing', ' Naming and_moving_')
def test_link_in_section(self):
"""Test with link inside section."""
# section is ==[[Wiki markup]]==
self.assertContains('enwiki_help_editing', '[[Wiki markup]]',
'Link as section header')
self.assertContains('enwiki_help_editing', '[[:Wiki markup]]',
                            'section header link with leading colon')
self.assertNotContains('enwiki_help_editing', 'Wiki markup',
'section header must be a link')
# section is ===[[:Help]]ful tips===
self.assertContains('enwiki_help_editing', '[[Help]]ful tips',
'Containing link')
self.assertContains('enwiki_help_editing', '[[:Help]]ful tips',
                            'Containing link with leading colon')
self.assertNotContains('enwiki_help_editing', 'Helpful tips',
'section header must contain a link')
class TestFormatInterwiki(TestCase):
"""Test format functions."""
family = 'wikipedia'
code = 'en'
cached = True
def test_interwiki_format_Page(self):
"""Test formatting interwiki links using Page instances."""
interwikis = {
'de': pywikibot.Page(pywikibot.Link('de:German', self.site)),
'fr': pywikibot.Page(pywikibot.Link('fr:French', self.site))
}
self.assertEqual('[[de:German]]%(LS)s[[fr:French]]%(LS)s'
% {'LS': config.LS},
textlib.interwikiFormat(interwikis, self.site))
def test_interwiki_format_Link(self):
"""Test formatting interwiki links using Page instances."""
interwikis = {
'de': pywikibot.Link('de:German', self.site),
'fr': pywikibot.Link('fr:French', self.site),
}
self.assertEqual('[[de:German]]%(LS)s[[fr:French]]%(LS)s'
% {'LS': config.LS},
textlib.interwikiFormat(interwikis, self.site))
class TestFormatCategory(DefaultDrySiteTestCase):
"""Test category formatting."""
catresult = ('[[Category:Cat1]]%(LS)s[[Category:Cat2]]%(LS)s'
% {'LS': config.LS})
def test_category_format_raw(self):
"""Test formatting categories as strings formatted as links."""
self.assertEqual(self.catresult,
textlib.categoryFormat(['[[Category:Cat1]]',
'[[Category:Cat2]]'],
self.site))
def test_category_format_bare(self):
"""Test formatting categories as strings."""
self.assertEqual(self.catresult,
textlib.categoryFormat(['Cat1', 'Cat2'], self.site))
def test_category_format_Category(self):
"""Test formatting categories as Category instances."""
data = [pywikibot.Category(self.site, 'Cat1'),
pywikibot.Category(self.site, 'Cat2')]
self.assertEqual(self.catresult,
textlib.categoryFormat(data, self.site))
def test_category_format_Page(self):
"""Test formatting categories as Page instances."""
data = [pywikibot.Page(self.site, 'Category:Cat1'),
pywikibot.Page(self.site, 'Category:Cat2')]
self.assertEqual(self.catresult,
textlib.categoryFormat(data, self.site))
class TestCategoryRearrangement(DefaultDrySiteTestCase):
"""
Ensure that sorting keys are not being lost.
Tests .getCategoryLinks() and .replaceCategoryLinks(),
with both a newline and an empty string as separators.
"""
old = ('[[Category:Cat1]]%(LS)s[[Category:Cat2|]]%(LS)s'
'[[Category:Cat1| ]]%(LS)s[[Category:Cat2|key]]'
% {'LS': config.LS})
def test_standard_links(self):
"""Test getting and replacing categories."""
cats = textlib.getCategoryLinks(self.old, site=self.site)
new = textlib.replaceCategoryLinks(self.old, cats, site=self.site)
self.assertEqual(self.old, new)
def test_adjoining_links(self):
"""Test getting and replacing adjacent categories."""
cats_std = textlib.getCategoryLinks(self.old, site=self.site)
old = self.old.replace(config.LS, '')
cats = textlib.getCategoryLinks(old, site=self.site)
self.assertEqual(cats_std, cats)
sep = config.LS
config.line_separator = '' # use an empty separator temporarily
new = textlib.replaceCategoryLinks(old, cats, site=self.site)
# Restore the default separator.
config.line_separator = sep
self.assertEqual(old, new)
def test_indentation(self):
"""Test indentation from previous block."""
# Block of text
old = 'Some text%(LS)s%(LS)s' % {'LS': config.LS} + self.old
cats = textlib.getCategoryLinks(old, site=self.site)
new = textlib.replaceCategoryLinks(old, cats, site=self.site)
self.assertEqual(old, new)
# DEFAULTSORT
old_ds = '{{DEFAULTSORT:key}}%(LS)s' % {'LS': config.LS} + self.old
cats_ds = textlib.getCategoryLinks(old_ds, site=self.site)
new_ds = textlib.replaceCategoryLinks(old_ds, cats_ds, site=self.site)
self.assertEqual(old_ds, new_ds)
def test_in_place_replace(self):
"""Test in-place category change is reversible."""
dummy = pywikibot.Category(self.site, 'foo')
dummy.sortKey = 'bah'
cats = textlib.getCategoryLinks(self.old, site=self.site)
# Sanity checking
temp = textlib.replaceCategoryInPlace(self.old, cats[0], dummy,
site=self.site)
self.assertNotEqual(temp, self.old)
new = textlib.replaceCategoryInPlace(temp, dummy, cats[0],
site=self.site)
self.assertEqual(self.old, new)
temp = textlib.replaceCategoryInPlace(self.old, cats[1], dummy,
site=self.site)
self.assertNotEqual(temp, self.old)
new = textlib.replaceCategoryInPlace(temp, dummy, cats[1],
site=self.site)
self.assertEqual(self.old, new)
temp = textlib.replaceCategoryInPlace(self.old, cats[2], dummy,
site=self.site)
self.assertNotEqual(temp, self.old)
new = textlib.replaceCategoryInPlace(temp, dummy, cats[2],
site=self.site)
self.assertEqual(self.old, new)
temp = textlib.replaceCategoryInPlace(self.old, cats[3],
dummy, site=self.site)
self.assertNotEqual(temp, self.old)
new = textlib.replaceCategoryInPlace(temp, dummy, cats[3],
site=self.site)
self.assertEqual(self.old, new)
# Testing removing categories
temp = textlib.replaceCategoryInPlace(self.old, cats[0],
None, site=self.site)
self.assertNotEqual(temp, self.old)
temp_cats = textlib.getCategoryLinks(temp, site=self.site)
self.assertNotIn(cats[0], temp_cats)
# First and third categories are the same
self.assertEqual([cats[1], cats[3]], temp_cats)
# Testing adding categories
temp = textlib.replaceCategoryInPlace(
self.old, cats[0], cats[1], site=self.site,
add_only=True)
self.assertNotEqual(temp, self.old)
temp_cats = textlib.getCategoryLinks(temp, site=self.site)
self.assertEqual([cats[0], cats[1], cats[1],
cats[2], cats[1], cats[3]], temp_cats)
new_cats = textlib.getCategoryLinks(new, site=self.site)
self.assertEqual(cats, new_cats)
def test_in_place_retain_sort(self):
"""Test in-place category change does not alter the sortkey."""
# sort key should be retained when the new cat sortKey is None
dummy = pywikibot.Category(self.site, 'foo')
self.assertIsNone(dummy.sortKey)
cats = textlib.getCategoryLinks(self.old, site=self.site)
self.assertEqual(cats[3].sortKey, 'key')
orig_sortkey = cats[3].sortKey
temp = textlib.replaceCategoryInPlace(self.old, cats[3],
dummy, site=self.site)
self.assertNotEqual(self.old, temp)
new_dummy = textlib.getCategoryLinks(temp, site=self.site)[3]
self.assertIsNotNone(new_dummy.sortKey)
self.assertEqual(orig_sortkey, new_dummy.sortKey)
class TestTemplatesInCategory(TestCase):
"""Tests to verify that templates in category links are handled."""
family = 'wikipedia'
code = 'en'
cached = True
def test_templates(self):
"""Test normal templates inside category links."""
self.site = self.get_site()
self.assertEqual(textlib.getCategoryLinks(
'[[Category:{{P1|Foo}}]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:{{P1|Foo}}|bar]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='bar')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:{{P1|{{P2|L33t|Foo}}}}|bar]]',
self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='bar')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:Foo{{!}}bar]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='bar')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:Foo{{!}}bar]][[Category:Wiki{{P2||pedia}}]]',
self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='bar'),
pywikibot.page.Category(self.site, 'Wikipedia')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:Foo{{!}}and{{!}}bar]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='and|bar')])
with mock.patch.object(pywikibot, 'warning', autospec=True) as warn:
textlib.getCategoryLinks('[[Category:nasty{{{!}}]]', self.site)
warn.assert_called_once_with(
'Invalid category title extracted: nasty{{{!}}')
class TestTemplateParams(TestCase):
"""Test to verify that template params extraction works."""
net = False
def _common_results(self, func):
"""Common cases."""
self.assertEqual(func('{{a}}'), [('a', OrderedDict())])
self.assertEqual(func('{{ a}}'), [('a', OrderedDict())])
self.assertEqual(func('{{a }}'), [('a', OrderedDict())])
self.assertEqual(func('{{ a }}'), [('a', OrderedDict())])
self.assertEqual(func('{{a|b=c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b|c=d}}'),
[('a', OrderedDict((('1', 'b'), ('c', 'd'))))])
self.assertEqual(func('{{a|b=c|f=g|d=e|1=}}'),
[('a', OrderedDict((('b', 'c'), ('f', 'g'),
('d', 'e'), ('1', ''))))])
self.assertEqual(func('{{a|1=2|c=d}}'),
[('a', OrderedDict((('1', '2'), ('c', 'd'))))])
self.assertEqual(func('{{a|c=d|1=2}}'),
[('a', OrderedDict((('c', 'd'), ('1', '2'))))])
self.assertEqual(func('{{a|5=d|a=b}}'),
[('a', OrderedDict((('5', 'd'), ('a', 'b'))))])
self.assertEqual(func('{{a|=2}}'),
[('a', OrderedDict((('', '2'), )))])
self.assertEqual(func('{{a|}}'), [('a', OrderedDict((('1', ''), )))])
self.assertEqual(func('{{a|=|}}'),
[('a', OrderedDict((('', ''), ('1', ''))))])
self.assertEqual(func('{{a||}}'),
[('a', OrderedDict((('1', ''), ('2', ''))))])
self.assertEqual(func('{{a|b={{{1}}}}}'),
[('a', OrderedDict((('b', '{{{1}}}'), )))])
self.assertEqual(func('{{a|b=<noinclude>{{{1}}}</noinclude>}}'),
[('a', OrderedDict(
(('b', '<noinclude>{{{1}}}</noinclude>'), )))])
self.assertEqual(func('{{subst:a|b=c}}'),
[('subst:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{safesubst:a|b=c}}'),
[('safesubst:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{msgnw:a|b=c}}'),
[('msgnw:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{Template:a|b=c}}'),
[('Template:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{template:a|b=c}}'),
[('template:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{:a|b=c}}'),
[(':a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{subst::a|b=c}}'),
[('subst::a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b={{{1}}}|c={{{2}}}}}'),
[('a', OrderedDict((('b', '{{{1}}}'),
('c', '{{{2}}}'))))])
self.assertEqual(func('{{a|b=c}}{{d|e=f}}'),
[('a', OrderedDict((('b', 'c'), ))),
('d', OrderedDict((('e', 'f'), )))])
# initial '{' and '}' should be ignored as outer wikitext
self.assertEqual(func('{{{a|b}}X}'),
[('a', OrderedDict((('1', 'b'), )))])
# sf.net bug 1575: unclosed template
self.assertEqual(func('{{a'), [])
self.assertEqual(func('{{a}}{{foo|'), [('a', OrderedDict())])
def _unstripped(self, func):
"""Common cases of unstripped results."""
self.assertEqual(func('{{a|b=<!--{{{1}}}-->}}'),
[('a', OrderedDict((('b', '<!--{{{1}}}-->'), )))])
self.assertEqual(func('{{a| }}'),
[('a', OrderedDict((('1', ' '), )))])
self.assertEqual(func('{{a| | }}'),
[('a', OrderedDict((('1', ' '), ('2', ' '))))])
self.assertEqual(func('{{a| =|}}'),
[('a', OrderedDict(((' ', ''), ('1', ''))))])
self.assertEqual(func('{{a| b=c}}'),
[('a', OrderedDict(((' b', 'c'), )))])
self.assertEqual(func('{{a|b =c}}'),
[('a', OrderedDict((('b ', 'c'), )))])
self.assertEqual(func('{{a|b= c}}'),
[('a', OrderedDict((('b', ' c'), )))])
self.assertEqual(func('{{a|b=c }}'),
[('a', OrderedDict((('b', 'c '), )))])
self.assertEqual(func('{{a| foo |2= bar }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' bar '))))])
# The correct entry 'bar' is removed
self.assertEqual(func('{{a| foo |2= bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
# However whitespace prevents the correct item from being removed
self.assertEqual(func('{{a| foo | 2 = bar | baz }}'),
[('a', OrderedDict((('1', ' foo '), (' 2 ', ' bar '),
('2', ' baz '))))])
def _stripped(self, func):
"""Common cases of stripped results."""
self.assertEqual(func('{{a| }}'),
[('a', OrderedDict((('1', ' '), )))])
self.assertEqual(func('{{a| | }}'),
[('a', OrderedDict((('1', ' '), ('2', ' '))))])
self.assertEqual(func('{{a| =|}}'),
[('a', OrderedDict((('', ''), ('1', ''))))])
self.assertEqual(func('{{a| b=c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b =c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b= c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b=c }}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a| foo |2= bar }}'),
[('a', OrderedDict((('1', ' foo '), ('2', 'bar'))))])
# 'bar' is always removed
self.assertEqual(func('{{a| foo |2= bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
self.assertEqual(func('{{a| foo | 2 = bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
def _etp_regex_differs(self, func):
"""Common cases not handled the same by ETP_REGEX."""
# inner {} should be treated as part of the value
self.assertEqual(func('{{a|b={} }}'),
[('a', OrderedDict((('b', '{} '), )))])
def _order_differs(self, func):
"""Common cases where the order of templates differs."""
self.assertCountEqual(func('{{a|b={{c}}}}'),
[('a', OrderedDict((('b', '{{c}}'), ))),
('c', OrderedDict())])
self.assertCountEqual(func('{{a|{{c|d}}}}'),
[('c', OrderedDict((('1', 'd'), ))),
('a', OrderedDict([('1', '{{c|d}}')]))])
# inner '}' after {{b|c}} should be treated as wikitext
self.assertCountEqual(func('{{a|{{b|c}}}|d}}'),
[('a', OrderedDict([('1', '{{b|c}}}'),
('2', 'd')])),
('b', OrderedDict([('1', 'c')]))])
@require_modules('mwparserfromhell')
def test_extract_templates_params_mwpfh(self):
"""Test using mwparserfromhell."""
func = textlib.extract_templates_and_params_mwpfh
self._common_results(func)
self._order_differs(func)
self._unstripped(func)
self._etp_regex_differs(func)
self.assertCountEqual(func('{{a|{{c|{{d}}}}}}'),
[('c', OrderedDict((('1', '{{d}}'), ))),
('a', OrderedDict([('1', '{{c|{{d}}}}')])),
('d', OrderedDict())
])
self.assertCountEqual(func('{{a|{{c|{{d|}}}}}}'),
[('c', OrderedDict((('1', '{{d|}}'), ))),
('a', OrderedDict([('1', '{{c|{{d|}}}}')])),
('d', OrderedDict([('1', '')]))
])
@require_modules('mwparserfromhell')
def test_extract_templates_params_mwpfh_stripped(self):
"""Test using mwparserfromhell with stripping."""
func = functools.partial(textlib.extract_templates_and_params_mwpfh,
strip=True)
self._common_results(func)
self._order_differs(func)
self._stripped(func)
def test_extract_templates_params_regex(self):
"""Test using many complex regexes."""
func = functools.partial(textlib.extract_templates_and_params_regex,
remove_disabled_parts=False, strip=False)
self._common_results(func)
self._order_differs(func)
self._unstripped(func)
self.assertEqual(func('{{a|b={} }}'), []) # FIXME: {} is normal text
def test_extract_templates_params_regex_stripped(self):
"""Test using many complex regexes with stripping."""
func = textlib.extract_templates_and_params_regex
self._common_results(func)
self._order_differs(func)
self._stripped(func)
self.assertEqual(func('{{a|b=<!--{{{1}}}-->}}'),
[('a', OrderedDict((('b', ''), )))])
# Identical to mwpfh
self.assertCountEqual(func('{{a|{{c|{{d}}}}}}'),
[('c', OrderedDict((('1', '{{d}}'), ))),
('a', OrderedDict([('1', '{{c|{{d}}}}')])),
('d', OrderedDict())
])
# However fails to correctly handle three levels of balanced brackets
# with empty parameters
self.assertCountEqual(func('{{a|{{c|{{d|}}}}}}'),
[('c', OrderedDict((('1', '{{d|}}}'), ))),
('d', OrderedDict([('1', '}')]))
])
def test_extract_templates_params(self):
"""Test that the normal entry point works."""
func = functools.partial(textlib.extract_templates_and_params,
remove_disabled_parts=False, strip=False)
self._common_results(func)
self._unstripped(func)
func = functools.partial(textlib.extract_templates_and_params,
remove_disabled_parts=False, strip=True)
self._common_results(func)
self._stripped(func)
def test_template_simple_regex(self):
"""Test using simple regex."""
func = textlib.extract_templates_and_params_regex_simple
self._common_results(func)
self._etp_regex_differs(func)
# The simple regex copies the whitespace of mwpfh, but does
# not have additional entries for nested templates.
self.assertEqual(func('{{a| b={{c}}}}'),
[('a', OrderedDict(((' b', '{{c}}'), )))])
self.assertEqual(func('{{a|b={{c}}}}'),
[('a', OrderedDict((('b', '{{c}}'), )))])
self.assertEqual(func('{{a|b= {{c}}}}'),
[('a', OrderedDict((('b', ' {{c}}'), )))])
self.assertEqual(func('{{a|b={{c}} }}'),
[('a', OrderedDict((('b', '{{c}} '), )))])
# These three are from _order_differs, and while the first works
self.assertEqual(func('{{a|{{c}} }}'),
[('a', OrderedDict((('1', '{{c}} '), )))])
# an inner '|' causes extract_template_and_params_regex_simple to
# split arguments incorrectly in the next two cases.
self.assertEqual(func('{{a|{{c|d}} }}'),
[('a', OrderedDict([('1', '{{c'),
('2', 'd}} ')]))])
self.assertEqual(func('{{a|{{b|c}}}|d}}'),
[('a', OrderedDict([('1', '{{b'),
('2', 'c}}}'),
('3', 'd')]))])
# Safe fallback to handle arbitrary template levels
# by merging top level templates together.
# i.e. 'b' is not recognised as a template, and 'foo' is also
# consumed as part of 'a'.
self.assertEqual(func('{{a|{{c|{{d|{{e|}}}} }} }} foo {{b}}'),
[(None, OrderedDict())])
def test_regexes(self):
"""Test _ETP_REGEX, NESTED_TEMPLATE_REGEX and TEMP_REGEX."""
func = textlib._ETP_REGEX.search
self.assertIsNotNone(func('{{{1}}}'))
self.assertIsNotNone(func('{{a|b={{{1}}} }}'))
self.assertIsNotNone(func('{{a|b={{c}} }}'))
self.assertIsNotNone(func('{{a|b={{c}} }}'))
self.assertIsNotNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|{{c}} }}'))
self.assertIsNotNone(func('{{a|{{c|d}} }}'))
func = textlib._ETP_REGEX.match
self.assertIsNone(func('{{{1}}}'))
self.assertIsNotNone(func('{{#if:foo}}'))
self.assertIsNotNone(func('{{foo:}}'))
self.assertIsNotNone(func('{{CURRENTYEAR}}'))
self.assertIsNotNone(func('{{1}}'))
self.assertIsNone(func('{{a|b={{CURRENTYEAR}} }}'))
self.assertIsNone(func('{{a|b={{{1}}} }}'))
self.assertIsNone(func('{{a|b={{c}} }}'))
self.assertIsNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNone(func('{{a|b={} }}'))
self.assertIsNone(func('{{:a|b={{c|d=1}} }}'))
self.assertIsNone(func('{{a|{{c}} }}'))
self.assertIsNone(func('{{a|{{c|d}} }}'))
with suppress_warnings('textlib.TEMP_REGEX is deprecated'):
func = textlib.TEMP_REGEX.search
self.assertIsNotNone(func('{{{1}}}'))
self.assertIsNotNone(func('{{a|b={{c}} }}'))
self.assertIsNotNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|{{c}} }}'))
self.assertIsNotNone(func('{{a|{{c|d}} }}'))
with suppress_warnings('textlib.TEMP_REGEX is deprecated'):
func = textlib.TEMP_REGEX.match
self.assertIsNotNone(func('{{#if:foo}}'))
self.assertIsNotNone(func('{{foo:}}'))
self.assertIsNotNone(func('{{CURRENTYEAR}}'))
self.assertIsNotNone(func('{{1}}'))
self.assertIsNotNone(func('{{a|b={{CURRENTYEAR}} }}'))
self.assertIsNotNone(func('{{a|b={{{1}}} }}'))
self.assertIsNone(func('{{a|b={{c}} }}'))
self.assertIsNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|b={} }}'))
self.assertIsNone(func('{{:a|b={{c|d=1}} }}'))
self.assertIsNone(func('{{a|{{c}} }}'))
self.assertIsNone(func('{{a|{{c|d}} }}'))
func = textlib.NESTED_TEMPLATE_REGEX.search
# Numerically named templates are rejected
self.assertIsNone(func('{{1}}'))
self.assertIsNone(func('{{#if:foo}}'))
self.assertIsNone(func('{{{1}}}'))
self.assertIsNone(func('{{{1|}}}'))
self.assertIsNone(func('{{{15|a}}}'))
self.assertIsNone(func('{{{1|{{{2|a}}} }}}'))
self.assertIsNone(func('{{{1|{{2|a}} }}}'))
func = textlib.NESTED_TEMPLATE_REGEX.match
self.assertIsNotNone(func('{{CURRENTYEAR}}'))
self.assertIsNotNone(func('{{foo:bar}}'))
self.assertIsNone(func('{{1}}'))
self.assertIsNotNone(func('{{a|b={{CURRENTYEAR}} }}'))
self.assertIsNotNone(func('{{a|b={{{1}}} }}'))
self.assertIsNotNone(func('{{a|b={{c}} }}'))
self.assertIsNotNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|b={} }}'))
self.assertIsNotNone(func('{{:a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|{{c}} }}'))
self.assertIsNotNone(func('{{a|{{c|d}} }}'))
# All templates are captured when template depth is greater than 2
m = func('{{a|{{c|{{d|}} }} | foo = bar }} foo {{bar}} baz')
self.assertIsNotNone(m)
self.assertIsNotNone(m.group(0))
self.assertIsNone(m.group('name'))
self.assertIsNone(m.group(1))
self.assertIsNone(m.group('params'))
self.assertIsNone(m.group(2))
self.assertIsNotNone(m.group('unhandled_depth'))
self.assertTrue(m.group(0).endswith('foo {{bar}}'))
m = func('{{a|\n{{c|{{d|}} }}\n| foo = bar }} foo {{bar}} baz')
self.assertIsNotNone(m)
self.assertIsNotNone(m.group(0))
self.assertIsNone(m.group('name'))
self.assertIsNone(m.group(1))
self.assertIsNone(m.group('params'))
self.assertIsNone(m.group(2))
self.assertIsNotNone(m.group('unhandled_depth'))
self.assertTrue(m.group(0).endswith('foo {{bar}}'))
class TestGenericTemplateParams(PatchingTestCase):
"""Test whether the generic function forwards the call correctly."""
net = False
@PatchingTestCase.patched(textlib, 'extract_templates_and_params_mwpfh')
def extract_mwpfh(self, text, *args, **kwargs):
"""Patched call to extract_templates_and_params_mwpfh."""
self._text = text
self._args = args
self._mwpfh = True
@PatchingTestCase.patched(textlib, 'extract_templates_and_params_regex')
def extract_regex(self, text, *args, **kwargs):
"""Patched call to extract_templates_and_params_regex."""
self._text = text
self._args = args
self._mwpfh = False
def test_removing_disabled_parts_regex(self):
"""Test removing disabled parts when using the regex variant."""
self.patch(config, 'use_mwparserfromhell', False)
textlib.extract_templates_and_params('{{a<!-- -->}}', True)
self.assertEqual(self._text, '{{a}}')
self.assertFalse(self._mwpfh)
textlib.extract_templates_and_params('{{a<!-- -->}}', False)
self.assertEqual(self._text, '{{a<!-- -->}}')
self.assertFalse(self._mwpfh)
textlib.extract_templates_and_params('{{a<!-- -->}}')
self.assertEqual(self._text, '{{a}}')
self.assertFalse(self._mwpfh)
@require_modules('mwparserfromhell')
def test_removing_disabled_parts_mwpfh(self):
"""Test removing disabled parts when using the mwpfh variant."""
self.patch(config, 'use_mwparserfromhell', True)
textlib.extract_templates_and_params('{{a<!-- -->}}', True)
self.assertEqual(self._text, '{{a}}')
self.assertTrue(self._mwpfh)
textlib.extract_templates_and_params('{{a<!-- -->}}', False)
self.assertEqual(self._text, '{{a<!-- -->}}')
self.assertTrue(self._mwpfh)
textlib.extract_templates_and_params('{{a<!-- -->}}')
self.assertEqual(self._text, '{{a<!-- -->}}')
self.assertTrue(self._mwpfh)
def test_strip_regex(self):
"""Test stripping values when using the regex variant."""
self.patch(config, 'use_mwparserfromhell', False)
textlib.extract_templates_and_params('{{a| foo }}', False, True)
self.assertEqual(self._args, (False, True))
self.assertFalse(self._mwpfh)
textlib.extract_templates_and_params('{{a| foo }}', False, False)
self.assertEqual(self._args, (False, False))
self.assertFalse(self._mwpfh)
textlib.extract_templates_and_params('{{a| foo }}', False)
self.assertEqual(self._args, (False, True))
self.assertFalse(self._mwpfh)
@require_modules('mwparserfromhell')
def test_strip_mwpfh(self):
"""Test stripping values when using the mwpfh variant."""
self.patch(config, 'use_mwparserfromhell', True)
textlib.extract_templates_and_params('{{a| foo }}', None, True)
self.assertEqual(self._args, (True, ))
self.assertTrue(self._mwpfh)
textlib.extract_templates_and_params('{{a| foo }}', None, False)
self.assertEqual(self._args, (False, ))
self.assertTrue(self._mwpfh)
textlib.extract_templates_and_params('{{a| foo }}')
self.assertEqual(self._args, (False, ))
self.assertTrue(self._mwpfh)
class TestReplaceLinks(TestCase):
"""Test the replace_links function in textlib."""
sites = {
'wt': {
'family': 'wiktionary',
'code': 'en',
},
'wp': {
'family': 'wikipedia',
'code': 'en',
}
}
dry = True
text = ('Hello [[World]], [[how|are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
@classmethod
def setUpClass(cls):
"""Create a fake interwiki cache."""
super(TestReplaceLinks, cls).setUpClass()
# make APISite.interwiki work and prevent it from doing requests
for site in cls.sites.values():
mapping = {}
for iw in cls.sites.values():
mapping[iw['family']] = _IWEntry(True, 'invalid')
mapping[iw['family']]._site = iw['site']
mapping['bug'] = _IWEntry(False, 'invalid')
mapping['bug']._site = UnknownSite('Not a wiki')
mapping['en'] = _IWEntry(True, 'invalid')
mapping['en']._site = site['site']
site['site']._interwikimap._map = mapping
site['site']._interwikimap._site = None # prevent it from loading
cls.wp_site = cls.get_site('wp')
def test_replacements_function(self):
"""Test a dynamic function as the replacements."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
return pywikibot.Link('Homeworld', link.site)
elif link.title.lower() == 'you':
return False
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello [[Homeworld]], [[how|are]] you? Are you a [[bug:1337]]?')
def test_replacements_once(self):
"""Test dynamic replacement."""
def callback(link, text, groups, rng):
if link.title.lower() == 'you':
self._count += 1
if link.section:
return pywikibot.Link(
'{0}#{1}'
.format(self._count, link.section), link.site)
else:
return pywikibot.Link('{0}'
.format(self._count), link.site)
self._count = 0 # buffer number of found instances
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello [[World]], [[how|are]] [[1#section]]? Are [[2]] a '
'[[bug:1337]]?')
del self._count
def test_unlink_all(self):
"""Test unlinking."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
return False
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello World, are you? Are you a [[bug:1337]]?')
def test_unlink_some(self):
"""Test unlinking only some links."""
self.assertEqual(
textlib.replace_links(self.text, ('World', False), self.wp_site),
'Hello World, [[how|are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
self.assertEqual(
textlib.replace_links('[[User:Namespace|Label]]\n'
'[[User:Namespace#Section|Labelz]]\n'
'[[Nothing]]',
('User:Namespace', False),
self.wp_site),
'Label\nLabelz\n[[Nothing]]')
def test_replace_neighbour(self):
"""Test that it replaces two neighbouring links."""
self.assertEqual(
textlib.replace_links('[[A]][[A]][[C]]',
('A', 'B'),
self.wp_site),
'[[B|A]][[B|A]][[C]]')
def test_replacements_simplify(self):
"""Test a tuple as replacement removing the need for a piped link."""
self.assertEqual(
textlib.replace_links(self.text,
('how', 'are'),
self.wp_site),
'Hello [[World]], [[are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
def test_replace_file(self):
"""Test that it respects the namespace."""
self.assertEqual(
textlib.replace_links(
'[[File:Meh.png|thumb|Description of [[fancy]]]] '
'[[Fancy]]...', ('File:Meh.png', 'File:Fancy.png'),
self.wp_site),
'[[File:Fancy.png|thumb|Description of [[fancy]]]] [[Fancy]]...')
def test_replace_strings(self):
"""Test if strings can be used."""
self.assertEqual(
textlib.replace_links(self.text, ('how', 'are'), self.wp_site),
'Hello [[World]], [[are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
def test_replace_invalid_link_text(self):
"""Test that it doesn't pipe a link when it's an invalid link."""
self.assertEqual(
textlib.replace_links('[[Target|Foo:]]', ('Target', 'Foo'),
self.wp_site), '[[Foo|Foo:]]')
def test_replace_modes(self):
"""Test replacing with or without label and section."""
source_text = '[[Foo#bar|baz]]'
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar'), self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site, 'Bar')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar', self.wp_site)),
self.wp_site),
'[[Bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar#snafu'),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar#snafu')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar#snafu',
self.wp_site)),
self.wp_site),
'[[Bar#snafu]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar|foo'),
self.wp_site), '[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar|foo')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar|foo',
self.wp_site)),
self.wp_site),
'[[Bar|foo]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar#snafu|foo'),
self.wp_site), '[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar#snafu|foo')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar#snafu|foo',
self.wp_site)),
self.wp_site),
'[[Bar#snafu|foo]]')
def test_replace_different_case(self):
"""Test that it uses piped links when the case is different."""
source_text = '[[Foo|Bar]] and [[Foo|bar]]'
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'bar'),
self.get_site('wp')),
'[[Bar]] and [[bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'bar'),
self.get_site('wt')),
'[[bar|Bar]] and [[bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar'),
self.get_site('wt')),
'[[Bar]] and [[Bar|bar]]')
@unittest.expectedFailure
def test_label_diff_namespace(self):
"""Test that it uses the old label when the new doesn't match."""
# These tests require to get the actual part which is before the title
# (interwiki and namespace prefixes) which could be then compared
# case insensitive.
self.assertEqual(
textlib.replace_links('[[Image:Foobar]]',
('File:Foobar', 'File:Foo'), self.wp_site),
'[[File:Foo|Image:Foobar]]')
self.assertEqual(
textlib.replace_links('[[en:File:Foobar]]',
('File:Foobar', 'File:Foo'), self.wp_site),
'[[File:Foo|en:File:Foobar]]')
def test_linktrails(self):
"""Test that the linktrails are used or applied."""
self.assertEqual(
textlib.replace_links('[[Foobar]]', ('Foobar', 'Foo'),
self.wp_site),
'[[Foo]]bar')
self.assertEqual(
textlib.replace_links('[[Talk:test]]s',
('Talk:Test', 'Talk:Tests'), self.wp_site),
'[[Talk:tests]]')
self.assertEqual(
textlib.replace_links('[[Talk:test]]s',
('Talk:Test', 'Project:Tests'),
self.wp_site),
'[[Project:Tests|Talk:tests]]')
def test_unicode_callback(self):
"""Test returning unicode in the callback."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
# This must be a unicode instance not bytes
return 'homewörlder'
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello homewörlder, [[how|are]] [[you#section|you]]? '
'Are [[you]] a [[bug:1337]]?')
def test_bytes_callback(self):
"""Test returning bytes in the callback."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
# This must be a bytes instance not unicode
return b'homeworlder'
self.assertRaisesRegex(
ValueError, r'unicode \(str.*bytes \(str',
textlib.replace_links, self.text, callback, self.wp_site)
def test_replace_interwiki_links(self):
"""Make sure interwiki links can not be replaced."""
link = '[[fr:how]]'
self.assertEqual(
textlib.replace_links(link, ('fr:how', 'de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, (':fr:how', ':de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, ('how', 'de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, ('de:how', 'de:are'), self.wp_site),
link)
class TestReplaceLinksNonDry(TestCase):
"""Test the replace_links function in textlib non-dry."""
family = 'wikipedia'
code = 'en'
cached = True
def test_replace_interlanguage_links(self):
"""Test replacing interlanguage links."""
link = '[[:fr:how]]'
self.assertEqual(
textlib.replace_links(link, (':fr:how', ':de:are'),
self.site),
'[[:de:Are|fr:how]]')
self.assertEqual(
textlib.replace_links(link, ('fr:how', 'de:are'),
self.site),
'[[:de:Are|fr:how]]')
self.assertEqual(
textlib.replace_links(link, ('how', ':de:are'),
self.site),
link)
self.assertEqual(
textlib.replace_links(link, (':de:how', ':de:are'),
self.site),
link)
class TestLocalDigits(TestCase):
"""Test to verify that local digits are correctly being handled."""
net = False
def test_to_local(self):
"""Test converting Latin digits to local digits."""
self.assertEqual(textlib.to_local_digits(299792458, 'en'), 299792458)
self.assertEqual(
textlib.to_local_digits(299792458, 'fa'), '۲۹۹۷۹۲۴۵۸')
self.assertEqual(
textlib.to_local_digits(
'299792458 flash', 'fa'), '۲۹۹۷۹۲۴۵۸ flash')
self.assertEqual(
textlib.to_local_digits(
'299792458', 'km'), '២៩៩៧៩២៤៥៨')
class TestReplaceExcept(DefaultDrySiteTestCase):
"""Test to verify the replacements with exceptions are done correctly."""
def test_no_replace(self):
"""Test replacing when the old text does not match."""
self.assertEqual(textlib.replaceExcept('12345678', 'x', 'y', [],
site=self.site),
'12345678')
def test_simple_replace(self):
"""Test replacing without regex."""
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxxB', 'x', 'y', [],
site=self.site),
'AyyB')
self.assertEqual(textlib.replaceExcept('AxyxB', 'x', 'y', [],
site=self.site),
'AyyyB')
def test_regex_replace(self):
"""Test replacing with a regex."""
self.assertEqual(textlib.replaceExcept('A123B', r'\d', r'x', [],
site=self.site),
'AxxxB')
self.assertEqual(textlib.replaceExcept('A123B', r'\d+', r'x', [],
site=self.site),
'AxB')
self.assertEqual(textlib.replaceExcept('A123B',
r'A(\d)2(\d)B', r'A\1x\2B', [],
site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('', r'(a?)', r'\1B', [], site=self.site),
'B')
self.assertEqual(
textlib.replaceExcept('abc', r'x*', r'-', [], site=self.site),
'-a-b-c-')
# This is different from re.sub() as re.sub() doesn't
# allow None groups
self.assertEqual(
textlib.replaceExcept('', r'(a)?', r'\1\1', [], site=self.site),
'')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(\d)2(\d)B', r'A\g<1>x\g<2>B',
[], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(?P<b>\d)B',
r'A\g<a>x\g<b>B', [], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(\d)B',
r'A\g<a>x\g<2>B', [], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(\d)B',
r'A\g<a>x\2B', [], site=self.site),
'A1x3B')
# test regex with lookbehind.
self.assertEqual(
textlib.replaceExcept('A behindB C', r'(?<=behind)\w',
r'Z', [], site=self.site),
'A behindZ C')
# test regex with lookbehind and groups.
self.assertEqual(
textlib.replaceExcept('A behindB C D', r'(?<=behind)\w( )',
r'\g<1>Z', [], site=self.site),
'A behind ZC D')
# test regex with lookahead.
self.assertEqual(
textlib.replaceExcept('A Bahead C', r'\w(?=ahead)',
r'Z', [], site=self.site),
'A Zahead C')
# test regex with lookahead and groups.
self.assertEqual(
textlib.replaceExcept('A Bahead C D', r'( )\w(?=ahead)',
r'Z\g<1>', [], site=self.site),
'AZ ahead C D')
def test_case_sensitive(self):
"""Test replacing with different case sensitivity."""
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
caseInsensitive=False,
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxB', 'X', 'y', [],
caseInsensitive=False,
site=self.site),
'AxB')
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
caseInsensitive=True,
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxB', 'X', 'y', [],
caseInsensitive=True,
site=self.site),
'AyB')
def test_replace_with_marker(self):
"""Test replacing with a marker."""
self.assertEqual(textlib.replaceExcept('AxyxB', 'x', 'y', [],
marker='.',
site=self.site),
'Ayyy.B')
self.assertEqual(textlib.replaceExcept('AxyxB', '1', 'y', [],
marker='.',
site=self.site),
'AxyxB.')
def test_overlapping_replace(self):
"""Test replacing with and without overlap."""
self.assertEqual(textlib.replaceExcept('1111', '11', '21', [],
allowoverlap=False,
site=self.site),
'2121')
self.assertEqual(textlib.replaceExcept('1111', '11', '21', [],
allowoverlap=True,
site=self.site),
'2221')
def test_replace_exception(self):
"""Test replacing not inside a specific regex."""
self.assertEqual(textlib.replaceExcept('123x123', '123', '000', [],
site=self.site),
'000x000')
self.assertEqual(textlib.replaceExcept('123x123', '123', '000',
[re.compile(r'\w123')],
site=self.site),
'000x123')
def test_replace_tags(self):
"""Test replacing not inside various tags."""
self.assertEqual(textlib.replaceExcept('A <!-- x --> B', 'x', 'y',
['comment'], site=self.site),
'A <!-- x --> B')
self.assertEqual(textlib.replaceExcept('\n==x==\n', 'x', 'y',
['header'], site=self.site),
'\n==x==\n')
self.assertEqual(textlib.replaceExcept('\n<!--'
'\ncomment-->==x==<!--comment'
'\n-->\n', 'x', 'y',
['header'], site=self.site),
'\n<!--\ncomment-->==x==<!--comment\n-->\n')
self.assertEqual(textlib.replaceExcept('<pre>x</pre>', 'x', 'y',
['pre'], site=self.site),
'<pre>x</pre>')
self.assertEqual(textlib.replaceExcept('<nowiki >x</nowiki >x',
'x', 'y', ['nowiki'],
site=self.site),
'<nowiki >x</nowiki >y') # T191559
self.assertEqual(textlib.replaceExcept('<source lang="xml">x</source>',
'x', 'y', ['source'],
site=self.site),
'<source lang="xml">x</source>')
self.assertEqual(textlib.replaceExcept('<source>x</source>',
'x', 'y', ['source'],
site=self.site),
'<source>x</source>')
self.assertEqual(textlib.replaceExcept(
'<syntaxhighlight lang="xml">x</syntaxhighlight>',
'x', 'y', ['source'], site=self.site),
'<syntaxhighlight lang="xml">x</syntaxhighlight>')
self.assertEqual(
textlib.replaceExcept('<syntaxhighlight>x</syntaxhighlight>',
'x', 'y', ['source'], site=self.site),
'<syntaxhighlight>x</syntaxhighlight>')
self.assertEqual(textlib.replaceExcept('<includeonly>x</includeonly>',
'x', 'y', ['includeonly'],
site=self.site),
'<includeonly>x</includeonly>')
self.assertEqual(textlib.replaceExcept('<ref>x</ref>', 'x', 'y',
['ref'], site=self.site),
'<ref>x</ref>')
self.assertEqual(textlib.replaceExcept('<ref name="x">A</ref>',
'x', 'y',
['ref'], site=self.site),
'<ref name="x">A</ref>')
self.assertEqual(textlib.replaceExcept(' xA ', 'x', 'y',
['startspace'], site=self.site),
' xA ')
self.assertEqual(textlib.replaceExcept(':xA ', 'x', 'y',
['startcolon'], site=self.site),
':xA ')
self.assertEqual(textlib.replaceExcept('<table>x</table>', 'x', 'y',
['table'], site=self.site),
'<table>x</table>')
self.assertEqual(textlib.replaceExcept('x [http://www.sample.com x]',
'x', 'y', ['hyperlink'],
site=self.site),
'y [http://www.sample.com y]')
self.assertEqual(textlib.replaceExcept(
'x http://www.sample.com/x.html', 'x', 'y',
['hyperlink'], site=self.site), 'y http://www.sample.com/x.html')
self.assertEqual(textlib.replaceExcept('<gallery>x</gallery>',
'x', 'y', ['gallery'],
site=self.site),
'<gallery>x</gallery>')
self.assertEqual(textlib.replaceExcept('[[x]]', 'x', 'y', ['link'],
site=self.site),
'[[x]]')
self.assertEqual(textlib.replaceExcept('{{#property:p171}}', '1', '2',
['property'], site=self.site),
'{{#property:p171}}')
self.assertEqual(textlib.replaceExcept('{{#invoke:x}}', 'x', 'y',
['invoke'], site=self.site),
'{{#invoke:x}}')
self.assertEqual(
textlib.replaceExcept(
'<ref name=etwa /> not_in_ref <ref> in_ref </ref>',
'not_in_ref', 'text', ['ref'], site=self.site),
'<ref name=etwa /> text <ref> in_ref </ref>')
self.assertEqual(
textlib.replaceExcept(
'<ab> content </a>', 'content', 'text', ['a'], site=self.site),
'<ab> text </a>')
def test_replace_with_count(self):
"""Test replacing with count argument."""
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site),
'y [[y]] y y')
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site, count=5),
'y [[y]] y y')
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site, count=2),
'y [[y]] x x')
self.assertEqual(textlib.replaceExcept(
'x [[x]] x x', 'x', 'y', ['link'], site=self.site, count=2),
'y [[x]] y x')
def test_replace_tag_category(self):
"""Test replacing not inside category links."""
for ns_name in self.site.namespaces[14]:
self.assertEqual(textlib.replaceExcept('[[{}:x]]'.format(ns_name),
'x', 'y', ['category'],
site=self.site),
'[[{}:x]]'.format(ns_name))
def test_replace_tag_file(self):
"""Test replacing not inside file links."""
for ns_name in self.site.namespaces[6]:
self.assertEqual(textlib.replaceExcept('[[{}:x]]'.format(ns_name),
'x', 'y', ['file'],
site=self.site),
'[[{}:x]]'.format(ns_name))
self.assertEqual(
textlib.replaceExcept(
'[[File:x|foo]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|foo]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|foo|bar x]] x',
'x', 'y', ['file'], site=self.site),
'[[File:x|foo|bar x]] y')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|]][[File:x|foo]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|]][[File:x|foo]]')
self.assertEqual(
textlib.replaceExcept(
'[[NonFile:x]]',
'x', 'y', ['file'], site=self.site),
'[[NonFile:y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:]]',
'File:', 'NonFile:', ['file'], site=self.site),
'[[File:]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|[[foo]].]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|[[foo]].]]')
# ensure only links inside file are captured
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]][[bar]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]][[bar]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]][[bar]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]][[bar]].x]][[y]]')
# Correctly handle single brackets in the text.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [bar].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [bar].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[bar] [[foo]] .x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[bar] [[foo]] .x]][[y]]')
def test_replace_tag_file_invalid(self):
"""Test replacing not inside file links with invalid titles."""
# Correctly handle [ and ] inside wikilinks inside file link
# even though these are an invalid title.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [invalid ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid ]].x]][[y]]')
@unittest.expectedFailure
def test_replace_tag_file_failure(self):
"""Test showing limits of the file link regex."""
# When the double brackets are unbalanced, the regex
# does not correctly detect the end of the file link.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [[invalid ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[y]]')
def test_replace_tags_interwiki(self):
"""Test replacing not inside interwiki links."""
if ('es' not in self.site.family.langs
or 'ey' in self.site.family.langs):
raise unittest.SkipTest("family {} doesn't have languages"
.format(self.site))
self.assertEqual(textlib.replaceExcept('[[es:s]]', 's', 't',
['interwiki'], site=self.site),
'[[es:s]]') # "es" is a valid interwiki code
self.assertEqual(textlib.replaceExcept('[[ex:x]]', 'x', 'y',
['interwiki'], site=self.site),
'[[ey:y]]') # "ex" is not a valid interwiki code
def test_replace_template(self):
"""Test replacing not inside templates."""
template_sample = (r'a {{templatename '
r' | accessdate={{Fecha|1993}} '
r' |atitle=The [[real title]] }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
template_sample = (r'a {{templatename '
r' | 1={{a}}2{{a}} '
r' | 2={{a}}1{{a}} }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
template_sample = (r'a {{templatename '
r' | 1={{{a}}}2{{{a}}} '
r' | 2={{{a}}}1{{{a}}} }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
# sf.net bug 1575: unclosed template
template_sample = template_sample[:-2]
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
def test_replace_source_reference(self):
"""Test replacing in text which contains back references."""
# Don't use a valid reference number in the original string,
# in case it tries to apply that as a reference.
self.assertEqual(textlib.replaceExcept(r'\42', r'^(.*)$', r'X\1X',
[], site=self.site),
r'X\42X')
self.assertEqual(textlib.replaceExcept(
r'\g<bar>', r'^(?P<foo>.*)$', r'X\g<foo>X', [], site=self.site),
r'X\g<bar>X')
class TestMultiTemplateMatchBuilder(DefaultDrySiteTestCase):
"""Test _MultiTemplateMatchBuilder."""
@classmethod
def setUpClass(cls):
"""Cache namespace 10 (Template) case sensitivity."""
super(TestMultiTemplateMatchBuilder, cls).setUpClass()
cls._template_not_case_sensitive = (
cls.get_site().namespaces.TEMPLATE.case != 'case-sensitive')
def test_no_match(self):
"""Test text without any desired templates."""
string = 'The quick brown fox'
builder = _MultiTemplateMatchBuilder(self.site)
self.assertIsNone(re.search(builder.pattern('quick'), string))
def test_match(self):
"""Test text with one match without parameters."""
string = 'The {{quick}} brown fox'
builder = _MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_with_params(self):
"""Test text with one match with parameters."""
string = 'The {{quick|brown}} fox'
builder = _MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_msg(self):
"""Test text with {{msg:..}}."""
string = 'The {{msg:quick}} brown fox'
builder = _MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_template_prefix(self):
"""Test pages with {{template:..}}."""
string = 'The {{%s:%s}} brown fox'
template = 'template'
builder = _MultiTemplateMatchBuilder(self.site)
if self._template_not_case_sensitive:
quick_list = ('quick', 'Quick')
else:
quick_list = ('quick', )
for t in (template.upper(), template.lower(), template.title()):
for q in quick_list:
self.assertIsNotNone(re.search(builder.pattern('quick'),
string % (t, q)))
self.assertEqual(bool(re.search(builder.pattern('Quick'),
string % (t, q))),
self._template_not_case_sensitive)
class TestGetLanguageLinks(SiteAttributeTestCase):
"""Test L{textlib.getLanguageLinks} function."""
sites = {
'enwp': {
'family': 'wikipedia',
'code': 'en',
},
'dewp': {
'family': 'wikipedia',
'code': 'de',
},
'commons': {
'family': 'commons',
'code': 'commons',
},
}
example_text = ('[[en:Site]] [[de:Site|Piped]] [[commons:Site]] '
'[[baden:Site]] [[fr:{{PAGENAME}}]]')
@classmethod
def setUpClass(cls):
"""Define set of valid targets for the example text."""
super(TestGetLanguageLinks, cls).setUpClass()
cls.sites_set = {cls.enwp, cls.dewp}
def test_getLanguageLinks(self, key):
"""Test if the function returns the correct titles and sites."""
with mock.patch('pywikibot.output') as m:
lang_links = textlib.getLanguageLinks(self.example_text,
self.site)
m.assert_called_once_with(
'[getLanguageLinks] Text contains invalid interwiki link '
'[[fr:{{PAGENAME}}]].')
self.assertEqual({page.title() for page in lang_links.values()},
{'Site'})
self.assertEqual(set(lang_links), self.sites_set - {self.site})
class TestUnescape(TestCase):
"""Test to verify that unescaping HTML chars are correctly done."""
net = False
def test_unescape(self):
"""Test unescaping HTML chars."""
        self.assertEqual(textlib.unescape('!23&lt;&gt;&apos;&quot;&amp;&amp;'),
                         '!23<>\'"&&')
class TestStarList(TestCase):
"""Test starlist."""
net = False
def test_basic(self):
"""Test standardizing {{linkfa}} without parameters."""
self.assertEqual(
'foo\n{{linkfa}}\nbar\n\n',
textlib.standardize_stars('foo\n{{linkfa}}\nbar'))
def test_with_params(self):
"""Test standardizing text with {{linkfa|...}}."""
self.assertEqual(
'foo\nbar\n\n{{linkfa|...}}\n',
textlib.standardize_stars('foo\n{{linkfa|...}}\nbar'))
def test_with_sorting_params(self):
"""Test standardizing text with sorting parameters."""
self.assertEqual(
'foo\n\n{{linkfa|bar}}\n{{linkfa|de}}\n'
'{{linkfa|en}}\n{{linkfa|fr}}\n',
textlib.standardize_stars(
'foo\n{{linkfa|en}}\n{{linkfa|de}}\n'
'{{linkfa|fr}}\n{{linkfa|bar}}'))
def test_get_stars(self):
"""Test get_starts method."""
self.assertEqual(
['{{linkfa|en}}\n', '{{linkfa|de}}\n',
'{{linkfa|fr}}\n', '{{linkfa|bar}}'],
textlib.get_stars(
'foo\n{{linkfa|en}}\n{{linkfa|de}}\n'
'{{linkfa|fr}}\n{{linkfa|bar}}'))
def test_remove_stars(self):
"""Test remove_stars method."""
self.assertEqual(
'foo\n{{linkfa|en}}\n{{linkfa|fr}}\n{{linkfa|bar}}',
textlib.remove_stars(
'foo\n{{linkfa|en}}\n{{linkfa|de}}\n'
'{{linkfa|fr}}\n{{linkfa|bar}}', ['{{linkfa|de}}\n']))
def test_append_stars(self):
"""Test append_stars method."""
self.assertEqual(
'foo\n\n{{linkfa|bar}}\n{{linkfa|de}}\n'
'{{linkfa|en}}\n{{linkfa|fr}}\n',
textlib.append_stars(
'foo', ['{{linkfa|en}}\n', '{{linkfa|de}}\n',
'{{linkfa|fr}}\n', '{{linkfa|bar}}']))
class TestExtractSections(DefaultDrySiteTestCase):
"""Test the extract_sections function."""
def test_no_sections_no_footer(self):
"""Test for text having no sections or footer."""
self.assertEqual(
extract_sections('text', self.site),
('text', [], '')
)
def test_no_sections_with_footer(self):
"""Test for text having footer but no section."""
self.assertEqual(
extract_sections('text\n\n[[Category:A]]', self.site),
('text\n\n', [], '[[Category:A]]')
)
def test_with_section_no_footer(self):
"""Test for text having sections but no footer."""
self.assertEqual(
extract_sections(
'text\n\n'
'==title==\n'
'content',
self.site),
('text\n\n', [('==title==', '\ncontent')], '')
)
def test_with_section_with_footer(self):
"""Test for text having sections and footer."""
self.assertEqual(
extract_sections(
'text\n\n'
'==title==\n'
'content\n'
'[[Category:A]]\n',
self.site),
('text\n\n', [('==title==', '\ncontent\n')], '[[Category:A]]\n')
)
def test_with_h1_and_h2_sections(self):
"""Test for text having h1 and h2 sections."""
self.assertEqual(
extract_sections(
'text\n\n'
'=first level=\n'
'foo\n'
'==title==\n'
'bar',
self.site),
('text\n\n',
[('=first level=', '\nfoo\n'), ('==title==', '\nbar')],
'')
)
def test_with_h4_and_h2_sections(self):
"""Test for text having h4 and h2 sections."""
self.assertEqual(
extract_sections(
'text\n\n'
'====title====\n'
'==title 2==\n'
'content',
self.site),
('text\n\n',
[('====title====', '\n'), ('==title 2==', '\ncontent')],
'')
)
def test_long_comment(self):
r"""Test for text having a long expanse of white space.
This is to catch certain regex issues caused by patterns like
r'(\s+)*$' (as found in older versions of extract_section).
They may not halt.
c.f.
https://www.regular-expressions.info/catastrophic.html
"""
text = '<!-- -->'
self.assertEqual(
extract_sections(text, self.site),
(text, [], '')
)
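# Illustrative sketch (added commentary, not part of the original test suite):
# a nested quantifier such as r'(\s+)*$' can backtrack exponentially once the
# trailing match fails, e.g.
#     re.match(r'(\s+)*$', ' ' * 40 + 'x')   # may effectively never return
# which is the failure mode the test above guards against.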
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
| mit | -5,682,923,183,788,997,000 | 41.833705 | 79 | 0.480028 | false |
Griger/Intel-CervicalCancer-KaggleCompetition | featureHOG.py | 1 | 1456 | from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import numpy as np
from math import pi
from keras.preprocessing.image import ImageDataGenerator
import cv2
from sklearn.cluster import KMeans
import sklearn.preprocessing as prepro
# Generate new (augmented) examples
'''
datagen = ImageDataGenerator(
rotation_range=180,
shear_range=pi,
fill_mode='nearest')
train_data = np.load('Datos/train244all.npy')
train_labels = np.load('Datos/train_target244all.npy')
datagen.fit(train_data,rounds=2)
i = 0
nuevas_imagenes = []
tam = 1
for batch in datagen.flow(train_data,train_labels,batch_size = (len(train_data))):
i += 1
if i > tam:
break
nuevas_imagenes.append(batch[0])
nuevas_imagenes = np.array(nuevas_imagenes)
nuevas_imagenes = np.reshape(nuevas_imagenes, (len(train_data)*tam,244,244,3))
np.save('Datos/extraRotations.npy', nuevas_imagenes, allow_pickle=True, fix_imports=True)
'''
train_data = np.load('Datos/train244all.npy')
test_data = np.load('Datos/test244.npy')
hog = cv2.HOGDescriptor()
def getHist(image):
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
image = image * 255
image = image.astype('uint8')
return hog.compute(image)
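# Note (added commentary): cv2.HOGDescriptor.compute expects an 8-bit image,
# which is why getHist scales the float image back to the 0-255 range and casts
# it to uint8 before computing the descriptor.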
histograms = [getHist(img) for img in train_data]
if __name__ == '__main__':
    # Save the histograms (persistence step left unimplemented in this snippet)
    pass
| gpl-3.0 | 4,660,950,177,554,977,000 | 21.483871 | 94 | 0.665522 | false |
m3z/HT | openstack_dashboard/api/swift.py | 1 | 9568 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import swiftclient
from django.conf import settings
from django.utils.translation import ugettext as _
from horizon import exceptions
from openstack_dashboard.api.base import url_for, APIDictWrapper
LOG = logging.getLogger(__name__)
FOLDER_DELIMITER = "/"
class Container(APIDictWrapper):
pass
class StorageObject(APIDictWrapper):
def __init__(self, apidict, container_name, orig_name=None, data=None):
super(StorageObject, self).__init__(apidict)
self.container_name = container_name
self.orig_name = orig_name
self.data = data
class PseudoFolder(APIDictWrapper):
"""
    Wrapper to smooth out discrepancies between swift "subdir" items
and swift pseudo-folder objects.
"""
def __init__(self, apidict, container_name):
super(PseudoFolder, self).__init__(apidict)
self.container_name = container_name
def _has_content_type(self):
content_type = self._apidict.get("content_type", None)
return content_type == "application/directory"
@property
def name(self):
if self._has_content_type():
return self._apidict['name']
return self.subdir.rstrip(FOLDER_DELIMITER)
@property
def bytes(self):
if self._has_content_type():
return self._apidict['bytes']
return None
@property
def content_type(self):
return "application/directory"
def _objectify(items, container_name):
""" Splits a listing of objects into their appropriate wrapper classes. """
objects = {}
subdir_markers = []
# Deal with objects and object pseudo-folders first, save subdirs for later
for item in items:
if item.get("content_type", None) == "application/directory":
objects[item['name']] = PseudoFolder(item, container_name)
elif item.get("subdir", None) is not None:
subdir_markers.append(PseudoFolder(item, container_name))
else:
objects[item['name']] = StorageObject(item, container_name)
# Revisit subdirs to see if we have any non-duplicates
for item in subdir_markers:
if item.name not in objects.keys():
objects[item.name] = item
return objects.values()
def swift_api(request):
endpoint = url_for(request, 'object-store')
LOG.debug('Swift connection created using token "%s" and url "%s"'
% (request.user.token.id, endpoint))
return swiftclient.client.Connection(None,
request.user.username,
None,
preauthtoken=request.user.token.id,
preauthurl=endpoint,
auth_version="2.0")
def swift_container_exists(request, container_name):
try:
swift_api(request).head_container(container_name)
return True
except swiftclient.client.ClientException:
return False
def swift_object_exists(request, container_name, object_name):
try:
swift_api(request).head_object(container_name, object_name)
return True
except swiftclient.client.ClientException:
return False
def swift_get_containers(request, marker=None):
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
headers, containers = swift_api(request).get_account(limit=limit + 1,
marker=marker,
full_listing=True)
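    # Ask for one item more than the page size: if it comes back, there are
    # further containers to page through; the extra item is dropped below.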
container_objs = [Container(c) for c in containers]
if(len(container_objs) > limit):
return (container_objs[0:-1], True)
else:
return (container_objs, False)
def swift_create_container(request, name):
if swift_container_exists(request, name):
raise exceptions.AlreadyExists(name, 'container')
swift_api(request).put_container(name)
return Container({'name': name})
def swift_delete_container(request, name):
swift_api(request).delete_container(name)
return True
def swift_get_objects(request, container_name, prefix=None, marker=None,
limit=None):
limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)
kwargs = dict(prefix=prefix,
marker=marker,
limit=limit + 1,
delimiter=FOLDER_DELIMITER,
full_listing=True)
headers, objects = swift_api(request).get_container(container_name,
**kwargs)
object_objs = _objectify(objects, container_name)
if(len(object_objs) > limit):
return (object_objs[0:-1], True)
else:
return (object_objs, False)
def swift_filter_objects(request, filter_string, container_name, prefix=None,
marker=None):
# FIXME(kewu): Swift currently has no real filtering API, thus the marker
# parameter here won't actually help the pagination. For now I am just
# getting the largest number of objects from a container and filtering
# based on those objects.
limit = 9999
objects = swift_get_objects(request,
container_name,
prefix=prefix,
marker=marker,
limit=limit)
filter_string_list = filter_string.lower().strip().split(' ')
def matches_filter(obj):
for q in filter_string_list:
return wildcard_search(obj.name.lower(), q)
return filter(matches_filter, objects[0])
def wildcard_search(string, q):
q_list = q.split('*')
if all(map(lambda x: x == '', q_list)):
return True
elif q_list[0] not in string:
return False
else:
if q_list[0] == '':
tail = string
else:
head, delimiter, tail = string.partition(q_list[0])
return wildcard_search(tail, '*'.join(q_list[1:]))
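# Usage sketch (added commentary, examples are hypothetical): wildcard_search
# implements a minimal '*'-only glob, e.g.
#     wildcard_search('report.txt', 're*.txt')  -> True
#     wildcard_search('report.txt', 'img*')     -> False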
def swift_copy_object(request, orig_container_name, orig_object_name,
new_container_name, new_object_name):
try:
# FIXME(gabriel): The swift currently fails at unicode in the
# copy_to method, so to provide a better experience we check for
# unicode here and pre-empt with an error message rather than
# letting the call fail.
str(orig_container_name)
str(orig_object_name)
str(new_container_name)
str(new_object_name)
except UnicodeEncodeError:
raise exceptions.HorizonException(_("Unicode is not currently "
"supported for object copy."))
if swift_object_exists(request, new_container_name, new_object_name):
raise exceptions.AlreadyExists(new_object_name, 'object')
headers = {"X-Copy-From": FOLDER_DELIMITER.join([orig_container_name,
orig_object_name])}
return swift_api(request).put_object(new_container_name,
new_object_name,
None,
headers=headers)
def swift_create_subfolder(request, container_name, folder_name):
headers = {'content-type': 'application/directory',
'content-length': 0}
etag = swift_api(request).put_object(container_name,
folder_name,
None,
headers=headers)
obj_info = {'subdir': folder_name, 'etag': etag}
return PseudoFolder(obj_info, container_name)
def swift_upload_object(request, container_name, object_name, object_file):
headers = {}
headers['X-Object-Meta-Orig-Filename'] = object_file.name
etag = swift_api(request).put_object(container_name,
object_name,
object_file,
headers=headers)
obj_info = {'name': object_name, 'bytes': object_file.size, 'etag': etag}
return StorageObject(obj_info, container_name)
def swift_delete_object(request, container_name, object_name):
swift_api(request).delete_object(container_name, object_name)
return True
def swift_get_object(request, container_name, object_name):
headers, data = swift_api(request).get_object(container_name, object_name)
orig_name = headers.get("x-object-meta-orig-filename")
obj_info = {'name': object_name, 'bytes': len(data)}
return StorageObject(obj_info,
container_name,
orig_name=orig_name,
data=data)
| apache-2.0 | -1,708,836,969,548,699,400 | 35.10566 | 79 | 0.599289 | false |
chrisseto/osf.io | api_tests/reviews/mixins/comment_settings.py | 1 | 3030 | from datetime import timedelta
import pytest
from furl import furl
from osf_tests.factories import (
AuthUserFactory,
PreprintFactory,
PreprintProviderFactory,
)
from reviews.permissions import GroupHelper
from reviews_tests.factories import ReviewLogFactory
from website.util import permissions as osf_permissions
@pytest.mark.django_db
class ReviewLogCommentSettingsMixin(object):
@pytest.fixture()
def url(self):
raise NotImplementedError
@pytest.fixture()
def provider(self):
return PreprintProviderFactory()
@pytest.fixture()
def preprint(self, provider):
return PreprintFactory(provider=provider)
@pytest.fixture()
def logs(self, preprint):
return [ReviewLogFactory(reviewable=preprint) for _ in range(5)]
@pytest.fixture()
def provider_admin(self, provider):
user = AuthUserFactory()
user.groups.add(GroupHelper(provider).get_group('admin'))
return user
@pytest.fixture()
def provider_moderator(self, provider):
user = AuthUserFactory()
user.groups.add(GroupHelper(provider).get_group('moderator'))
return user
@pytest.fixture()
def node_admin(self, preprint):
user = AuthUserFactory()
preprint.node.add_contributor(user, permissions=[osf_permissions.READ, osf_permissions.WRITE, osf_permissions.ADMIN])
return user
def test_comment_settings(self, app, url, provider, logs, provider_admin, provider_moderator, node_admin):
expected_ids = set([l._id for l in logs])
for anonymous in [True, False]:
for private in [True, False]:
provider.reviews_comments_anonymous = anonymous
provider.reviews_comments_private = private
provider.save()
# admin always sees comment/creator
res = app.get(url, auth=provider_admin.auth)
self.__assert_fields(res, expected_ids, False, False)
# moderator always sees comment/creator
res = app.get(url, auth=provider_moderator.auth)
self.__assert_fields(res, expected_ids, False, False)
# node admin sees what the settings allow
res = app.get(url, auth=node_admin.auth)
self.__assert_fields(res, expected_ids, anonymous, private)
def __assert_fields(self, res, expected_ids, hidden_creator, hidden_comment):
data = res.json['data']
actual_ids = set([l['id'] for l in data])
if expected_ids != actual_ids:
raise Exception((expected_ids, actual_ids))
assert expected_ids == actual_ids
for log in data:
if hidden_creator:
assert 'creator' not in log['relationships']
else:
assert 'creator' in log['relationships']
if hidden_comment:
assert 'comment' not in log['attributes']
else:
assert 'comment' in log['attributes']
| apache-2.0 | -4,667,580,536,789,115,000 | 33.431818 | 125 | 0.630033 | false |
350dotorg/Django | django/core/mail/message.py | 1 | 10976 | import mimetypes
import os
import random
import time
from email import Charset, Encoders
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.Header import Header
from email.Utils import formatdate, getaddresses, formataddr
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import smart_str, force_unicode
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
Charset.add_charset('utf-8', Charset.SHORTEST, Charset.QP, 'utf-8')
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
class BadHeaderError(ValueError):
pass
# Copied from Python standard library, with the following modifications:
# * Used cached hostname for performance.
# * Added try/except to support lack of getpid() in Jython (#5496).
def make_msgid(idstring=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
<[email protected]>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id.
"""
timeval = time.time()
utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
try:
pid = os.getpid()
except AttributeError:
# No getpid() in Jython, for example.
pid = 1
randint = random.randrange(100000)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
idhost = DNS_NAME
msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
return msgid
def forbid_multi_line_headers(name, val, encoding):
"""Forbids multi-line headers, to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = force_unicode(val)
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val = val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ('to', 'from', 'cc'):
result = []
for nm, addr in getaddresses((val,)):
nm = str(Header(nm.encode(encoding), encoding))
try:
addr = addr.encode('ascii')
except UnicodeEncodeError: # IDN
addr = str(Header(addr.encode(encoding), encoding))
result.append(formataddr((nm, addr)))
val = ', '.join(result)
else:
val = Header(val.encode(encoding), encoding)
else:
if name.lower() == 'subject':
val = Header(val)
return name, val
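# Illustrative sketch (added commentary, address is hypothetical): this check is
# what turns a header-injection attempt into an error, e.g.
#     EmailMessage(subject='Hi\nBcc: [email protected]', ...).message()
# raises BadHeaderError instead of silently emitting an extra header.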
class SafeMIMEText(MIMEText):
def __init__(self, text, subtype, charset):
self.encoding = charset
MIMEText.__init__(self, text, subtype, charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
class SafeMIMEMultipart(MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage(object):
"""
A container for email information.
"""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
assert not isinstance(to, basestring), '"to" argument must be a list or tuple'
self.to = list(to)
else:
self.to = []
if bcc:
assert not isinstance(bcc, basestring), '"bcc" argument must be a list or tuple'
self.bcc = list(bcc)
else:
self.bcc = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(smart_str(self.body, encoding),
self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(self.to))
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
msg['Date'] = formatdate()
if 'message-id' not in header_names:
msg['Message-ID'] = make_msgid()
for name, value in self.extra_headers.items():
if name.lower() in ('from', 'to'): # From and To are already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Bcc entries).
"""
return self.to + self.bcc
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
"""
if isinstance(filename, MIMEBase):
assert content == mimetype == None
self.attachments.append(filename)
else:
assert content is not None
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
content = open(path, 'rb').read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Converts the content, mimetype pair into a MIME attachment object.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(smart_str(content, encoding), subtype, encoding)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object.
"""
if mimetype is None:
mimetype, _ = mimetypes.guess_type(filename)
if mimetype is None:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
attachment = self._create_mime_attachment(content, mimetype)
if filename:
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings (or UTF-8
bytestrings). The SafeMIMEText class will handle any necessary encoding
conversions.
"""
super(EmailMultiAlternatives, self).__init__(subject, body, from_email, to, bcc, connection, attachments, headers)
        self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
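# Typical usage sketch (added commentary, addresses are hypothetical):
#     msg = EmailMultiAlternatives('Subject', 'plain-text body',
#                                  '[email protected]', ['[email protected]'])
#     msg.attach_alternative('<p>HTML body</p>', 'text/html')
#     msg.send()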
| bsd-3-clause | 5,907,717,041,566,670,000 | 36.979239 | 122 | 0.622449 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/gtk/_gtk/InputDialog.py | 1 | 10376 | # encoding: utf-8
# module gtk._gtk
# from /usr/lib/python2.7/dist-packages/gtk-2.0/gtk/_gtk.so
# by generator 1.135
# no doc
# imports
import atk as __atk
import gio as __gio
import gobject as __gobject
import gobject._gobject as __gobject__gobject
from Dialog import Dialog
class InputDialog(Dialog):
"""
Object GtkInputDialog
Signals from GtkInputDialog:
enable-device (GdkDevice)
disable-device (GdkDevice)
Signals from GtkDialog:
response (gint)
close ()
Properties from GtkDialog:
has-separator -> gboolean: Has separator
The dialog has a separator bar above its buttons
Signals from GtkWindow:
set-focus (GtkWidget)
frame-event (GdkEvent) -> gboolean
activate-focus ()
activate-default ()
keys-changed ()
Properties from GtkWindow:
type -> GtkWindowType: Window Type
The type of the window
title -> gchararray: Window Title
The title of the window
role -> gchararray: Window Role
Unique identifier for the window to be used when restoring a session
allow-shrink -> gboolean: Allow Shrink
If TRUE, the window has no mimimum size. Setting this to TRUE is 99% of the time a bad idea
allow-grow -> gboolean: Allow Grow
If TRUE, users can expand the window beyond its minimum size
resizable -> gboolean: Resizable
If TRUE, users can resize the window
modal -> gboolean: Modal
If TRUE, the window is modal (other windows are not usable while this one is up)
window-position -> GtkWindowPosition: Window Position
The initial position of the window
default-width -> gint: Default Width
The default width of the window, used when initially showing the window
default-height -> gint: Default Height
The default height of the window, used when initially showing the window
destroy-with-parent -> gboolean: Destroy with Parent
If this window should be destroyed when the parent is destroyed
icon -> GdkPixbuf: Icon
Icon for this window
icon-name -> gchararray: Icon Name
Name of the themed icon for this window
screen -> GdkScreen: Screen
The screen where this window will be displayed
type-hint -> GdkWindowTypeHint: Type hint
Hint to help the desktop environment understand what kind of window this is and how to treat it.
skip-taskbar-hint -> gboolean: Skip taskbar
TRUE if the window should not be in the task bar.
skip-pager-hint -> gboolean: Skip pager
TRUE if the window should not be in the pager.
urgency-hint -> gboolean: Urgent
TRUE if the window should be brought to the user's attention.
accept-focus -> gboolean: Accept focus
TRUE if the window should receive the input focus.
focus-on-map -> gboolean: Focus on map
TRUE if the window should receive the input focus when mapped.
decorated -> gboolean: Decorated
Whether the window should be decorated by the window manager
deletable -> gboolean: Deletable
Whether the window frame should have a close button
gravity -> GdkGravity: Gravity
The window gravity of the window
transient-for -> GtkWindow: Transient for Window
The transient parent of the dialog
opacity -> gdouble: Opacity for Window
The opacity of the window, from 0 to 1
is-active -> gboolean: Is Active
Whether the toplevel is the current active window
has-toplevel-focus -> gboolean: Focus in Toplevel
Whether the input focus is within this GtkWindow
startup-id -> gchararray: Startup ID
Unique startup identifier for the window used by startup-notification
mnemonics-visible -> gboolean: Mnemonics Visible
Whether mnemonics are currently visible in this window
Signals from GtkContainer:
add (GtkWidget)
remove (GtkWidget)
check-resize ()
set-focus-child (GtkWidget)
Properties from GtkContainer:
border-width -> guint: Border width
The width of the empty border outside the containers children
resize-mode -> GtkResizeMode: Resize mode
Specify how resize events are handled
child -> GtkWidget: Child
Can be used to add a new child to the container
Signals from GtkWidget:
composited-changed ()
show ()
hide ()
map ()
unmap ()
realize ()
unrealize ()
size-request (GtkRequisition)
size-allocate (GdkRectangle)
state-changed (GtkStateType)
parent-set (GtkWidget)
hierarchy-changed (GtkWidget)
style-set (GtkStyle)
direction-changed (GtkTextDirection)
grab-notify (gboolean)
child-notify (GParam)
mnemonic-activate (gboolean) -> gboolean
grab-focus ()
focus (GtkDirectionType) -> gboolean
move-focus (GtkDirectionType)
event (GdkEvent) -> gboolean
event-after (GdkEvent)
button-press-event (GdkEvent) -> gboolean
button-release-event (GdkEvent) -> gboolean
scroll-event (GdkEvent) -> gboolean
motion-notify-event (GdkEvent) -> gboolean
keynav-failed (GtkDirectionType) -> gboolean
delete-event (GdkEvent) -> gboolean
destroy-event (GdkEvent) -> gboolean
expose-event (GdkEvent) -> gboolean
key-press-event (GdkEvent) -> gboolean
key-release-event (GdkEvent) -> gboolean
enter-notify-event (GdkEvent) -> gboolean
leave-notify-event (GdkEvent) -> gboolean
configure-event (GdkEvent) -> gboolean
focus-in-event (GdkEvent) -> gboolean
focus-out-event (GdkEvent) -> gboolean
map-event (GdkEvent) -> gboolean
unmap-event (GdkEvent) -> gboolean
property-notify-event (GdkEvent) -> gboolean
selection-clear-event (GdkEvent) -> gboolean
selection-request-event (GdkEvent) -> gboolean
selection-notify-event (GdkEvent) -> gboolean
selection-received (GtkSelectionData, guint)
selection-get (GtkSelectionData, guint, guint)
proximity-in-event (GdkEvent) -> gboolean
proximity-out-event (GdkEvent) -> gboolean
drag-leave (GdkDragContext, guint)
drag-begin (GdkDragContext)
drag-end (GdkDragContext)
drag-data-delete (GdkDragContext)
drag-failed (GdkDragContext, GtkDragResult) -> gboolean
drag-motion (GdkDragContext, gint, gint, guint) -> gboolean
drag-drop (GdkDragContext, gint, gint, guint) -> gboolean
drag-data-get (GdkDragContext, GtkSelectionData, guint, guint)
drag-data-received (GdkDragContext, gint, gint, GtkSelectionData, guint, guint)
visibility-notify-event (GdkEvent) -> gboolean
client-event (GdkEvent) -> gboolean
no-expose-event (GdkEvent) -> gboolean
window-state-event (GdkEvent) -> gboolean
damage-event (GdkEvent) -> gboolean
grab-broken-event (GdkEvent) -> gboolean
query-tooltip (gint, gint, gboolean, GtkTooltip) -> gboolean
popup-menu () -> gboolean
show-help (GtkWidgetHelpType) -> gboolean
accel-closures-changed ()
screen-changed (GdkScreen)
can-activate-accel (guint) -> gboolean
Properties from GtkWidget:
name -> gchararray: Widget name
The name of the widget
parent -> GtkContainer: Parent widget
The parent widget of this widget. Must be a Container widget
width-request -> gint: Width request
Override for width request of the widget, or -1 if natural request should be used
height-request -> gint: Height request
Override for height request of the widget, or -1 if natural request should be used
visible -> gboolean: Visible
Whether the widget is visible
sensitive -> gboolean: Sensitive
Whether the widget responds to input
app-paintable -> gboolean: Application paintable
Whether the application will paint directly on the widget
can-focus -> gboolean: Can focus
Whether the widget can accept the input focus
has-focus -> gboolean: Has focus
Whether the widget has the input focus
is-focus -> gboolean: Is focus
Whether the widget is the focus widget within the toplevel
can-default -> gboolean: Can default
Whether the widget can be the default widget
has-default -> gboolean: Has default
Whether the widget is the default widget
receives-default -> gboolean: Receives default
If TRUE, the widget will receive the default action when it is focused
composite-child -> gboolean: Composite child
Whether the widget is part of a composite widget
style -> GtkStyle: Style
The style of the widget, which contains information about how it will look (colors etc)
events -> GdkEventMask: Events
The event mask that decides what kind of GdkEvents this widget gets
extension-events -> GdkExtensionMode: Extension events
The mask that decides what kind of extension events this widget gets
no-show-all -> gboolean: No show all
Whether gtk_widget_show_all() should not affect this widget
has-tooltip -> gboolean: Has tooltip
Whether this widget has a tooltip
tooltip-markup -> gchararray: Tooltip markup
The contents of the tooltip for this widget
tooltip-text -> gchararray: Tooltip Text
The contents of the tooltip for this widget
window -> GdkWindow: Window
The widget's window if it is realized
double-buffered -> gboolean: Double Buffered
Whether or not the widget is double buffered
Signals from GtkObject:
destroy ()
Properties from GtkObject:
user-data -> gpointer: User Data
Anonymous User Data Pointer
Signals from GObject:
notify (GParam)
"""
@classmethod
def do_disable_device(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def do_enable_device(cls, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
__gtype__ = None # (!) real value is ''
| gpl-2.0 | -1,359,174,951,522,913,800 | 39.217054 | 104 | 0.667406 | false |
erichaase/topcoder-python | topcoder/knights_tour.py | 1 | 2012 | """
`KnightsTour <http://community.topcoder.com/stat?c=problem_statement&pm=10577>`__
"""
def solution (board):
b, n = Board(board), 1
while b.update(): n += 1
return n
class Board:
def __init__ (self, board):
self.board = [list(row) for row in board]
def update (self):
k, t = self.next_move()
if k and t:
self.board[k[0]][k[1]] = "*"
self.board[t[0]][t[1]] = "K"
return True
else:
return False
def next_move (self):
k = self.knight()
m = self.moves(k)
m.sort(key = lambda p: p[1])
m.sort(key = lambda p: p[0])
m.sort(key = lambda p: len(self.moves(p)))
t = None
if len(m) > 0:
t = m[0]
return k, t
def knight (self):
for x, row in enumerate(self.board):
for y, cell in enumerate(row):
if cell == "K":
return x, y
return None, None
def moves (self, p):
x, y = p[0], p[1]
targets = [
[x - 2, y - 1],
[x - 2, y + 1],
[x - 1, y + 2],
[x + 1, y + 2],
[x + 2, y - 1],
[x + 2, y + 1],
[x - 1, y - 2],
[x + 1, y - 2],
]
m = []
for target in targets:
if self.valid(target):
m.append(target)
return m
def valid (self, p):
x, y = p[0], p[1]
if x < 0:
return False
if x >= len(self.board):
return False
if y < 0:
return False
if y >= len(self.board[0]):
return False
c = self.board[x][y]
if c == "*":
return False
if c == "K":
return False
if c == ".":
return True
return False
def __str__ (self):
s = ""
for row in self.board:
s += "".join(row)
s += "\n"
return s
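# Usage sketch (added commentary, board is hypothetical): 'K' marks the knight,
# '.' free squares, '*' visited squares.
#     solution(["K...", "....", "...."])
# returns how many squares the tour visits before no legal move remains; moves
# are ranked by fewest onward moves first, a Warnsdorff-style heuristic.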
| mit | -7,622,723,604,843,863,000 | 21.863636 | 81 | 0.394135 | false |
WladimirSidorenko/SentiLex | scripts/vo.py | 1 | 10974 | #!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8; -*-
"""Module for generating lexicon using Velikovich's method (2010).
"""
##################################################################
# Imports
from __future__ import unicode_literals, print_function
from collections import Counter
from copy import deepcopy
from datetime import datetime
from itertools import chain
from theano import tensor as TT
from sklearn.model_selection import train_test_split
import codecs
import numpy as np
import sys
import theano
from common import BTCH_SIZE, ENCODING, EPSILON, ESC_CHAR, FMAX, FMIN, \
INFORMATIVE_TAGS, MIN_TOK_CNT, \
NEGATIVE_IDX, NEUTRAL_IDX, POSITIVE_IDX, NONMATCH_RE, SENT_END_RE, \
TAB_RE, check_word, floatX, sgd_updates_adadelta
from common import POSITIVE as POSITIVE_LBL
from common import NEGATIVE as NEGATIVE_LBL
from germanet import normalize
##################################################################
# Constants
DFLT_T = 20
FASTMODE = False
MAX_NGHBRS = 25
TOK_WINDOW = 4 # it actually corresponds to a window of six
MAX_POS_IDS = 10000
MAX_EPOCHS = 5
MIN_EPOCHS = 3
UNK = "%unk%"
UNK_I = 0
##################################################################
# Methods
def _read_files_helper(a_crp_files, a_encoding=ENCODING):
"""Read corpus files and execute specified function.
@param a_crp_files - files of the original corpus
@param a_encoding - encoding of the vector file
@return (Iterator over file lines)
"""
i = 0
tokens_seen = False
for ifname in a_crp_files:
with codecs.open(ifname, 'r', a_encoding) as ifile:
for iline in ifile:
iline = iline.strip().lower()
if not iline or SENT_END_RE.match(iline):
continue
elif iline[0] == ESC_CHAR:
if FASTMODE:
i += 1
if i > 300:
break
if tokens_seen:
tokens_seen = False
yield None, None, None
continue
try:
iform, itag, ilemma = TAB_RE.split(iline)
except:
print("Invalid line format at line: {:s}".format(
repr(iline)), file=sys.stderr
)
continue
tokens_seen = True
yield iform, itag, normalize(ilemma)
yield None, None, None
def _read_files(a_crp_files, a_pos, a_neg, a_neut,
a_pos_re=NONMATCH_RE, a_neg_re=NONMATCH_RE,
a_encoding=ENCODING):
"""Read corpus files and populate one-directional co-occurrences.
@param a_crp_files - files of the original corpus
@param a_pos - initial set of positive terms
@param a_neg - initial set of negative terms
@param a_neut - initial set of neutral terms
@param a_pos_re - regular expression for matching positive terms
@param a_neg_re - regular expression for matching negative terms
@param a_encoding - encoding of the vector file
    @return (word2vecid, max_sent_len, X, Y)
@note constructs statistics in place
"""
print("Populating corpus statistics...",
end="", file=sys.stderr)
word2cnt = Counter(ilemma
for _, itag, ilemma in _read_files_helper(a_crp_files,
a_encoding)
if ilemma is not None and itag[:2] in INFORMATIVE_TAGS
and check_word(ilemma))
print(" done", file=sys.stderr)
word2vecid = {UNK: UNK_I}
for w in chain(a_pos, a_neg, a_neut):
word2vecid[w] = len(word2vecid)
# convert words to vector ids if their counters are big enough
for w, cnt in word2cnt.iteritems():
if cnt >= MIN_TOK_CNT or a_pos_re.search(w) or a_neg_re.search(w):
word2vecid[w] = len(word2vecid)
word2cnt.clear()
# generate the training set
def check_in_seeds(a_form, a_lemma, a_seeds, a_seed_re):
if a_seed_re.search(a_form) or a_seed_re.search(a_lemma) \
or a_form in a_seeds or normalize(a_form) in a_seeds \
or a_lemma in a_seeds:
return True
return False
max_sent_len = 0
X = []
Y = []
toks = []
label = None
for iform, itag, ilemma in _read_files_helper(a_crp_files):
if ilemma is None:
if toks:
if label is not None:
max_sent_len = max(max_sent_len, len(toks))
X.append(deepcopy(toks))
Y.append(label)
del toks[:]
label = None
continue
if ilemma in word2vecid:
toks.append(word2vecid[ilemma])
if check_in_seeds(iform, ilemma, a_pos, a_pos_re):
label = POSITIVE_IDX
elif check_in_seeds(iform, ilemma, a_neg, a_neg_re):
label = NEGATIVE_IDX
elif label is None and check_in_seeds(iform, ilemma,
a_neut, NONMATCH_RE):
label = NEUTRAL_IDX
X = np.array(
[x + [UNK_I] * (max_sent_len - len(x))
for x in X], dtype="int32")
Y = np.array(Y, dtype="int32")
return (word2vecid, max_sent_len, X, Y)
def init_embeddings(vocab_size, k=3):
"""Uniformly initialze lexicon scores for each vocabulary word.
Args:
vocab_size (int): vocabulary size
k (int): dimensionality of embeddings
Returns:
2-tuple(theano.shared, int): embedding matrix, vector dimmensionality
"""
rand_vec = np.random.uniform(-0.25, 0.25, k)
W = floatX(np.broadcast_to(rand_vec,
(vocab_size, k)))
# zero-out the vector of unknown terms
W[UNK_I] *= 0.
return theano.shared(value=W, name='W'), k
def init_nnet(W, k):
"""Initialize neural network.
Args:
W (theano.shared): embedding matrix
k: dimensionality of the vector
"""
# `x' will be a matrix of size `m x n', where `m' is the mini-batch size
# and `n' is the maximum observed sentence length times the dimensionality
# of embeddings (`k')
x = TT.imatrix(name='x')
# `y' will be a vectors of size `m', where `m' is the mini-batch size
y = TT.ivector(name='y')
# `emb_sum' will be a matrix of size `m x k', where `m' is the mini-batch
# size and `k' is dimensionality of embeddings
emb_sum = W[x].sum(axis=1)
# it actually does not make sense to have an identity matrix in the
# network, but that's what the original Vo implemenation actually does
# W2S = theano.shared(value=floatX(np.eye(3)), name="W2S")
# y_prob = TT.nnet.softmax(TT.dot(W2S, emb_sum.T))
y_prob = TT.nnet.softmax(emb_sum)
y_pred = TT.argmax(y_prob, axis=1)
params = [W]
cost = -TT.mean(TT.log(y_prob)[TT.arange(y.shape[0]), y])
updates = sgd_updates_adadelta(params, cost)
train = theano.function([x, y], cost, updates=updates)
acc = TT.sum(TT.eq(y, y_pred))
validate = theano.function([x, y], acc)
zero_vec = TT.basic.zeros(k)
zero_out = theano.function([],
updates=[(W,
TT.set_subtensor(W[UNK_I, :],
zero_vec))])
return (train, validate, zero_out, params)
def vo(a_N, a_crp_files, a_pos, a_neg, a_neut,
a_pos_re=NONMATCH_RE, a_neg_re=NONMATCH_RE, a_encoding=ENCODING):
"""Method for generating sentiment lexicons using Velikovich's approach.
@param a_N - number of terms to extract
@param a_crp_files - files of the original corpus
@param a_pos - initial set of positive terms to be expanded
@param a_neg - initial set of negative terms to be expanded
@param a_neut - initial set of neutral terms to be expanded
@param a_pos_re - regular expression for matching positive terms
@param a_neg_re - regular expression for matching negative terms
@param a_encoding - encoding of the vector file
@return list of terms sorted according to their polarities
"""
# digitize training set
word2vecid, max_sent_len, X, Y = _read_files(
a_crp_files, a_pos, a_neg, a_neut, a_pos_re, a_neg_re,
a_encoding
)
# initianlize neural net and embedding matrix
W, k = init_embeddings(len(word2vecid))
train, validate, zero_out, params = init_nnet(W, k)
# organize minibatches and run the training
N = len(Y)
assert N, "Training set is empty."
train_idcs, devtest_idcs = train_test_split(
np.arange(N), test_size=0.1)
train_N = len(train_idcs)
devtest_N = float(len(devtest_idcs))
devtest_x = X[devtest_idcs[:]]
devtest_y = Y[devtest_idcs[:]]
btch_size = min(N, BTCH_SIZE)
epoch_i = 0
acc = 0
best_acc = -1
prev_acc = FMIN
best_params = []
while epoch_i < MAX_EPOCHS:
np.random.shuffle(train_idcs)
cost = acc = 0.
start_time = datetime.utcnow()
for start in np.arange(0, train_N, btch_size):
end = min(train_N, start + btch_size)
btch_x = X[train_idcs[start:end]]
btch_y = Y[train_idcs[start:end]]
cost += train(btch_x, btch_y)
zero_out()
acc = validate(devtest_x, devtest_y) / devtest_N
if acc >= best_acc:
best_params = [p.get_value() for p in params]
best_acc = acc
sfx = " *"
else:
sfx = ''
end_time = datetime.utcnow()
tdelta = (end_time - start_time).total_seconds()
print("Iteration #{:d} ({:.2f} sec): cost = {:.2f}, "
"accuracy = {:.2%};{:s}".format(epoch_i, tdelta, cost,
acc, sfx),
file=sys.stderr)
if abs(prev_acc - acc) < EPSILON and epoch_i > MIN_EPOCHS:
break
else:
prev_acc = acc
epoch_i += 1
if best_params:
for p, val in zip(params, best_params):
p.set_value(val)
W = W.get_value()
ret = []
for w, w_id in word2vecid.iteritems():
if w_id == UNK_I:
continue
elif w in a_pos or a_pos_re.search(w):
w_score = FMAX
elif w in a_neg or a_neg_re.search(w):
w_score = FMIN
else:
w_pol = np.argmax(W[w_id])
if w_pol == NEUTRAL_IDX:
continue
w_score = np.max(W[w_id])
if (w_pol == POSITIVE_IDX and w_score < 0.) \
or (w_pol == NEGATIVE_IDX and w_score > 0.):
w_score *= -1
ret.append((w,
POSITIVE_LBL if w_score > 0. else NEGATIVE_LBL,
w_score))
ret.sort(key=lambda el: abs(el[-1]), reverse=True)
if a_N >= 0:
del ret[a_N:]
return ret
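# Usage sketch (added commentary, file names and seed sets are hypothetical):
#     terms = vo(100, ["corpus.tagged"], {"gut"}, {"schlecht"}, {"heute"})
#     for term, polarity, score in terms:
#         print(term, polarity, score)
# yields at most a_N (term, polarity, score) triples sorted by |score|.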
| mit | -5,583,059,324,348,303,000 | 34.286174 | 78 | 0.551394 | false |
fupadev/FuME | fume/threads/DownloadProcessor.py | 1 | 5680 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# --------------------------------------------------------------------------
# FuME FuPa Match Explorer Copyright (c) 2017 Andreas Feldl <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The full license of the GNU General Public License is in the file LICENCE,
# distributed with this software; if not, see http://www.gnu.org/licenses/.
# --------------------------------------------------------------------------
import sqlite3
import lxml.html
import requests
from PyQt5 import QtCore
class DownloadProcessor(QtCore.QThread):
loggerSignal = QtCore.pyqtSignal(str)
statusBarSignal = QtCore.pyqtSignal(str)
def __init__(self, options):
super(DownloadProcessor, self).__init__(options['parent'])
self.region = options['region']
self.date_from = options['date-from']
self.date_to = options['date-to']
self.dbPath = options['database-path']
# def __del__(self):
# self.wait()
def download(self, date):
uAStr = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
headers = {'User-Agent': uAStr}
url = 'https://www.fupa.net/index.php?page=kalender&site_linkurl=%s&date=%s' % (self.region, date)
r = requests.get(url, headers=headers)
doc = lxml.html.fromstring(r.content)
path = '/html/body//table[@class]//tr/td/a[not(contains(@class, "spielbericht_icon"))]//text() | ' \
'/html/body//table[@class]//tr/td//img/@src | ' \
'/html/body//table[@class]//th//text() | ' \
'/html/body//table[@class]//th/a/@href | ' \
'/html/body//table[@class]//tr/td[@style]/a/@href'
raw = doc.xpath(path)
# replacing '-Live-' with '-:-'
raw = [i.replace('https://www.fupa.net/fupa/images/buttons/tipp_live.jpg', '-:-') for i in raw]
# From
# ['/liga/bezirksliga-west-31261.html', 'Bezirksliga West', '19:15 Uhr', 'TSV Abensberg', '-:-',
# '/spielberichte/tsv-abensberg-spvgg-mariaposching-3679861.html', 'SpVgg Mariaposching',
# To
# [['Bezirksliga West', '19:15 Uhr', 'TSV Abensberg', '-:-', '3679861', 'SpVgg Mariaposching'],
matches = []
for i, d in enumerate(raw):
if 'Relegation' in d:
league = 'Relegation'
elif '/liga/' in d:
league = raw[i + 1]
elif 'Test' in d:
league = raw[i]
if 'Uhr' in d:
# print(i)
current = [league]
for i in raw[i:i + 5]:
if '/spielberichte/' in i:
i = i.split('.')[0].split('-')[-1]
                    if '/spielberichte/' in i:  # bug on Fupa's side: URL = '/spielberichte/.html'
i = ''
current.append(i)
matches.append(current)
# rearrange
# ['3679861', 'Bezirksliga West', '19:15 Uhr', 'TSV Abensberg', 'SpVgg Mariaposching', '-:-']
tmp = []
for spiel in matches:
order = [4, 0, 1, 2, 5, 3]
spiel = [spiel[i] for i in order]
spiel[2] = date + ' ' + spiel[2][0:5]
tmp.append(spiel)
data = tmp
connection = sqlite3.connect(self.dbPath)
cursor = connection.cursor()
for p in data:
format_str = """INSERT OR IGNORE INTO calendar(match_id, league, match_date, home, guest, result, region)
VALUES ("{match_id}", "{league}", "{match_date}", "{home}", "{guest}", "{result}", "{region}");"""
sql_command = format_str.format(match_id=p[0], league=p[1], match_date=p[2],
home=p[3], guest=p[4], result=p[5], region=self.region)
try:
cursor.execute(sql_command)
except:
self.loggerSignal.emit('Folgendes Spiel wurde nicht hinzugefügt: %s' % p)
update_str = """UPDATE calendar
SET match_date="{match_date}", result="{result}", league="{league}" WHERE match_id = "{match_id}";"""
sql_command = update_str.format(match_id=p[0], match_date=p[2], league=p[1], result=p[5])
try:
cursor.execute(sql_command)
except:
self.loggerSignal.emit('Folgendes Spiel wurde nicht hinzugefügt: %s' % p)
connection.commit()
connection.close()
return len(data)
def run(self):
self.statusBarSignal.emit("Download")
date_from = self.date_from
date_to = self.date_to.addDays(1)
counter = 0
while date_from != date_to:
try:
counter += self.download(date_from.toString("yyyy-MM-dd"))
except Exception as e:
self.loggerSignal.emit('Fehler beim importieren: %s' % e)
return
date_from = date_from.addDays(1)
self.statusBarSignal.emit("Download: #%s Spiele" % counter)
self.loggerSignal.emit('%s Spiele erfolgreich hinzugefügt' % counter)
self.statusBarSignal.emit("Bereit")
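# Usage sketch (illustrative values only; requires a running Qt application with
# a parent widget, and an existing SQLite database containing a "calendar" table):
#
#     processor = DownloadProcessor({
#         'parent': main_window,
#         'region': 'niederbayern',
#         'date-from': QtCore.QDate(2017, 8, 1),
#         'date-to': QtCore.QDate(2017, 8, 7),
#         'database-path': '/path/to/fume.db',
#     })
#     processor.loggerSignal.connect(print)
#     processor.start()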
| gpl-3.0 | -4,301,365,850,361,170,000 | 38.423611 | 121 | 0.543773 | false |
ibc/MediaSoup | worker/deps/catch/projects/TestScripts/testRandomOrder.py | 1 | 2135 | #!/usr/bin/env python3
"""
This test script verifies that the random ordering of tests inside
Catch2 is invariant in regards to subsetting. This is done by running
the binary 3 times, once with all tests selected, and twice with smaller
subsets of tests selected, and verifying that the selected tests are in
the same relative order.
"""
import subprocess
import sys
import random
def list_tests(self_test_exe, tags, rng_seed):
cmd = [self_test_exe, '--list-test-names-only', '--order', 'rand',
'--rng-seed', str(rng_seed)]
tags_arg = ','.join('[{}]'.format(t) for t in tags)
if tags_arg:
cmd.append(tags_arg + '~[.]')
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if stderr:
        raise RuntimeError("Unexpected error output:\n" + stderr.decode())
result = stdout.split(b'\n')
result = [s for s in result if s]
if len(result) < 2:
raise RuntimeError("Unexpectedly few tests listed (got {})".format(
len(result)))
return result
def check_is_sublist_of(shorter, longer):
assert len(shorter) < len(longer)
assert len(set(longer)) == len(longer)
indexes_in_longer = {s: i for i, s in enumerate(longer)}
for s1, s2 in zip(shorter, shorter[1:]):
assert indexes_in_longer[s1] < indexes_in_longer[s2], (
'{} comes before {} in longer list.\n'
'Longer: {}\nShorter: {}'.format(s2, s1, longer, shorter))
def main():
self_test_exe, = sys.argv[1:]
# We want a random seed for the test, but want to avoid 0,
# because it has special meaning
seed = random.randint(1, 2 ** 32 - 1)
list_one_tag = list_tests(self_test_exe, ['generators'], seed)
list_two_tags = list_tests(self_test_exe, ['generators', 'matchers'], seed)
list_all = list_tests(self_test_exe, [], seed)
# First, verify that restricting to a subset yields the same order
check_is_sublist_of(list_two_tags, list_all)
check_is_sublist_of(list_one_tag, list_two_tags)
if __name__ == '__main__':
sys.exit(main())
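# Example invocation (the self-test binary path is illustrative and depends on
# the local Catch2 build directory):
#
#     python3 testRandomOrder.py ./projects/SelfTest/SelfTest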
| isc | 6,595,939,224,350,894,000 | 35.186441 | 79 | 0.640749 | false |
arpitprogressive/arpittest | intergration_test/banner.py | 1 | 18254 | # -*- coding: utf-8 -*-
"""
banner
Description goes here...
:copyright: (c) 2014 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
import unittest
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, \
NoAlertPresentException
from base import Selenium2OnSauce
class Banner(Selenium2OnSauce):
def test_advanced_skills(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/advanced-skills/")
self.assertTrue(self.is_element_present(By.ID, "wfmis"))
def test_advanced_skills_epp(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/software-products/advanced-skills/")
def test_advanced_skills_erd(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/advanced-skills/")
def test_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/")
    def test_central_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/central-government/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
def test_company_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/company-research/")
def test_company_training_provider(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/company-training-programs/")
def test_courseware(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
def test_developing_tomorrow(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/")
def test_download(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/download/")
def test_epp(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/foundation-skills/epp/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_erd(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/")
def test_event(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/event-workforce-enablement/")
def test_executive_summary(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/read-only/executive-summary/")
def test_foundation_advance_skills_devlopment(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/")
def test_foundation_convocation_banner(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/")
self.assertTrue(self.is_element_present(By.XPATH, "(//a[contains(text(),'Know More')])[3]"))
driver.get("http://pursuite.openlabs.us/about-us/ssc-nasscom/vision-mission/")
def test_foundation_skills_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/")
def test_foundation_skills_ed(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/foundation-skills/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
def test_foundation_skills_epp(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/software-products/foundation-skills/")
def test_full_course(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/read-only/full-course/")
def test_gbfs_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/gbfs/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "span.filetitle"))
def test_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_government_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
def test_government_training_program(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/government-training-programs/")
def test_healp_you_choose(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/")
self.assertTrue(self.is_element_present(By.LINK_TEXT, "Know More"))
def test_ict_academy_tamilnadu(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/ict-academy-tamilnadu/")
def test_il_fs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resougvrces/private-sector-training-programs/ilfs/")
def test_implementation_cycle_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/gbfs/implementation-cycle/")
def test_interactive_tools(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/interactive-tools/")
def test_it_initiative(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
def test_it_ites(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/it-ites-initiativesprograms/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[7]/div"))
def test_listining_of_programs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/event-workforce-enablement/listing-programs/")
def test_nasscom_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/nasscom-research/")
def test_niit(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/niit/")
def test_obf_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/gbfs/outcome-based-framework-gbfs/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "span.filetitle"))
def test_other_bodies_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/government-training-programs/other-bodies/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
def test_other_bodies(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/other-bodies/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[5]/div"))
def test_other_publication(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/other-publication/")
def test_policy_development(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/policy-development/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
def test_private_sector_training_programs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/")
def test_program_registration(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/event-workforce-enablement/program-registration/")
def test_promotion_marketing(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/promotion-marketing/")
def test_read_only(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/read-only/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_skills_academy(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/skills-academy/")
def test_software_products(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/software-products/")
def test_ssc_training_programs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/")
def test_state_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/state-government/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
def test_talent_sprint(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/talent-sprint/")
def test_training_materials(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/training-materials/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
def test_training_that_helps_you(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/")
self.assertTrue(self.is_element_present(By.XPATH, "(//a[contains(text(),'Know More')])[2]"))
def test_training_tools(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/training-tools/")
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
        except NoAlertPresentException: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 7,783,938,493,523,269,000 | 60.461279 | 238 | 0.7165 | false |
YannChemin/wxGIPE | pyplottest.py | 1 | 3074 | # -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: wxGIPE
# Purpose: Satellite image [multi/hyper][spectral/temporal] pixel plotting
# Author: Yann Chemin, <[email protected]>
#
###############################################################################
# Copyright (c) 2008, Yann Chemin <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
#!/usr/bin/python
import wx
import wx.lib.plot
import numpy
class TestFrame(wx.Frame):
def __init__(
self, parent, ID, title, pos=wx.DefaultPosition,
size=(600, 400), style=wx.DEFAULT_FRAME_STYLE
):
wx.Frame.__init__(self, parent, ID, title, pos, size, style)
def OnCloseWindow(self, event):
self.Destroy()
class TestPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent, -1, size=(600, 400))
self.client = wx.lib.plot.PlotCanvas(self)
sizer = wx.BoxSizer()
sizer.Add(self.client, 1, wx.EXPAND)
self.SetSizer(sizer)
data1 = 2.*numpy.pi*numpy.arange(2*100)/200.
data1.shape = (100, 2)
data1[:,1] = numpy.sin(data1[:,0])
markers1 = wx.lib.plot.PolyMarker(data1, legend='Green Markers', colour='green', marker='circle',size=1)
        # 100-point cosine function, plotted as a red line
data1 = 2.*numpy.pi*numpy.arange(2*100)/200.
data1.shape = (100,2)
data1[:,1] = numpy.cos(data1[:,0])
lines = wx.lib.plot.PolyLine(data1, legend= 'Red Line', colour='red')
a = wx.lib.plot.PlotGraphics([markers1, lines],"Graph Title", "X Axis", "Y Axis")
self.client.Draw(a)
def __ptest():
app = wx.PySimpleApp()
win = TestFrame(None, -1, "Test FRAME")
win2 = TestPanel(win)
win.Show(True)
app.MainLoop()
if __name__ == '__main__':
__ptest()
| unlicense | 6,034,506,263,572,517,000 | 35.487805 | 112 | 0.596942 | false |
belkinsky/SFXbot | src/pyAudioAnalysis/audioTrainTest.py | 1 | 46228 | import sys
import numpy
import time
import os
import glob
import pickle
import shutil
import audioop
import signal
import csv
import ntpath
from . import audioFeatureExtraction as aF
from . import audioBasicIO
from matplotlib.mlab import find
import matplotlib.pyplot as plt
import scipy.io as sIO
from scipy import linalg as la
from scipy.spatial import distance
import sklearn.svm
import sklearn.decomposition
import sklearn.ensemble
def signal_handler(signal, frame):
print('You pressed Ctrl+C! - EXIT')
os.system("stty -cbreak echo")
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
shortTermWindow = 0.050
shortTermStep = 0.050
eps = 0.00000001
class kNN:
def __init__(self, X, Y, k):
self.X = X
self.Y = Y
self.k = k
def classify(self, testSample):
nClasses = numpy.unique(self.Y).shape[0]
YDist = (distance.cdist(self.X, testSample.reshape(1, testSample.shape[0]), 'euclidean')).T
iSort = numpy.argsort(YDist)
P = numpy.zeros((nClasses,))
for i in range(nClasses):
P[i] = numpy.nonzero(self.Y[iSort[0][0:self.k]] == i)[0].shape[0] / float(self.k)
return (numpy.argmax(P), P)
def classifierWrapper(classifier, classifierType, testSample):
'''
This function is used as a wrapper to pattern classification.
ARGUMENTS:
- classifier: a classifier object of type sklearn.svm.SVC or kNN (defined in this library) or sklearn.ensemble.RandomForestClassifier or sklearn.ensemble.GradientBoostingClassifier or sklearn.ensemble.ExtraTreesClassifier
        - classifierType:    "svm" or "knn" or "randomforest" or "gradientboosting" or "extratrees"
- testSample: a feature vector (numpy array)
RETURNS:
- R: class ID
- P: probability estimate
EXAMPLE (for some audio signal stored in array x):
import audioFeatureExtraction as aF
import audioTrainTest as aT
# load the classifier (here SVM, for kNN use loadKNNModel instead):
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep] = aT.loadSVModel(modelName)
# mid-term feature extraction:
[MidTermFeatures, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs*stWin), round(Fs*stStep));
# feature normalization:
curFV = (MidTermFeatures[:, i] - MEAN) / STD;
# classification
[Result, P] = classifierWrapper(Classifier, modelType, curFV)
'''
R = -1
P = -1
if classifierType == "knn":
[R, P] = classifier.classify(testSample)
    elif classifierType == "svm" or classifierType == "randomforest" or classifierType == "gradientboosting" or classifierType == "extratrees":
R = classifier.predict(testSample.reshape(1,-1))[0]
P = classifier.predict_proba(testSample.reshape(1,-1))[0]
return [R, P]
def regressionWrapper(model, modelType, testSample):
'''
This function is used as a wrapper to pattern classification.
ARGUMENTS:
- model: regression model
        - modelType:        "svm" or "randomforest" ("knn" is TODO)
- testSample: a feature vector (numpy array)
RETURNS:
- R: regression result (estimated value)
EXAMPLE (for some audio signal stored in array x):
TODO
'''
if modelType == "svm" or modelType == "randomforest":
return (model.predict(testSample.reshape(1,-1))[0])
# elif classifierType == "knn":
# TODO
return None
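# Usage sketch for regressionWrapper (synthetic data; this is only an
# illustration added here, not part of the original API documentation):
#
#     import numpy
#     X = numpy.random.rand(20, 5)      # 20 samples, 5 features
#     y = numpy.random.rand(20)         # continuous targets
#     svr, train_err = trainSVMregression(X, y, 1.0)
#     estimate = regressionWrapper(svr, "svm", X[0])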
def randSplitFeatures(features, partTrain):
'''
    This function randomly splits a feature set into training and testing data.
    ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features.
                            each matrix features[i] of class i is [numOfSamples x numOfDimensions]
        - partTrain:        fraction of the samples of each class to be used for training (e.g. 0.90)
    RETURNS:
        - featuresTrain:    a list of training data for each class
- featuresTest: a list of testing data for each class
'''
featuresTrain = []
featuresTest = []
for i, f in enumerate(features):
[numOfSamples, numOfDims] = f.shape
randperm = numpy.random.permutation(list(range(numOfSamples)))
nTrainSamples = int(round(partTrain * numOfSamples))
featuresTrain.append(f[randperm[0:nTrainSamples]])
featuresTest.append(f[randperm[nTrainSamples::]])
return (featuresTrain, featuresTest)
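# Usage sketch (synthetic features for two classes, 90%/10% split per class):
#
#     import numpy
#     f_train, f_test = randSplitFeatures(
#         [numpy.random.rand(50, 10), numpy.random.rand(40, 10)], 0.90)
#     # f_train[0] holds 45 samples of class 0, f_test[0] the remaining 5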
def trainKNN(features, K):
'''
Train a kNN classifier.
ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features.
each matrix features[i] of class i is [numOfSamples x numOfDimensions]
- K: parameter K
RETURNS:
- kNN: the trained kNN variable
'''
[Xt, Yt] = listOfFeatures2Matrix(features)
knn = kNN(Xt, Yt, K)
return knn
def trainSVM(features, Cparam):
'''
    Train a multi-class probabilistic SVM classifier.
    Note: This function is simply a wrapper to the sklearn functionality for SVM training
    See function featureAndTrain() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes.
    ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features
each matrix features[i] of class i is [numOfSamples x numOfDimensions]
- Cparam: SVM parameter C (cost of constraints violation)
RETURNS:
- svm: the trained SVM variable
NOTE:
This function trains a linear-kernel SVM for a given C value. For a different kernel, other types of parameters should be provided.
'''
[X, Y] = listOfFeatures2Matrix(features)
svm = sklearn.svm.SVC(C = Cparam, kernel = 'linear', probability = True)
svm.fit(X,Y)
return svm
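# Usage sketch (two synthetic, linearly shifted classes; in practice the feature
# matrices come from aF.dirsWavFeatureExtraction and should be normalized first):
#
#     import numpy
#     class_a = numpy.random.rand(30, 8)
#     class_b = numpy.random.rand(30, 8) + 1.0
#     svm = trainSVM([class_a, class_b], 1.0)
#     label, probs = classifierWrapper(svm, "svm", class_b[0])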
def trainRandomForest(features, n_estimators):
'''
    Train a multi-class random forest classifier.
    Note: This function is simply a wrapper to the sklearn functionality for random forest training.
    See function featureAndTrain() to use a wrapper on both the feature extraction and the classifier training (and parameter tuning) processes.
    ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features
                            each matrix features[i] of class i is [numOfSamples x numOfDimensions]
        - n_estimators:     number of trees in the forest
    RETURNS:
        - rf:               the trained random forest classifier
'''
[X, Y] = listOfFeatures2Matrix(features)
rf = sklearn.ensemble.RandomForestClassifier(n_estimators = n_estimators)
rf.fit(X,Y)
return rf
def trainGradientBoosting(features, n_estimators):
'''
    Train a multi-class gradient boosting classifier.
    Note: This function is simply a wrapper to the sklearn functionality for gradient boosting training.
    See function featureAndTrain() to use a wrapper on both the feature extraction and the classifier training (and parameter tuning) processes.
    ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features
                            each matrix features[i] of class i is [numOfSamples x numOfDimensions]
        - n_estimators:     number of boosting stages (trees)
    RETURNS:
        - gb:               the trained gradient boosting classifier
'''
[X, Y] = listOfFeatures2Matrix(features)
rf = sklearn.ensemble.GradientBoostingClassifier(n_estimators = n_estimators)
rf.fit(X,Y)
return rf
def trainExtraTrees(features, n_estimators):
'''
    Train a multi-class extra trees classifier.
    Note: This function is simply a wrapper to the sklearn functionality for extra trees training.
    See function featureAndTrain() to use a wrapper on both the feature extraction and the classifier training (and parameter tuning) processes.
    ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features
                            each matrix features[i] of class i is [numOfSamples x numOfDimensions]
        - n_estimators:     number of trees in the forest
    RETURNS:
        - et:               the trained extra trees classifier
'''
[X, Y] = listOfFeatures2Matrix(features)
et = sklearn.ensemble.ExtraTreesClassifier(n_estimators = n_estimators)
et.fit(X,Y)
return et
def trainSVMregression(Features, Y, Cparam):
svm = sklearn.svm.SVR(C = Cparam, kernel = 'linear')
print(Features.shape, Y)
svm.fit(Features,Y)
trainError = numpy.mean(numpy.abs(svm.predict(Features) - Y))
return svm, trainError
# TODO (not avaiable for regression?)
#def trainRandomForestRegression(Features, Y, n_estimators):
# rf = sklearn.ensemble.RandomForestClassifier(n_estimators = n_estimators)
# print Features.shape, Y
# rf.fit(Features,Y)
# trainError = numpy.mean(numpy.abs(rf.predict(Features) - Y))
# return rf, trainError
def featureAndTrain(listOfDirs, mtWin, mtStep, stWin, stStep, classifierType, modelName, computeBEAT=False, perTrain=0.90):
'''
This function is used as a wrapper to segment-based audio feature extraction and classifier training.
ARGUMENTS:
        listOfDirs:        list of paths of directories. Each directory contains a single audio class whose samples are stored in separate WAV files.
mtWin, mtStep: mid-term window length and step
stWin, stStep: short-term window and step
classifierType: "svm" or "knn" or "randomforest" or "gradientboosting" or "extratrees"
modelName: name of the model to be saved
RETURNS:
None. Resulting classifier along with the respective model parameters are saved on files.
'''
# STEP A: Feature Extraction:
[features, classNames, _] = aF.dirsWavFeatureExtraction(listOfDirs, mtWin, mtStep, stWin, stStep, computeBEAT=computeBEAT)
if len(features) == 0:
print("trainSVM_feature ERROR: No data found in any input folder!")
return
numOfFeatures = features[0].shape[1]
featureNames = ["features" + str(d + 1) for d in range(numOfFeatures)]
writeTrainDataToARFF(modelName, features, classNames, featureNames)
for i, f in enumerate(features):
if len(f) == 0:
print("trainSVM_feature ERROR: " + listOfDirs[i] + " folder is empty or non-existing!")
return
# STEP B: Classifier Evaluation and Parameter Selection:
if classifierType == "svm":
classifierParams = numpy.array([0.001, 0.01, 0.5, 1.0, 5.0, 10.0])
elif classifierType == "randomforest":
classifierParams = numpy.array([10, 25, 50, 100,200,500])
elif classifierType == "knn":
classifierParams = numpy.array([1, 3, 5, 7, 9, 11, 13, 15])
elif classifierType == "gradientboosting":
classifierParams = numpy.array([10, 25, 50, 100,200,500])
elif classifierType == "extratrees":
classifierParams = numpy.array([10, 25, 50, 100,200,500])
    # get optimal classifier parameter:
bestParam = evaluateClassifier(features, classNames, 100, classifierType, classifierParams, 0, perTrain)
print("Selected params: {0:.5f}".format(bestParam))
C = len(classNames)
[featuresNorm, MEAN, STD] = normalizeFeatures(features) # normalize features
MEAN = MEAN.tolist()
STD = STD.tolist()
featuresNew = featuresNorm
# STEP C: Save the classifier to file
if classifierType == "svm":
Classifier = trainSVM(featuresNew, bestParam)
with open(modelName, 'wb') as fid: # save to file
pickle.dump(Classifier, fid)
fo = open(modelName + "MEANS", "wb")
pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL)
fo.close()
elif classifierType == "randomforest":
Classifier = trainRandomForest(featuresNew, bestParam)
with open(modelName, 'wb') as fid: # save to file
pickle.dump(Classifier, fid)
fo = open(modelName + "MEANS", "wb")
pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL)
fo.close()
elif classifierType == "gradientboosting":
Classifier = trainGradientBoosting(featuresNew, bestParam)
with open(modelName, 'wb') as fid: # save to file
pickle.dump(Classifier, fid)
fo = open(modelName + "MEANS", "wb")
pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL)
fo.close()
elif classifierType == "extratrees":
Classifier = trainExtraTrees(featuresNew, bestParam)
with open(modelName, 'wb') as fid: # save to file
pickle.dump(Classifier, fid)
fo = open(modelName + "MEANS", "wb")
pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL)
fo.close()
elif classifierType == "knn":
[X, Y] = listOfFeatures2Matrix(featuresNew)
X = X.tolist()
Y = Y.tolist()
fo = open(modelName, "wb")
pickle.dump(X, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(Y, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(bestParam, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL)
fo.close()
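# Usage sketch (directory names are placeholders; each directory holds the WAV
# files of one class):
#
#     featureAndTrain(["data/music", "data/speech"],
#                     1.0, 1.0, shortTermWindow, shortTermStep,
#                     "svm", "svmMusicSpeech", computeBEAT=False)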
def featureAndTrainRegression(dirName, mtWin, mtStep, stWin, stStep, modelType, modelName, computeBEAT=False):
'''
This function is used as a wrapper to segment-based audio feature extraction and classifier training.
ARGUMENTS:
dirName: path of directory containing the WAV files and Regression CSVs
mtWin, mtStep: mid-term window length and step
stWin, stStep: short-term window and step
modelType: "svm" or "knn" or "randomforest"
modelName: name of the model to be saved
RETURNS:
None. Resulting regression model along with the respective model parameters are saved on files.
'''
# STEP A: Feature Extraction:
[features, _, fileNames] = aF.dirsWavFeatureExtraction([dirName], mtWin, mtStep, stWin, stStep, computeBEAT=computeBEAT)
features = features[0]
fileNames = [ntpath.basename(f) for f in fileNames[0]]
# Read CSVs:
CSVs = glob.glob(dirName + os.sep + "*.csv")
regressionLabels = []
regressionNames = []
for c in CSVs: # for each CSV
curRegressionLabels = numpy.zeros((len(fileNames, ))) # read filenames, map to "fileNames" and append respective values in the regressionLabels
        with open(c, 'r') as csvfile:
CSVreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in CSVreader:
if len(row) == 2:
if row[0]+".wav" in fileNames:
index = fileNames.index(row[0]+".wav")
curRegressionLabels[index] = float(row[1])
regressionLabels.append(curRegressionLabels) # curRegressionLabels is the list of values for the current regression problem
regressionNames.append(ntpath.basename(c).replace(".csv", "")) # regression task name
if len(features) == 0:
print("ERROR: No data found in any input folder!")
return
numOfFeatures = features.shape[1]
    # TODO: ARFF WRITE????
# STEP B: Classifier Evaluation and Parameter Selection:
if modelType == "svm":
modelParams = numpy.array([0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 5.0, 10.0])
elif modelType == "randomforest":
modelParams = numpy.array([5, 10, 25, 50, 100])
# elif modelType == "knn":
# modelParams = numpy.array([1, 3, 5, 7, 9, 11, 13, 15]);
for iRegression, r in enumerate(regressionNames):
        # get optimal classifier parameter:
print("Regression task " + r)
bestParam = evaluateRegression(features, regressionLabels[iRegression], 100, modelType, modelParams)
print("Selected params: {0:.5f}".format(bestParam))
[featuresNorm, MEAN, STD] = normalizeFeatures([features]) # normalize features
# STEP C: Save the model to file
if modelType == "svm":
Classifier, _ = trainSVMregression(featuresNorm[0], regressionLabels[iRegression], bestParam)
with open(modelName + "_" + r, 'wb') as fid: # save to file
pickle.dump(Classifier, fid)
fo = open(modelName + "_" + r + "MEANS", "wb")
pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL)
fo.close()
''' TODO
elif modelType == "randomforest":
Classifier, _ = trainRandomForestRegression(featuresNorm[0], regressionLabels[iRegression], bestParam)
with open(modelName + "_" + r, 'wb') as fid: # save to file
cPickle.dump(Classifier, fid)
fo = open(modelName + "_" + r + "MEANS", "wb")
cPickle.dump(MEAN, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(STD, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(stWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(stStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(computeBEAT, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
'''
# elif classifierType == "knn":
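# Usage sketch (the directory name is a placeholder; it must contain WAV files
# plus one CSV file per regression task, each row mapping
# "<file basename without .wav>,<target value>"):
#
#     featureAndTrainRegression("data/speechEmotion", 1.0, 1.0,
#                               shortTermWindow, shortTermStep,
#                               "svm", "svmEmotionModel")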
def loadKNNModel(kNNModelName, isRegression=False):
try:
fo = open(kNNModelName, "rb")
except IOError:
print("didn't find file")
return
try:
X = pickle.load(fo)
Y = pickle.load(fo)
MEAN = pickle.load(fo)
STD = pickle.load(fo)
if not isRegression:
classNames = pickle.load(fo)
K = pickle.load(fo)
mtWin = pickle.load(fo)
mtStep = pickle.load(fo)
stWin = pickle.load(fo)
stStep = pickle.load(fo)
computeBEAT = pickle.load(fo)
except:
fo.close()
fo.close()
X = numpy.array(X)
Y = numpy.array(Y)
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
Classifier = kNN(X, Y, K) # Note: a direct call to the kNN constructor is used here
if isRegression:
return(Classifier, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def loadSVModel(SVMmodelName, isRegression=False):
'''
    This function loads an SVM model either for classification or regression.
    ARGUMENTS:
        - SVMmodelName:     the path of the model to be loaded
        - isRegression:     a flag indicating whether the model is a regression model or not
'''
try:
fo = open(SVMmodelName+"MEANS", "rb")
except IOError:
print("Load SVM Model: Didn't find file")
return
try:
MEAN = pickle.load(fo)
STD = pickle.load(fo)
if not isRegression:
classNames = pickle.load(fo)
mtWin = pickle.load(fo)
mtStep = pickle.load(fo)
stWin = pickle.load(fo)
stStep = pickle.load(fo)
computeBEAT = pickle.load(fo)
except:
fo.close()
fo.close()
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
COEFF = []
with open(SVMmodelName, 'rb') as fid:
SVM = pickle.load(fid)
if isRegression:
return(SVM, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(SVM, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def loadRandomForestModel(RFmodelName, isRegression=False):
'''
    This function loads a random forest model either for classification or regression.
    ARGUMENTS:
        - RFmodelName:      the path of the model to be loaded
        - isRegression:     a flag indicating whether the model is a regression model or not
'''
try:
fo = open(RFmodelName+"MEANS", "rb")
except IOError:
print("Load Random Forest Model: Didn't find file")
return
try:
MEAN = pickle.load(fo)
STD = pickle.load(fo)
if not isRegression:
classNames = pickle.load(fo)
mtWin = pickle.load(fo)
mtStep = pickle.load(fo)
stWin = pickle.load(fo)
stStep = pickle.load(fo)
computeBEAT = pickle.load(fo)
except:
fo.close()
fo.close()
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
COEFF = []
with open(RFmodelName, 'rb') as fid:
RF = pickle.load(fid)
if isRegression:
return(RF, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(RF, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def loadGradientBoostingModel(GBModelName, isRegression=False):
'''
    This function loads a gradient boosting model either for classification or regression.
    ARGUMENTS:
        - GBModelName:      the path of the model to be loaded
        - isRegression:     a flag indicating whether the model is a regression model or not
'''
try:
fo = open(GBModelName+"MEANS", "rb")
except IOError:
        print("Load Gradient Boosting Model: Didn't find file")
return
try:
MEAN = pickle.load(fo)
STD = pickle.load(fo)
if not isRegression:
classNames = pickle.load(fo)
mtWin = pickle.load(fo)
mtStep = pickle.load(fo)
stWin = pickle.load(fo)
stStep = pickle.load(fo)
computeBEAT = pickle.load(fo)
except:
fo.close()
fo.close()
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
COEFF = []
with open(GBModelName, 'rb') as fid:
GB = pickle.load(fid)
if isRegression:
return(GB, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(GB, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def loadExtraTreesModel(ETmodelName, isRegression=False):
'''
    This function loads an extra trees model either for classification or regression.
    ARGUMENTS:
        - ETmodelName:      the path of the model to be loaded
        - isRegression:     a flag indicating whether the model is a regression model or not
'''
try:
fo = open(ETmodelName+"MEANS", "rb")
except IOError:
        print("Load Extra Trees Model: Didn't find file")
return
try:
MEAN = pickle.load(fo)
STD = pickle.load(fo)
if not isRegression:
classNames = pickle.load(fo)
mtWin = pickle.load(fo)
mtStep = pickle.load(fo)
stWin = pickle.load(fo)
stStep = pickle.load(fo)
computeBEAT = pickle.load(fo)
except:
fo.close()
fo.close()
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
COEFF = []
with open(ETmodelName, 'rb') as fid:
GB = pickle.load(fid)
if isRegression:
return(GB, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(GB, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def evaluateClassifier(features, ClassNames, nExp, ClassifierName, Params, parameterMode, perTrain=0.90):
'''
ARGUMENTS:
        features:     a list ([numOfClasses x 1]) whose elements contain numpy matrices of features.
each matrix features[i] of class i is [numOfSamples x numOfDimensions]
ClassNames: list of class names (strings)
nExp: number of cross-validation experiments
        ClassifierName: svm or knn or randomforest or gradientboosting or extratrees
Params: list of classifier parameters (for parameter tuning during cross-validation)
parameterMode: 0: choose parameters that lead to maximum overall classification ACCURACY
1: choose parameters that lead to maximum overall F1 MEASURE
RETURNS:
bestParam: the value of the input parameter that optimizes the selected performance measure
'''
# feature normalization:
(featuresNorm, MEAN, STD) = normalizeFeatures(features)
#featuresNorm = features;
nClasses = len(features)
CAll = []
acAll = []
F1All = []
PrecisionClassesAll = []
RecallClassesAll = []
ClassesAll = []
F1ClassesAll = []
CMsAll = []
# compute total number of samples:
nSamplesTotal = 0
for f in features:
nSamplesTotal += f.shape[0]
if nSamplesTotal > 1000 and nExp > 50:
nExp = 50
print("Number of training experiments changed to 50 due to high number of samples")
if nSamplesTotal > 2000 and nExp > 10:
nExp = 10
print("Number of training experiments changed to 10 due to high number of samples")
for Ci, C in enumerate(Params): # for each param value
CM = numpy.zeros((nClasses, nClasses))
for e in range(nExp): # for each cross-validation iteration:
print("Param = {0:.5f} - Classifier Evaluation Experiment {1:d} of {2:d}".format(C, e+1, nExp))
# split features:
featuresTrain, featuresTest = randSplitFeatures(featuresNorm, perTrain)
# train multi-class svms:
if ClassifierName == "svm":
Classifier = trainSVM(featuresTrain, C)
elif ClassifierName == "knn":
Classifier = trainKNN(featuresTrain, C)
elif ClassifierName == "randomforest":
Classifier = trainRandomForest(featuresTrain, C)
elif ClassifierName == "gradientboosting":
Classifier = trainGradientBoosting(featuresTrain, C)
elif ClassifierName == "extratrees":
Classifier = trainExtraTrees(featuresTrain, C)
CMt = numpy.zeros((nClasses, nClasses))
for c1 in range(nClasses):
#Results = Classifier.pred(featuresTest[c1])
nTestSamples = len(featuresTest[c1])
Results = numpy.zeros((nTestSamples, 1))
for ss in range(nTestSamples):
[Results[ss], _] = classifierWrapper(Classifier, ClassifierName, featuresTest[c1][ss])
for c2 in range(nClasses):
CMt[c1][c2] = float(len(numpy.nonzero(Results == c2)[0]))
CM = CM + CMt
CM = CM + 0.0000000010
Rec = numpy.zeros((CM.shape[0], ))
Pre = numpy.zeros((CM.shape[0], ))
for ci in range(CM.shape[0]):
Rec[ci] = CM[ci, ci] / numpy.sum(CM[ci, :])
Pre[ci] = CM[ci, ci] / numpy.sum(CM[:, ci])
PrecisionClassesAll.append(Pre)
RecallClassesAll.append(Rec)
F1 = 2 * Rec * Pre / (Rec + Pre)
F1ClassesAll.append(F1)
acAll.append(numpy.sum(numpy.diagonal(CM)) / numpy.sum(CM))
CMsAll.append(CM)
F1All.append(numpy.mean(F1))
# print "{0:6.4f}{1:6.4f}{2:6.1f}{3:6.1f}".format(nu, g, 100.0*acAll[-1], 100.0*F1All[-1])
print(("\t\t"), end=' ')
for i, c in enumerate(ClassNames):
if i == len(ClassNames)-1:
print("{0:s}\t\t".format(c), end=' ')
else:
print("{0:s}\t\t\t".format(c), end=' ')
print ("OVERALL")
print(("\tC"), end=' ')
for c in ClassNames:
print("\tPRE\tREC\tF1", end=' ')
print("\t{0:s}\t{1:s}".format("ACC", "F1"))
bestAcInd = numpy.argmax(acAll)
bestF1Ind = numpy.argmax(F1All)
for i in range(len(PrecisionClassesAll)):
print("\t{0:.3f}".format(Params[i]), end=' ')
for c in range(len(PrecisionClassesAll[i])):
print("\t{0:.1f}\t{1:.1f}\t{2:.1f}".format(100.0 * PrecisionClassesAll[i][c], 100.0 * RecallClassesAll[i][c], 100.0 * F1ClassesAll[i][c]), end=' ')
print("\t{0:.1f}\t{1:.1f}".format(100.0 * acAll[i], 100.0 * F1All[i]), end=' ')
if i == bestF1Ind:
print("\t best F1", end=' ')
if i == bestAcInd:
print("\t best Acc", end=' ')
print()
if parameterMode == 0: # keep parameters that maximize overall classification accuracy:
print("Confusion Matrix:")
printConfusionMatrix(CMsAll[bestAcInd], ClassNames)
return Params[bestAcInd]
elif parameterMode == 1: # keep parameters that maximize overall F1 measure:
print("Confusion Matrix:")
printConfusionMatrix(CMsAll[bestF1Ind], ClassNames)
return Params[bestF1Ind]
def evaluateRegression(features, labels, nExp, MethodName, Params):
'''
ARGUMENTS:
features: numpy matrices of features [numOfSamples x numOfDimensions]
labels: list of sample labels
nExp: number of cross-validation experiments
MethodName: "svm" or "randomforest"
Params: list of classifier params to be evaluated
RETURNS:
bestParam: the value of the input parameter that optimizes the selected performance measure
'''
# feature normalization:
(featuresNorm, MEAN, STD) = normalizeFeatures([features])
featuresNorm = featuresNorm[0]
nSamples = labels.shape[0]
partTrain = 0.9
ErrorsAll = []
ErrorsTrainAll = []
ErrorsBaselineAll = []
for Ci, C in enumerate(Params): # for each param value
Errors = []
ErrorsTrain = []
ErrorsBaseline = []
for e in range(nExp): # for each cross-validation iteration:
# split features:
randperm = numpy.random.permutation(list(range(nSamples)))
nTrain = int(round(partTrain * nSamples))
featuresTrain = [featuresNorm[randperm[i]] for i in range(nTrain)]
featuresTest = [featuresNorm[randperm[i+nTrain]] for i in range(nSamples - nTrain)]
labelsTrain = [labels[randperm[i]] for i in range(nTrain)]
labelsTest = [labels[randperm[i + nTrain]] for i in range(nSamples - nTrain)]
# train multi-class svms:
featuresTrain = numpy.matrix(featuresTrain)
if MethodName == "svm":
[Classifier, trainError] = trainSVMregression(featuresTrain, labelsTrain, C)
# TODO
#elif MethodName == "randomforest":
# [Classifier, trainError] = trainRandomForestRegression(featuresTrain, labelsTrain, C)
# TODO KNN
# elif ClassifierName=="knn":
# Classifier = trainKNN(featuresTrain, C)
ErrorTest = []
ErrorTestBaseline = []
for itest, fTest in enumerate(featuresTest):
R = regressionWrapper(Classifier, MethodName, fTest)
Rbaseline = numpy.mean(labelsTrain)
ErrorTest.append((R - labelsTest[itest]) * (R - labelsTest[itest]))
ErrorTestBaseline.append((Rbaseline - labelsTest[itest]) * (Rbaseline - labelsTest[itest]))
Error = numpy.array(ErrorTest).mean()
ErrorBaseline = numpy.array(ErrorTestBaseline).mean()
Errors.append(Error)
ErrorsTrain.append(trainError)
ErrorsBaseline.append(ErrorBaseline)
ErrorsAll.append(numpy.array(Errors).mean())
ErrorsTrainAll.append(numpy.array(ErrorsTrain).mean())
ErrorsBaselineAll.append(numpy.array(ErrorsBaseline).mean())
bestInd = numpy.argmin(ErrorsAll)
print("{0:s}\t\t{1:s}\t\t{2:s}\t\t{3:s}".format("Param", "MSE", "T-MSE", "R-MSE"))
for i in range(len(ErrorsAll)):
print("{0:.4f}\t\t{1:.2f}\t\t{2:.2f}\t\t{3:.2f}".format(Params[i], ErrorsAll[i], ErrorsTrainAll[i], ErrorsBaselineAll[i]), end=' ')
if i == bestInd:
print("\t\t best", end=' ')
print()
return Params[bestInd]
def printConfusionMatrix(CM, ClassNames):
'''
This function prints a confusion matrix for a particular classification task.
ARGUMENTS:
CM: a 2-D numpy array of the confusion matrix
(CM[i,j] is the number of times a sample from class i was classified in class j)
ClassNames: a list that contains the names of the classes
'''
if CM.shape[0] != len(ClassNames):
print("printConfusionMatrix: Wrong argument sizes\n")
return
for c in ClassNames:
if len(c) > 4:
c = c[0:3]
print("\t{0:s}".format(c), end=' ')
print()
for i, c in enumerate(ClassNames):
if len(c) > 4:
c = c[0:3]
print("{0:s}".format(c), end=' ')
for j in range(len(ClassNames)):
print("\t{0:.1f}".format(100.0 * CM[i][j] / numpy.sum(CM)), end=' ')
print()
def normalizeFeatures(features):
'''
This function normalizes a feature set to 0-mean and 1-std.
    Used in most classifier training cases.
ARGUMENTS:
- features: list of feature matrices (each one of them is a numpy matrix)
RETURNS:
- featuresNorm: list of NORMALIZED feature matrices
- MEAN: mean vector
- STD: std vector
'''
X = numpy.array([])
for count, f in enumerate(features):
if f.shape[0] > 0:
if count == 0:
X = f
else:
X = numpy.vstack((X, f))
count += 1
MEAN = numpy.mean(X, axis=0)
STD = numpy.std(X, axis=0)
featuresNorm = []
for f in features:
ft = f.copy()
for nSamples in range(f.shape[0]):
ft[nSamples, :] = (ft[nSamples, :] - MEAN) / STD
featuresNorm.append(ft)
return (featuresNorm, MEAN, STD)
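# Usage sketch (synthetic input; after normalization the concatenated data has
# approximately zero mean and unit standard deviation per dimension):
#
#     import numpy
#     norm_feats, mean_vec, std_vec = normalizeFeatures(
#         [numpy.random.rand(20, 4), numpy.random.rand(30, 4)])
#     stacked = numpy.vstack(norm_feats)
#     print(stacked.mean(axis=0), stacked.std(axis=0))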
def listOfFeatures2Matrix(features):
'''
listOfFeatures2Matrix(features)
This function takes a list of feature matrices as argument and returns a single concatenated feature matrix and the respective class labels.
ARGUMENTS:
- features: a list of feature matrices
RETURNS:
- X: a concatenated matrix of features
        - Y:            a vector of class indices
'''
X = numpy.array([])
Y = numpy.array([])
for i, f in enumerate(features):
if i == 0:
X = f
Y = i * numpy.ones((len(f), 1))
else:
X = numpy.vstack((X, f))
Y = numpy.append(Y, i * numpy.ones((len(f), 1)))
return (X, Y)
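# Illustrative sketch (added example, not part of the original module): stacking
# two class matrices yields one feature matrix X and a label vector Y holding the
# class index of each row (0 for the first matrix, 1 for the second).
def _example_list_of_features_to_matrix():
    f1 = numpy.zeros((3, 2))    # 3 samples of class 0
    f2 = numpy.ones((2, 2))     # 2 samples of class 1
    X, Y = listOfFeatures2Matrix([f1, f2])
    # X.shape == (5, 2) and Y == [0, 0, 0, 1, 1]
    return X, Y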
def pcaDimRed(features, nDims):
[X, Y] = listOfFeatures2Matrix(features)
pca = sklearn.decomposition.PCA(n_components = nDims)
pca.fit(X)
coeff = pca.components_
coeff = coeff[:, 0:nDims]
featuresNew = []
for f in features:
ft = f.copy()
# ft = pca.transform(ft, k=nDims)
ft = numpy.dot(f, coeff)
featuresNew.append(ft)
return (featuresNew, coeff)
def fileClassification(inputFile, modelName, modelType):
# Load classifier:
if not os.path.isfile(inputFile):
print("fileClassification: wav file not found!")
return (-1, -1, -1)
[Fs, x] = audioBasicIO.readAudioFile(inputFile) # read audio file and convert to mono
x = audioBasicIO.stereo2mono(x)
return fragmentClassification(Fs, x, modelName, modelType)
def fragmentClassification(Fs, x, modelName, modelType):
if not os.path.isfile(modelName):
print("fileClassification: input modelName not found!")
return (-1, -1, -1)
if modelType == 'svm':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadSVModel(modelName)
elif modelType == 'knn':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadKNNModel(modelName)
elif modelType == 'randomforest':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadRandomForestModel(modelName)
elif modelType == 'gradientboosting':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadGradientBoostingModel(modelName)
elif modelType == 'extratrees':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadExtraTreesModel(modelName)
# feature extraction:
[MidTermFeatures, s] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * stWin), round(Fs * stStep))
MidTermFeatures = MidTermFeatures.mean(axis=1) # long term averaging of mid-term statistics
if computeBEAT:
[beat, beatConf] = aF.beatExtraction(s, stStep)
MidTermFeatures = numpy.append(MidTermFeatures, beat)
MidTermFeatures = numpy.append(MidTermFeatures, beatConf)
curFV = (MidTermFeatures - MEAN) / STD # normalization
[Result, P] = classifierWrapper(Classifier, modelType, curFV) # classification
return Result, P, classNames
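# Illustrative sketch (added example, not part of the original module): a possible
# call pattern for fileClassification. The wav path, model path and model type are
# hypothetical and must exist for the call to succeed.
def _example_file_classification():
    Result, P, classNames = fileClassification("sample.wav", "svmModel", "svm")
    if Result != -1:
        return classNames[int(Result)], P
    return None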
def fileRegression(inputFile, modelName, modelType):
# Load classifier:
if not os.path.isfile(inputFile):
print("fileClassification: wav file not found!")
return (-1, -1, -1)
regressionModels = glob.glob(modelName + "_*")
regressionModels2 = []
for r in regressionModels:
if r[-5::] != "MEANS":
regressionModels2.append(r)
regressionModels = regressionModels2
regressionNames = []
for r in regressionModels:
regressionNames.append(r[r.rfind("_")+1::])
# FEATURE EXTRACTION
# LOAD ONLY THE FIRST MODEL (for mtWin, etc)
if modelType == 'svm':
[_, _, _, mtWin, mtStep, stWin, stStep, computeBEAT] = loadSVModel(regressionModels[0], True)
elif modelType == 'knn':
[_, _, _, mtWin, mtStep, stWin, stStep, computeBEAT] = loadKNNModel(regressionModels[0], True)
[Fs, x] = audioBasicIO.readAudioFile(inputFile) # read audio file and convert to mono
x = audioBasicIO.stereo2mono(x)
# feature extraction:
[MidTermFeatures, s] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * stWin), round(Fs * stStep))
MidTermFeatures = MidTermFeatures.mean(axis=1) # long term averaging of mid-term statistics
if computeBEAT:
[beat, beatConf] = aF.beatExtraction(s, stStep)
MidTermFeatures = numpy.append(MidTermFeatures, beat)
MidTermFeatures = numpy.append(MidTermFeatures, beatConf)
# REGRESSION
R = []
for ir, r in enumerate(regressionModels):
if not os.path.isfile(r):
print("fileClassification: input modelName not found!")
return (-1, -1, -1)
if modelType == 'svm':
[Model, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT] = loadSVModel(r, True)
elif modelType == 'knn':
[Model, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT] = loadKNNModel(r, True)
curFV = (MidTermFeatures - MEAN) / STD # normalization
R.append(regressionWrapper(Model, modelType, curFV)) # regression
return R, regressionNames
def lda(data, labels, redDim):
# Centre data
data -= data.mean(axis=0)
nData = numpy.shape(data)[0]
nDim = numpy.shape(data)[1]
print(nData, nDim)
Sw = numpy.zeros((nDim, nDim))
Sb = numpy.zeros((nDim, nDim))
C = numpy.cov((data.T))
# Loop over classes
classes = numpy.unique(labels)
for i in range(len(classes)):
# Find relevant datapoints
indices = (numpy.where(labels == classes[i]))
d = numpy.squeeze(data[indices, :])
classcov = numpy.cov((d.T))
Sw += float(numpy.shape(indices)[0])/nData * classcov
Sb = C - Sw
# Now solve for W
# Compute eigenvalues, eigenvectors and sort into order
#evals,evecs = linalg.eig(dot(linalg.pinv(Sw),sqrt(Sb)))
evals, evecs = la.eig(Sw, Sb)
indices = numpy.argsort(evals)
indices = indices[::-1]
evecs = evecs[:, indices]
evals = evals[indices]
w = evecs[:, :redDim]
#print evals, w
newData = numpy.dot(data, w)
#for i in range(newData.shape[0]):
# plt.text(newData[i,0],newData[i,1],str(labels[i]))
#plt.xlim([newData[:,0].min(), newData[:,0].max()])
#plt.ylim([newData[:,1].min(), newData[:,1].max()])
#plt.show()
return newData, w
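# Illustrative sketch (added example, not part of the original module): projecting
# synthetic 2-class data onto a single LDA dimension. The data is random and only
# meant to show the expected shapes of the inputs and outputs.
def _example_lda():
    data = numpy.vstack((numpy.random.randn(20, 4), numpy.random.randn(20, 4) + 3.0))
    labels = numpy.array([0] * 20 + [1] * 20)
    newData, w = lda(data, labels, 1)
    # newData has shape (40, 1); w holds the chosen projection vector(s)
    return newData, w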
def writeTrainDataToARFF(modelName, features, classNames, featureNames):
f = open(modelName + ".arff", 'w')
f.write('@RELATION ' + modelName + '\n')
for fn in featureNames:
f.write('@ATTRIBUTE ' + fn + ' NUMERIC\n')
f.write('@ATTRIBUTE class {')
for c in range(len(classNames)-1):
f.write(classNames[c] + ',')
f.write(classNames[-1] + '}\n\n')
f.write('@DATA\n')
for c, fe in enumerate(features):
for i in range(fe.shape[0]):
for j in range(fe.shape[1]):
f.write("{0:f},".format(fe[i, j]))
f.write(classNames[c]+"\n")
f.close()
def trainSpeakerModelsScript():
'''
This script is used to train the speaker-related models (NOTE: data paths are hard-coded and NOT included in the library, the models are, however included)
import audioTrainTest as aT
aT.trainSpeakerModelsScript()
'''
mtWin = 2.0
mtStep = 2.0
stWin = 0.020
stStep = 0.020
dirName = "DIARIZATION_ALL/all"
listOfDirs = [os.path.join(dirName, name) for name in os.listdir(dirName) if os.path.isdir(os.path.join(dirName, name))]
featureAndTrain(listOfDirs, mtWin, mtStep, stWin, stStep, "knn", "data/knnSpeakerAll", computeBEAT=False, perTrain=0.50)
dirName = "DIARIZATION_ALL/female_male"
listOfDirs = [os.path.join(dirName, name) for name in os.listdir(dirName) if os.path.isdir(os.path.join(dirName, name))]
featureAndTrain(listOfDirs, mtWin, mtStep, stWin, stStep, "knn", "data/knnSpeakerFemaleMale", computeBEAT=False, perTrain=0.50)
def main(argv):
return 0
if __name__ == '__main__':
main(sys.argv)
| mit | -8,503,863,699,730,873,000 | 39.729515 | 237 | 0.609652 | false |
alfa-addon/addon | plugin.video.alfa/channels/bloghorror.py | 1 | 5917 | # -*- coding: utf-8 -*-
# -*- Channel BlogHorror -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import os
import re
from bs4 import BeautifulSoup
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger, subtitletools
from channelselector import get_thumb
host = 'http://bloghorror.com/'
fanart = 'http://bloghorror.com/wp-content/uploads/2015/04/bloghorror-2017-x.jpg'
def create_soup(url, referer=None, unescape=False):
logger.info()
if referer:
data = httptools.downloadpage(url, headers={'Referer': referer}).data
else:
data = httptools.downloadpage(url).data
if unescape:
data = scrapertools.unescape(data)
soup = BeautifulSoup(data, "html5lib", from_encoding="utf-8")
return soup
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, fanart=fanart, title="Todas", action="list_all",
url=host+'/category/terror', thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, fanart=fanart, title="Asiaticas", action="list_all",
url=host+'/category/asiatico', thumbnail=get_thumb('asiaticas', auto=True)))
itemlist.append(Item(channel=item.channel, fanart=fanart, title = 'Buscar', action="search", url=host + '?s=', pages=3,
thumbnail=get_thumb('search', auto=True)))
return itemlist
def list_all(item):
logger.info()
itemlist = list()
soup = create_soup(item.url)
matches = soup.find(id="primary").find_all("article")
for elem in matches:
cat = elem.find("a", class_="covernews-categories")["alt"]
if cat in ["View all posts in Las Mejores Peliculas de Terror", "View all posts in Editoriales"]:
continue
title_data = elem.find("h3", class_="article-title").text.strip()
if "(" in title_data:
title = title_data.replace(")", "").split(" (")
elif "[" in title_data:
title = title_data.replace("]", "").split(" [")
url = elem.find("h3", class_="article-title").a["href"]
thumb = elem.find("div", class_="data-bg-hover")["data-background"]
try:
year = title[1]
except:
year = "-"
if "serie" in url:
continue
itemlist.append(Item(channel=item.channel, title=title[0], url=url, contentTitle=title[0], thumbnail=thumb,
action="findvideos", infoLabels={"year": year}))
tmdb.set_infoLabels_itemlist(itemlist, True)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
if itemlist:
try:
next_page = soup.find("div", class_="navigation").find("a", class_="next")["href"]
if next_page != '':
itemlist.append(Item(channel=item.channel, fanart=fanart, action="list_all", title='Siguiente >>>',
url=next_page))
except:
pass
return itemlist
def findvideos(item):
logger.info()
itemlist = list()
soup = create_soup(item.url).find("div", class_="entry-content-wrap")
quality = scrapertools.find_single_match(soup.text, r"Calidad: ([^\n]+)\n").split("+")
urls_list = soup.find_all("a", {"data-wpel-link": True, "href": re.compile("magnet|torrent")})
try:
sub_url = soup.find("a", {"data-wpel-link": True, "href": re.compile("subdivx")})["href"]
except:
sub_url = ""
qlty_cnt = 0
for url in urls_list:
url = url["href"]
if not sub_url:
lang = 'VO'
else:
lang = 'VOSE'
try:
qlty = quality[qlty_cnt]
qlty_cnt += 1
except:
qlty = "SD"
itemlist.append(Item(channel=item.channel, title="[%s][%s][%s]", url=url, action="play", quality=qlty,
language=lang, subtitle=sub_url, infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % (i.server, i.language, i.quality))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def play(item):
logger.info()
if item.subtitle:
sub = subtitletools.get_from_subdivx(item.subtitle)
return [item.clone(subtitle=sub)]
else:
return [item]
def search(item, texto):
logger.info()
try:
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return list_all(item)
else:
return []
# Catch the exception so that a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas', 'terror', 'torrent']:
item.url = host
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
| gpl-3.0 | 5,249,670,684,535,636,000 | 28.575 | 123 | 0.572612 | false |
NicholasHoCode/Galaxy | assets/code/GF.py | 1 | 2040 | from PIL import Image
import numpy as np
import math
from scipy import signal
def boxfilter(n):
assert (n%2 != 0),"Dimension must be odd"
a = np.empty((n, n))
a.fill(1/(n*n))
return a
def gauss1d(sigma):
arr_length = 6*sigma
if arr_length % 2 == 0:
val = ((arr_length)/2)+1
elif arr_length.is_integer() == False:
arr_length = np.ceil(arr_length)
val = (arr_length + 1)/2
if arr_length % 2 == 0:
arr_length = arr_length + 1
val = arr_length - 1
elif arr_length % 2 != 0:
val = (arr_length + 1)/2
lst = list(range(int(val)))
neg_lst = [-x for x in lst]
neg_lst.remove(0)
neg_lst.reverse()
a_val = neg_lst + lst
a_val = [math.exp(- (abs(x)*abs(x)) / (2*sigma*sigma)) for x in a_val]
sum_aval = sum(a_val)
a_aval = [(1/sum_aval)*x for x in a_val]
return np.asarray(a_aval)
def gauss2d(sigma):
f = gauss1d(sigma)
return signal.convolve2d(f[np.newaxis], np.transpose(f[np.newaxis]))
def gaussconvolve2d(array,sigma):
assert (array.ndim == 2),"Array must be 2D"
filter = gauss2d(sigma)
result = signal.convolve2d(array, filter, 'same')
return result
# signal.convolve2d and signal.correlate2d produce different results when the filter is not symmetric, because convolution flips the kernel while correlation does not.
# The associative property of convolution is what allows multiple filters to be pre-convolved into a single filter, which is then applied to the image in one pass.
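# Illustrative sketch (added example, not part of the original script): with an
# asymmetric kernel, convolve2d and correlate2d give different outputs; with a
# symmetric kernel such as a Gaussian they coincide. The array values are made up.
def _example_convolve_vs_correlate():
    img = np.arange(25, dtype=float).reshape(5, 5)
    asym = np.array([[0.0, 1.0, 2.0]])                   # deliberately not symmetric
    conv = signal.convolve2d(img, asym, 'same')
    corr = signal.correlate2d(img, asym, 'same')
    return np.allclose(conv, corr)                       # False for this kernel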
im = Image.open('bb.jpg')
im.show()
im = im.convert('L')
im_arr = np.asarray(im)
nim = gaussconvolve2d(im_arr, 3)
fim = Image.fromarray(nim)
if fim.mode != 'L':
fim = fim.convert('L')
fim.save('bb_filtered.jpg')
# Since convolution with a Gaussian is separable, a 2D Gaussian filter can be obtained by multiplying two 1D Gaussian filters. A more efficient implementation
# first convolves each row with a 1D filter and then convolves each column with a 1D filter, which results in O(n) cost per pixel instead of O(n^2) for an n-tap kernel.
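# Illustrative sketch (added example, not part of the original script): the same
# smoothing as gaussconvolve2d, applied separably: one 1D pass over the rows and
# one over the columns (results match up to boundary handling).
def _example_separable_gaussconvolve2d(array, sigma):
    f = gauss1d(sigma)
    rows = signal.convolve2d(array, f[np.newaxis], 'same')                  # filter rows
    return signal.convolve2d(rows, np.transpose(f[np.newaxis]), 'same')     # then columns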
| mit | -6,833,917,436,678,237,000 | 34.172414 | 208 | 0.659314 | false |
Azure/azure-sdk-for-python | common/smoketest/key_vault_keys_async.py | 1 | 1475 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import uuid
from azure.keyvault.keys.aio import KeyClient
from key_vault_base_async import KeyVaultBaseAsync
class KeyVaultKeys(KeyVaultBaseAsync):
def __init__(self):
args = self.get_client_args()
self.key_client = KeyClient(**args)
self.key_name = "key-name-" + uuid.uuid1().hex
async def create_rsa_key(self):
print("Creating an RSA key...")
await self.key_client.create_rsa_key(name=self.key_name, size=2048)
print("\tdone")
async def get_key(self):
print("Getting a key...")
key = await self.key_client.get_key(name=self.key_name)
print("\tdone, key: {}.".format(key.name))
async def delete_key(self):
print("Deleting a key...")
deleted_key = await self.key_client.delete_key(name=self.key_name)
print("\tdone: " + deleted_key.name)
async def run(self):
print("")
print("------------------------")
print("Key Vault - Keys\nIdentity - Credential")
print("------------------------")
print("1) Create a key")
print("2) Get that key")
print("3) Delete that key (Clean up the resource)")
print("")
try:
await self.create_rsa_key()
await self.get_key()
finally:
await self.delete_key()
| mit | 5,781,894,766,305,510,000 | 31.086957 | 75 | 0.541695 | false |
pytroll/pygac | pygac/tests/test_calibrate_pod.py | 1 | 5529 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015 Martin Raspaud
# Author(s):
# Martin Raspaud <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test function for the POD calibration.
"""
import unittest
try:
import mock
except ImportError:
from unittest import mock
import numpy as np
from pygac.calibration import Calibrator, calibrate_solar, calibrate_thermal
class TestGenericCalibration(unittest.TestCase):
def test_calibration_vis(self):
counts = np.array([[0, 0, 0, 0, 0,
512, 512, 512, 512, 512,
1023, 1023, 1023, 1023, 1023],
[41, 41, 41, 41, 41,
150, 150, 150, 150, 150,
700, 700, 700, 700, 700]])
year = 1997
jday = 196
spacecraft_id = "noaa14"
cal = Calibrator(spacecraft_id)
corr = 1
channel = 0
ref1 = calibrate_solar(counts[:, channel::5], channel, year, jday, cal, corr)
channel = 1
ref2 = calibrate_solar(counts[:, channel::5], channel, year, jday, cal, corr)
channel = 2
data = np.ma.array(counts[:, channel::5], mask=True)
ref3 = calibrate_solar(data, channel, year, jday, cal, corr)
expected = (np.array([[np.nan, 60.891074, 126.953364],
[0., 14.091565, 85.195791]]),
np.array([[np.nan, 72.98262, 152.16334],
[0., 16.889821, 102.113687]]),
np.array([[-32001., -32001., -32001.],
[-32001., -32001., -32001.]]))
np.testing.assert_allclose(ref1, expected[0])
np.testing.assert_allclose(ref2, expected[1])
np.testing.assert_allclose(ref3.filled(-32001), expected[2])
def test_calibration_ir(self):
counts = np.array([[0, 0, 612, 0, 0,
512, 512, 487, 512, 512,
923, 923, 687, 923, 923],
[41, 41, 634, 41, 41,
150, 150, 461, 150, 150,
700, 700, 670, 700, 700],
[241, 241, 656, 241, 241,
350, 350, 490, 350, 350,
600, 600, 475, 600, 600]])
prt_counts = np.array([0, 230, 230])
ict_counts = np.array([[745.3, 397.9, 377.8],
[744.8, 398.1, 378.4],
[745.7, 398., 378.3]])
space_counts = np.array([[987.3, 992.5, 989.4],
[986.9, 992.8, 989.6],
[986.3, 992.3, 988.9]])
spacecraft_id = "noaa14"
cal = Calibrator(spacecraft_id)
ch3 = calibrate_thermal(counts[:, 2::5],
prt_counts,
ict_counts[:, 0],
space_counts[:, 0],
line_numbers=np.array([1, 2, 3]),
channel=3,
cal=cal)
expected_ch3 = np.array([[298.28466, 305.167571, 293.16182],
[296.878502, 306.414234, 294.410224],
[295.396779, 305.020259, 305.749526]])
np.testing.assert_allclose(expected_ch3, ch3)
ch4 = calibrate_thermal(counts[:, 3::5],
prt_counts,
ict_counts[:, 1],
space_counts[:, 1],
line_numbers=np.array([1, 2, 3]),
channel=4,
cal=cal)
expected_ch4 = np.array([[325.828062, 275.414804, 196.214709],
[322.359517, 312.785057, 249.380649],
[304.326806, 293.490822, 264.148021]])
np.testing.assert_allclose(expected_ch4, ch4)
ch5 = calibrate_thermal(counts[:, 4::5],
prt_counts,
ict_counts[:, 2],
space_counts[:, 2],
line_numbers=np.array([1, 2, 3]),
channel=5,
cal=cal)
expected_ch5 = np.array([[326.460316, 272.146547, 187.434456],
[322.717606, 312.388155, 244.241633],
[303.267012, 291.590832, 260.05426]])
np.testing.assert_allclose(expected_ch5, ch5)
def suite():
"""The suite for test_slerp
"""
loader = unittest.TestLoader()
mysuite = unittest.TestSuite()
mysuite.addTest(loader.loadTestsFromTestCase(TestGenericCalibration))
return mysuite
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,917,036,265,671,069,000 | 35.375 | 85 | 0.477302 | false |
mind1master/aiohttp | tests/test_client_session.py | 1 | 14280 | import asyncio
import contextlib
import gc
import http.cookies
import re
import types
from unittest import mock
import pytest
from multidict import CIMultiDict, MultiDict
import aiohttp
from aiohttp import web
from aiohttp.client import ClientSession
from aiohttp.connector import BaseConnector, TCPConnector
@pytest.fixture
def connector(loop):
conn = BaseConnector(loop=loop)
transp = mock.Mock()
conn._conns['a'] = [(transp, 'proto', 123)]
return conn
@pytest.yield_fixture
def create_session(loop):
session = None
def maker(*args, **kwargs):
nonlocal session
session = ClientSession(*args, loop=loop, **kwargs)
return session
yield maker
if session is not None:
session.close()
@pytest.fixture
def session(create_session):
return create_session()
@pytest.fixture
def params():
return dict(
headers={"Authorization": "Basic ..."},
max_redirects=2,
encoding="latin1",
version=aiohttp.HttpVersion10,
compress="deflate",
chunked=True,
expect100=True,
read_until_eof=False)
def test_init_headers_simple_dict(create_session):
session = create_session(headers={"h1": "header1",
"h2": "header2"})
assert (sorted(session._default_headers.items()) ==
([("H1", "header1"), ("H2", "header2")]))
def test_init_headers_list_of_tuples(create_session):
session = create_session(headers=[("h1", "header1"),
("h2", "header2"),
("h3", "header3")])
assert (session._default_headers ==
CIMultiDict([("h1", "header1"),
("h2", "header2"),
("h3", "header3")]))
def test_init_headers_MultiDict(create_session):
session = create_session(headers=MultiDict([("h1", "header1"),
("h2", "header2"),
("h3", "header3")]))
assert (session._default_headers ==
CIMultiDict([("H1", "header1"),
("H2", "header2"),
("H3", "header3")]))
def test_init_headers_list_of_tuples_with_duplicates(create_session):
session = create_session(headers=[("h1", "header11"),
("h2", "header21"),
("h1", "header12")])
assert (session._default_headers ==
CIMultiDict([("H1", "header11"),
("H2", "header21"),
("H1", "header12")]))
def test_init_cookies_with_simple_dict(create_session):
session = create_session(cookies={"c1": "cookie1",
"c2": "cookie2"})
assert set(session.cookies) == {'c1', 'c2'}
assert session.cookies['c1'].value == 'cookie1'
assert session.cookies['c2'].value == 'cookie2'
def test_init_cookies_with_list_of_tuples(create_session):
session = create_session(cookies=[("c1", "cookie1"),
("c2", "cookie2")])
assert set(session.cookies) == {'c1', 'c2'}
assert session.cookies['c1'].value == 'cookie1'
assert session.cookies['c2'].value == 'cookie2'
def test_merge_headers(create_session):
# Check incoming simple dict
session = create_session(headers={"h1": "header1",
"h2": "header2"})
headers = session._prepare_headers({"h1": "h1"})
assert isinstance(headers, CIMultiDict)
assert headers == CIMultiDict([("h2", "header2"),
("h1", "h1")])
def test_merge_headers_with_multi_dict(create_session):
session = create_session(headers={"h1": "header1",
"h2": "header2"})
headers = session._prepare_headers(MultiDict([("h1", "h1")]))
assert isinstance(headers, CIMultiDict)
assert headers == CIMultiDict([("h2", "header2"),
("h1", "h1")])
def test_merge_headers_with_list_of_tuples(create_session):
session = create_session(headers={"h1": "header1",
"h2": "header2"})
headers = session._prepare_headers([("h1", "h1")])
assert isinstance(headers, CIMultiDict)
assert headers == CIMultiDict([("h2", "header2"),
("h1", "h1")])
def test_merge_headers_with_list_of_tuples_duplicated_names(create_session):
session = create_session(headers={"h1": "header1",
"h2": "header2"})
headers = session._prepare_headers([("h1", "v1"),
("h1", "v2")])
assert isinstance(headers, CIMultiDict)
assert headers == CIMultiDict([("H2", "header2"),
("H1", "v1"),
("H1", "v2")])
def test_http_GET(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.get("http://test.example.com",
params={"x": 1},
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("GET", "http://test.example.com",),
dict(
params={"x": 1},
allow_redirects=True,
**params)]
def test_http_OPTIONS(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.options("http://opt.example.com",
params={"x": 2},
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("OPTIONS", "http://opt.example.com",),
dict(
params={"x": 2},
allow_redirects=True,
**params)]
def test_http_HEAD(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.head("http://head.example.com",
params={"x": 2},
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("HEAD", "http://head.example.com",),
dict(
params={"x": 2},
allow_redirects=False,
**params)]
def test_http_POST(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.post("http://post.example.com",
params={"x": 2},
data="Some_data",
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("POST", "http://post.example.com",),
dict(
params={"x": 2},
data="Some_data",
**params)]
def test_http_PUT(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.put("http://put.example.com",
params={"x": 2},
data="Some_data",
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("PUT", "http://put.example.com",),
dict(
params={"x": 2},
data="Some_data",
**params)]
def test_http_PATCH(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.patch("http://patch.example.com",
params={"x": 2},
data="Some_data",
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("PATCH", "http://patch.example.com",),
dict(
params={"x": 2},
data="Some_data",
**params)]
def test_http_DELETE(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.delete("http://delete.example.com",
params={"x": 2},
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("DELETE",
"http://delete.example.com",),
dict(
params={"x": 2},
**params)]
def test_close(create_session, connector):
session = create_session(connector=connector)
session.close()
assert session.connector is None
assert connector.closed
def test_closed(session):
assert not session.closed
session.close()
assert session.closed
def test_connector(create_session, loop):
connector = TCPConnector(loop=loop)
session = create_session(connector=connector)
assert session.connector is connector
def test_connector_loop(loop):
with contextlib.ExitStack() as stack:
another_loop = asyncio.new_event_loop()
stack.enter_context(contextlib.closing(another_loop))
connector = TCPConnector(loop=another_loop)
stack.enter_context(contextlib.closing(connector))
with pytest.raises(ValueError) as ctx:
ClientSession(connector=connector, loop=loop)
assert re.match("loop argument must agree with connector",
str(ctx.value))
def test_cookies_are_readonly(session):
with pytest.raises(AttributeError):
session.cookies = 123
def test_detach(session):
conn = session.connector
try:
assert not conn.closed
session.detach()
assert session.connector is None
assert session.closed
assert not conn.closed
finally:
conn.close()
@pytest.mark.run_loop
def test_request_closed_session(session):
session.close()
with pytest.raises(RuntimeError):
yield from session.request('get', '/')
def test_close_flag_for_closed_connector(session):
conn = session.connector
assert not session.closed
conn.close()
assert session.closed
def test_double_close(connector, create_session):
session = create_session(connector=connector)
session.close()
assert session.connector is None
session.close()
assert session.closed
assert connector.closed
def test_del(connector, loop, warning):
# N.B. don't use session fixture, it stores extra reference internally
session = ClientSession(connector=connector, loop=loop)
loop.set_exception_handler(lambda loop, ctx: None)
with warning(ResourceWarning):
del session
gc.collect()
def test_context_manager(connector, loop):
with ClientSession(loop=loop, connector=connector) as session:
pass
assert session.closed
def test_borrow_connector_loop(connector, create_session, loop):
session = ClientSession(connector=connector, loop=None)
try:
assert session._loop, loop
finally:
session.close()
@pytest.mark.run_loop
def test_reraise_os_error(create_session):
err = OSError(1, "permission error")
req = mock.Mock()
req_factory = mock.Mock(return_value=req)
req.send = mock.Mock(side_effect=err)
session = create_session(request_class=req_factory)
@asyncio.coroutine
def create_connection(req):
# return self.transport, self.protocol
return mock.Mock(), mock.Mock()
session._connector._create_connection = create_connection
with pytest.raises(aiohttp.ClientOSError) as ctx:
yield from session.request('get', 'http://example.com')
e = ctx.value
assert e.errno == err.errno
assert e.strerror == err.strerror
@pytest.mark.run_loop
def test_request_ctx_manager_props(loop):
yield from asyncio.sleep(0, loop=loop) # to make it a task
with aiohttp.ClientSession(loop=loop) as client:
ctx_mgr = client.get('http://example.com')
next(ctx_mgr)
assert isinstance(ctx_mgr.gi_frame, types.FrameType)
assert not ctx_mgr.gi_running
assert isinstance(ctx_mgr.gi_code, types.CodeType)
@pytest.mark.run_loop
def test_cookie_jar_usage(create_app_and_client):
req_url = None
jar = mock.Mock()
jar.filter_cookies.return_value = None
@asyncio.coroutine
def handler(request):
nonlocal req_url
req_url = "http://%s/" % request.host
resp = web.Response()
resp.set_cookie("response", "resp_value")
return resp
app, client = yield from create_app_and_client(
client_params={"cookies": {"request": "req_value"},
"cookie_jar": jar}
)
app.router.add_route('GET', '/', handler)
# Updating the cookie jar with initial user defined cookies
jar.update_cookies.assert_called_with({"request": "req_value"})
jar.update_cookies.reset_mock()
yield from client.get("/")
# Filtering the cookie jar before sending the request,
# getting the request URL as only parameter
jar.filter_cookies.assert_called_with(req_url)
# Updating the cookie jar with the response cookies
assert jar.update_cookies.called
resp_cookies = jar.update_cookies.call_args[0][0]
assert isinstance(resp_cookies, http.cookies.SimpleCookie)
assert "response" in resp_cookies
assert resp_cookies["response"].value == "resp_value"
def test_session_default_version(loop):
session = aiohttp.ClientSession(loop=loop)
assert session.version == aiohttp.HttpVersion11
| apache-2.0 | 8,333,107,715,438,681,000 | 33 | 78 | 0.55112 | false |
masayukig/tempest | tempest/api/compute/admin/test_hosts_negative.py | 1 | 6175 | # Copyright 2013 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
class HostsAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
"""Tests hosts API using admin privileges."""
max_microversion = '2.42'
@classmethod
def setup_clients(cls):
super(HostsAdminNegativeTestJSON, cls).setup_clients()
cls.client = cls.os_admin.hosts_client
cls.non_admin_client = cls.os_primary.hosts_client
@classmethod
def resource_setup(cls):
super(HostsAdminNegativeTestJSON, cls).resource_setup()
hosts = cls.client.list_hosts()['hosts']
if not hosts:
raise lib_exc.NotFound("no host found")
cls.hostname = hosts[0]['host_name']
@decorators.attr(type=['negative'])
@decorators.idempotent_id('dd032027-0210-4d9c-860e-69b1b8deed5f')
def test_list_hosts_with_non_admin_user(self):
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.list_hosts)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e75b0a1a-041f-47a1-8b4a-b72a6ff36d3f')
def test_show_host_detail_with_nonexistent_hostname(self):
self.assertRaises(lib_exc.NotFound,
self.client.show_host, 'nonexistent_hostname')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('19ebe09c-bfd4-4b7c-81a2-e2e0710f59cc')
def test_show_host_detail_with_non_admin_user(self):
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.show_host,
self.hostname)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e40c72b1-0239-4ed6-ba21-81a184df1f7c')
def test_update_host_with_non_admin_user(self):
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.update_host,
self.hostname,
status='enable',
maintenance_mode='enable')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('fbe2bf3e-3246-4a95-a59f-94e4e298ec77')
def test_update_host_with_invalid_status(self):
# 'status' can only be 'enable' or 'disable'
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
self.hostname,
status='invalid',
maintenance_mode='enable')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ab1e230e-5e22-41a9-8699-82b9947915d4')
def test_update_host_with_invalid_maintenance_mode(self):
# 'maintenance_mode' can only be 'enable' or 'disable'
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
self.hostname,
status='enable',
maintenance_mode='invalid')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('0cd85f75-6992-4a4a-b1bd-d11e37fd0eee')
def test_update_host_without_param(self):
# 'status' or 'maintenance_mode' needed for host update
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
self.hostname)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('23c92146-2100-4d68-b2d6-c7ade970c9c1')
def test_update_nonexistent_host(self):
self.assertRaises(lib_exc.NotFound,
self.client.update_host,
'nonexistent_hostname',
status='enable',
maintenance_mode='enable')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('0d981ac3-4320-4898-b674-82b61fbb60e4')
def test_startup_nonexistent_host(self):
self.assertRaises(lib_exc.NotFound,
self.client.startup_host,
'nonexistent_hostname')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9f4ebb7e-b2ae-4e5b-a38f-0fd1bb0ddfca')
def test_startup_host_with_non_admin_user(self):
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.startup_host,
self.hostname)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9e637444-29cf-4244-88c8-831ae82c31b6')
def test_shutdown_nonexistent_host(self):
self.assertRaises(lib_exc.NotFound,
self.client.shutdown_host,
'nonexistent_hostname')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('a803529c-7e3f-4d3c-a7d6-8e1c203d27f6')
def test_shutdown_host_with_non_admin_user(self):
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.shutdown_host,
self.hostname)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f86bfd7b-0b13-4849-ae29-0322e83ee58b')
def test_reboot_nonexistent_host(self):
self.assertRaises(lib_exc.NotFound,
self.client.reboot_host,
'nonexistent_hostname')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('02d79bb9-eb57-4612-abf6-2cb38897d2f8')
def test_reboot_host_with_non_admin_user(self):
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.reboot_host,
self.hostname)
| apache-2.0 | -5,796,808,752,177,502,000 | 41.881944 | 78 | 0.617976 | false |
dnanexus/rseqc | rseqc/scripts/read_distribution.py | 1 | 11931 | #!/usr/bin/env python
'''-------------------------------------------------------------------------------------------------
Check reads distribution over exon, intron, UTR, intergenic ... etc
-------------------------------------------------------------------------------------------------'''
#import built-in modules
import os,sys
if sys.version_info[0] != 2 or sys.version_info[1] != 7:
print >>sys.stderr, "\nYou are using python" + str(sys.version_info[0]) + '.' + str(sys.version_info[1]) + " RSeQC needs python2.7!\n"
sys.exit()
import re
import string
from optparse import OptionParser
import warnings
import string
import collections
import math
import sets
#import third-party modules
from bx.bitset import *
from bx.bitset_builders import *
from bx.intervals import *
from bx.binned_array import BinnedArray
from bx_extras.fpconst import isNaN
from bx.bitset_utils import *
#import my own modules
from qcmodule import BED
from qcmodule import SAM
from qcmodule import bam_cigar
__author__ = "Liguo Wang"
__copyright__ = "Copyright 2012. All rights reserved."
__credits__ = []
__license__ = "GPL"
__version__="2.3.3"
__maintainer__ = "Liguo Wang"
__email__ = "[email protected]"
__status__ = "Production"
def cal_size(list):
'''calculate bed list total size'''
size=0
for l in list:
size += l[2] - l[1]
return size
def foundone(chrom,ranges, st, end):
found = 0
if chrom in ranges:
found = len(ranges[chrom].find(st,end))
return found
def build_bitsets(list):
'''build intervalTree from list'''
ranges={}
for l in list:
chrom =l[0].upper()
st = int(l[1])
end = int(l[2])
if chrom not in ranges:
ranges[chrom] = Intersecter()
ranges[chrom].add_interval( Interval( st, end ) )
return ranges
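# Illustrative sketch (added example, not part of the original script): building an
# interval index from a tiny BED-like list and counting overlaps with foundone().
# The chromosome name and coordinates are made up for demonstration.
def _example_interval_lookup():
    regions = [('chr1', 100, 200), ('chr1', 500, 800)]
    ranges = build_bitsets(regions)
    return foundone('CHR1', ranges, 150, 160)   # 1; build_bitsets upper-cases chromosome names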
def process_gene_model(gene_model):
print >>sys.stderr, "processing " + gene_model + ' ...',
obj = BED.ParseBED(gene_model)
utr_3 = obj.getUTR(utr=3)
utr_5 = obj.getUTR(utr=5)
cds_exon = obj.getCDSExon()
intron = obj.getIntron()
intron = BED.unionBed3(intron)
cds_exon=BED.unionBed3(cds_exon)
utr_5 = BED.unionBed3(utr_5)
utr_3 = BED.unionBed3(utr_3)
utr_5 = BED.subtractBed3(utr_5,cds_exon)
utr_3 = BED.subtractBed3(utr_3,cds_exon)
intron = BED.subtractBed3(intron,cds_exon)
intron = BED.subtractBed3(intron,utr_5)
intron = BED.subtractBed3(intron,utr_3)
intergenic_up_1kb = obj.getIntergenic(direction="up",size=1000)
intergenic_down_1kb = obj.getIntergenic(direction="down",size=1000)
intergenic_up_5kb = obj.getIntergenic(direction="up",size=5000)
intergenic_down_5kb = obj.getIntergenic(direction="down",size=5000)
intergenic_up_10kb = obj.getIntergenic(direction="up",size=10000)
intergenic_down_10kb = obj.getIntergenic(direction="down",size=10000)
#merge intergenic region
intergenic_up_1kb=BED.unionBed3(intergenic_up_1kb)
intergenic_up_5kb=BED.unionBed3(intergenic_up_5kb)
intergenic_up_10kb=BED.unionBed3(intergenic_up_10kb)
intergenic_down_1kb=BED.unionBed3(intergenic_down_1kb)
intergenic_down_5kb=BED.unionBed3(intergenic_down_5kb)
intergenic_down_10kb=BED.unionBed3(intergenic_down_10kb)
#purify intergenic region
intergenic_up_1kb=BED.subtractBed3(intergenic_up_1kb,cds_exon)
intergenic_up_1kb=BED.subtractBed3(intergenic_up_1kb,utr_5)
intergenic_up_1kb=BED.subtractBed3(intergenic_up_1kb,utr_3)
intergenic_up_1kb=BED.subtractBed3(intergenic_up_1kb,intron)
intergenic_down_1kb=BED.subtractBed3(intergenic_down_1kb,cds_exon)
intergenic_down_1kb=BED.subtractBed3(intergenic_down_1kb,utr_5)
intergenic_down_1kb=BED.subtractBed3(intergenic_down_1kb,utr_3)
intergenic_down_1kb=BED.subtractBed3(intergenic_down_1kb,intron)
#purify intergenic region
intergenic_up_5kb=BED.subtractBed3(intergenic_up_5kb,cds_exon)
intergenic_up_5kb=BED.subtractBed3(intergenic_up_5kb,utr_5)
intergenic_up_5kb=BED.subtractBed3(intergenic_up_5kb,utr_3)
intergenic_up_5kb=BED.subtractBed3(intergenic_up_5kb,intron)
intergenic_down_5kb=BED.subtractBed3(intergenic_down_5kb,cds_exon)
intergenic_down_5kb=BED.subtractBed3(intergenic_down_5kb,utr_5)
intergenic_down_5kb=BED.subtractBed3(intergenic_down_5kb,utr_3)
intergenic_down_5kb=BED.subtractBed3(intergenic_down_5kb,intron)
#purify intergenic region
intergenic_up_10kb=BED.subtractBed3(intergenic_up_10kb,cds_exon)
intergenic_up_10kb=BED.subtractBed3(intergenic_up_10kb,utr_5)
intergenic_up_10kb=BED.subtractBed3(intergenic_up_10kb,utr_3)
intergenic_up_10kb=BED.subtractBed3(intergenic_up_10kb,intron)
intergenic_down_10kb=BED.subtractBed3(intergenic_down_10kb,cds_exon)
intergenic_down_10kb=BED.subtractBed3(intergenic_down_10kb,utr_5)
intergenic_down_10kb=BED.subtractBed3(intergenic_down_10kb,utr_3)
intergenic_down_10kb=BED.subtractBed3(intergenic_down_10kb,intron)
#build intervalTree
cds_exon_ranges = build_bitsets(cds_exon)
utr_5_ranges = build_bitsets(utr_5)
utr_3_ranges = build_bitsets(utr_3)
intron_ranges = build_bitsets(intron)
interg_ranges_up_1kb_ranges = build_bitsets(intergenic_up_1kb)
interg_ranges_up_5kb_ranges = build_bitsets(intergenic_up_5kb)
interg_ranges_up_10kb_ranges = build_bitsets(intergenic_up_10kb)
interg_ranges_down_1kb_ranges = build_bitsets(intergenic_down_1kb)
interg_ranges_down_5kb_ranges = build_bitsets(intergenic_down_5kb)
interg_ranges_down_10kb_ranges = build_bitsets(intergenic_down_10kb)
exon_size = cal_size(cds_exon)
intron_size = cal_size(intron)
utr3_size = cal_size(utr_3)
utr5_size = cal_size(utr_5)
int_up1k_size = cal_size(intergenic_up_1kb)
int_up5k_size = cal_size(intergenic_up_5kb)
int_up10k_size = cal_size(intergenic_up_10kb)
int_down1k_size = cal_size(intergenic_down_1kb)
int_down5k_size = cal_size(intergenic_down_5kb)
int_down10k_size = cal_size(intergenic_down_10kb)
print >>sys.stderr, "Done"
return (cds_exon_ranges,intron_ranges,utr_5_ranges,utr_3_ranges,\
interg_ranges_up_1kb_ranges,interg_ranges_up_5kb_ranges,interg_ranges_up_10kb_ranges,\
interg_ranges_down_1kb_ranges,interg_ranges_down_5kb_ranges,interg_ranges_down_10kb_ranges,\
exon_size,intron_size,utr5_size,utr3_size,\
int_up1k_size,int_up5k_size,int_up10k_size,\
int_down1k_size,int_down5k_size,int_down10k_size)
def main():
usage="%prog [options]" + '\n' + __doc__ + "\n"
parser = OptionParser(usage,version="%prog " + __version__)
parser.add_option("-i","--input-file",action="store",type="string",dest="input_file",help="Alignment file in BAM or SAM format.")
parser.add_option("-r","--refgene",action="store",type="string",dest="ref_gene_model",help="Reference gene model in bed format.")
(options,args)=parser.parse_args()
if not (options.input_file and options.ref_gene_model):
parser.print_help()
sys.exit(0)
if not os.path.exists(options.ref_gene_model):
print >>sys.stderr, '\n\n' + options.ref_gene_model + " does NOT exists" + '\n'
#parser.print_help()
sys.exit(0)
if not os.path.exists(options.input_file):
print >>sys.stderr, '\n\n' + options.input_file + " does NOT exists" + '\n'
sys.exit(0)
#build bitset
(cds_exon_r, intron_r, utr_5_r, utr_3_r,\
intergenic_up_1kb_r,intergenic_up_5kb_r,intergenic_up_10kb_r,\
intergenic_down_1kb_r,intergenic_down_5kb_r,intergenic_down_10kb_r,\
cds_exon_base,intron_base,utr_5_base,utr_3_base,\
intergenic_up1kb_base,intergenic_up5kb_base,intergenic_up10kb_base,\
intergenic_down1kb_base,intergenic_down5kb_base,intergenic_down10kb_base) = process_gene_model(options.ref_gene_model)
intron_read=0
cds_exon_read=0
utr_5_read=0
utr_3_read=0
intergenic_up1kb_read=0
intergenic_down1kb_read=0
intergenic_up5kb_read=0
intergenic_down5kb_read=0
intergenic_up10kb_read=0
intergenic_down10kb_read=0
totalReads=0
totalFrags=0
unAssignFrags=0
obj = SAM.ParseBAM(options.input_file)
R_qc_fail=0
R_duplicate=0
R_nonprimary=0
R_unmap=0
print >>sys.stderr, "processing " + options.input_file + " ...",
try:
while(1):
aligned_read = obj.samfile.next()
if aligned_read.is_qcfail: #skip QC fail read
R_qc_fail +=1
continue
if aligned_read.is_duplicate: #skip duplicate read
R_duplicate +=1
continue
if aligned_read.is_secondary: #skip non primary hit
R_nonprimary +=1
continue
if aligned_read.is_unmapped: #skip unmap read
R_unmap +=1
continue
totalReads +=1
chrom = obj.samfile.getrname(aligned_read.tid)
chrom=chrom.upper()
exons = bam_cigar.fetch_exon(chrom, aligned_read.pos, aligned_read.cigar)
totalFrags += len(exons)
for exn in exons:
#print chrom + '\t' + str(exn[1]) + '\t' + str(exn[2])
mid = int(exn[1]) + int((int(exn[2]) - int(exn[1]))/2)
if foundone(chrom,cds_exon_r,mid,mid) > 0:
cds_exon_read += 1
continue
elif foundone(chrom,utr_5_r,mid,mid) >0 and foundone(chrom,utr_3_r,mid,mid) == 0:
utr_5_read += 1
continue
elif foundone(chrom,utr_3_r,mid,mid) >0 and foundone(chrom,utr_5_r,mid,mid) == 0:
utr_3_read += 1
continue
elif foundone(chrom,utr_3_r,mid,mid) >0 and foundone(chrom,utr_5_r,mid,mid) > 0:
unAssignFrags +=1
continue
elif foundone(chrom,intron_r,mid,mid) > 0:
intron_read += 1
continue
elif foundone(chrom,intergenic_up_10kb_r,mid,mid) >0 and foundone(chrom,intergenic_down_10kb_r,mid,mid) > 0:
unAssignFrags +=1
continue
elif foundone(chrom,intergenic_up_1kb_r,mid,mid) >0:
intergenic_up1kb_read += 1
intergenic_up5kb_read += 1
intergenic_up10kb_read += 1
elif foundone(chrom,intergenic_up_5kb_r,mid,mid) >0:
intergenic_up5kb_read += 1
intergenic_up10kb_read += 1
elif foundone(chrom,intergenic_up_10kb_r,mid,mid) >0:
intergenic_up10kb_read += 1
elif foundone(chrom,intergenic_down_1kb_r,mid,mid) >0:
intergenic_down1kb_read += 1
intergenic_down5kb_read += 1
intergenic_down10kb_read += 1
elif foundone(chrom,intergenic_down_5kb_r,mid,mid) >0:
intergenic_down5kb_read += 1
intergenic_down10kb_read += 1
elif foundone(chrom,intergenic_down_10kb_r,mid,mid) >0:
intergenic_down10kb_read += 1
else:
unAssignFrags +=1
except StopIteration:
print >>sys.stderr, "Finished\n"
print "%-30s%d" % ("Total Reads",totalReads)
print "%-30s%d" % ("Total Tags",totalFrags)
print "%-30s%d" % ("Total Assigned Tags",totalFrags-unAssignFrags)
print "====================================================================="
print "%-20s%-20s%-20s%-20s" % ('Group','Total_bases','Tag_count','Tags/Kb')
print "%-20s%-20d%-20d%-18.2f" % ('CDS_Exons',cds_exon_base,cds_exon_read,cds_exon_read*1000.0/(cds_exon_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("5'UTR_Exons",utr_5_base,utr_5_read, utr_5_read*1000.0/(utr_5_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("3'UTR_Exons",utr_3_base,utr_3_read, utr_3_read*1000.0/(utr_3_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("Introns",intron_base,intron_read,intron_read*1000.0/(intron_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TSS_up_1kb",intergenic_up1kb_base, intergenic_up1kb_read, intergenic_up1kb_read*1000.0/(intergenic_up1kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TSS_up_5kb",intergenic_up5kb_base, intergenic_up5kb_read, intergenic_up5kb_read*1000.0/(intergenic_up5kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TSS_up_10kb",intergenic_up10kb_base, intergenic_up10kb_read, intergenic_up10kb_read*1000.0/(intergenic_up10kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TES_down_1kb",intergenic_down1kb_base, intergenic_down1kb_read, intergenic_down1kb_read*1000.0/(intergenic_down1kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TES_down_5kb",intergenic_down5kb_base, intergenic_down5kb_read, intergenic_down5kb_read*1000.0/(intergenic_down5kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TES_down_10kb",intergenic_down10kb_base, intergenic_down10kb_read, intergenic_down10kb_read*1000.0/(intergenic_down10kb_base+1))
print "====================================================================="
if __name__ == '__main__':
main()
| gpl-3.0 | -9,165,150,322,009,244,000 | 39.036913 | 165 | 0.693068 | false |
nickname456/pbots | poker.py | 1 | 5255 | #-----------------------------------------------------------#
# Heads Up Omaha Challenge - Starter Bot #
#===========================================================#
# #
# Last update: 22 May, 2014 #
# #
# @author Jackie <[email protected]> #
# @version 1.0 #
# @license MIT License (http://opensource.org/licenses/MIT) #
#-----------------------------------------------------------#
class Card(object):
'''
Card class
'''
def __init__(self, suit, value):
self.suit = suit
self.value = value
self.number = '23456789TJQKA'.find(value)
def __repr__(self):
return self.value+self.suit
def __cmp__(self,other):
n_cmp = cmp(self.number,other.number)
if n_cmp!=0:
return n_cmp
return cmp(self.suit,other.suit)
class Pocket(object):
'''
Pocket class
'''
def __init__(self, cards):
self.cards = cards
def __iter__(self):
return iter(self.cards)
class Table(object):
'''
Table class
'''
def __init__(self, cards):
self.cards = cards
class Hand(object):
'''
Hand class
'''
def __init__(self, cards):
self.cards = cards
self.rank = Ranker.rank_five_cards(cards)
def __gt__(self, hand):
return self.rank > hand.rank
def __ge__(self, hand):
return self.rank >= hand.rank
def __lt__(self, hand):
return self.rank < hand.rank
def __le__(self, hand):
return self.rank <= hand.rank
def __eq__(self, hand):
return self.rank == hand.rank
def __repr__(self):
return "Hand:"+str(self.cards)+" rank"+str(self.rank)
# TODO: cache the below?
def is_flush_draw(self):
return Ranker.is_flush_draw(self.cards)
def is_straight_draw(self):
return Ranker.is_straight_draw(self.cards)
class Ranker(object):
'''
Ranker class
'''
@staticmethod
def rank_five_cards(cards):
# List of all card values
values = sorted(['23456789TJQKA'.find(card.value) for card in cards])
# Checks if hand is a straight
is_straight = all([values[i] == values[0] + i for i in range(5)])
# Additional straight check
if not is_straight:
# Wheel
is_straight = all(values[i] == values[0] + i for i in range(4)) \
and values[4] == 12 \
and values[0] == 0
# Rotate values as the ace is weakest in this case
values = values[1:] + values[:1]
# Checks if hand is a flush
is_flush = all([card.suit == cards[0].suit for card in cards])
# Get card value counts
value_count = {value: values.count(value) for value in values}
# Sort value counts by most occuring
sorted_value_count = sorted([(count, value) for value, count in value_count.items()], reverse = True)
# Get all kinds (e.g. four of a kind, three of a kind, pair)
kinds = [value_count[0] for value_count in sorted_value_count]
# Get values for kinds
kind_values = [value_count[1] for value_count in sorted_value_count]
# Royal flush
if is_straight and is_flush and values[0] == 8:
return ['9'] + values
# Straight flush
if is_straight and is_flush:
return ['8'] + kind_values
# Four of a kind
if kinds[0] == 4:
return ['7'] + kind_values
# Full house
if kinds[0] == 3 and kinds[1] == 2:
return ['6'] + kind_values
# Flush
if is_flush:
return ['5'] + kind_values
# Straight
if is_straight:
return ['4'] + kind_values
# Three of a kind
if kinds[0] == 3:
return ['3'] + kind_values
# Two pair
if kinds[0] == 2 and kinds[1] == 2:
return ['2'] + kind_values
# Pair
if kinds[0] == 2:
return ['1'] + kind_values
# No pair
return ['0'] + kind_values
@staticmethod
def is_flush_draw(cards):
for i in range(0,5):
cards_ = cards[0:i]+cards[(i+1):]
same_suit = all([c.suit == cards_[0].suit for c in cards_])
if same_suit:
return True
return False
@staticmethod
def is_straight_draw(cards):
    # A straight draw: four of the five cards can be completed to a straight by
    # one more card (the ace plays both high and low).
    for i in range(0, 5):
        cards_ = cards[0:i] + cards[(i + 1):]
        values = set('23456789TJQKA'.find(card.value) for card in cards_)
        # Candidate straight windows, including the wheel (A-2-3-4-5)
        windows = [set(range(start, start + 5)) for start in range(0, 9)]
        windows.append(set([12, 0, 1, 2, 3]))
        for window in windows:
            if len(window & values) >= 4:
                return True
    return False
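# Illustrative sketch (added example, not part of the original bot): a rank is a
# list whose first element encodes the hand category ('5' = flush, '1' = pair,
# '0' = high card, ...), so comparing two Hand objects compares the category first
# and the kicker values afterwards. The cards below are made up for demonstration.
def _example_rank_five_cards():
    pair = Hand([Card('s', 'A'), Card('h', 'A'), Card('d', '7'), Card('c', '4'), Card('s', '2')])
    flush = Hand([Card('s', 'K'), Card('s', 'J'), Card('s', '8'), Card('s', '5'), Card('s', '3')])
    return flush > pair    # True: category '5' (flush) beats category '1' (pair)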
| mit | 5,522,409,518,547,737,000 | 27.873626 | 109 | 0.479163 | false |
certifiedloud/e6b | tests.py | 1 | 2317 | from e6b import E6B
import unittest
class TimeSpeedDistance(unittest.TestCase):
'''Test time, speed and distance calculations'''
def setUp(self):
self.e6b = E6B()
def test_time(self):
'''Test time calculations'''
time = self.e6b.time(1, 60)
self.assertEqual(60, time)
def test_speed(self):
'''Test speed calculations'''
speed = self.e6b.speed(1, 60)
self.assertEqual(60, speed)
def test_distance(self):
'''Test distance calculations'''
distance = self.e6b.time(1, 60)
self.assertEqual(60, distance)
def test_true_airspeed(self):
tas = self.e6b.true_airspeed(0, 28.01, -50, 60)
self.assertAlmostEqual(tas, 54, delta=3)
class Fuel(unittest.TestCase):
'''Fuel calculations'''
def setUp(self):
self.e6b = E6B()
def test_fuel_endurance(self):
'''Test fuel endurance calculation'''
pass
def test_average_fuel_consumption(self):
pass
def test_fuel_capacity(self):
pass
class Wind(unittest.TestCase):
'''Test wind calculations'''
def setUp(self):
self.e6b = E6B()
def test_wind_correction_angle(self):
'''Test wind correction angle calculation'''
wca = self.e6b.wind_correction_angle(360, 60, 330, 10)
self.assertAlmostEqual(wca, -5, delta=2)
class Altitude(unittest.TestCase):
'''Altitude calculations'''
def setUp(self):
self.e6b = E6B()
def test_density_altitude(self):
da = self.e6b.density_altitude(5470, 35, 5)
self.assertAlmostEqual(da, 9034, delta=5)
class Conversions(unittest.TestCase):
'''Unit conversions'''
def setUp(self):
self.e6b = E6B()
def test_cel_to_fahr(self):
fahr = self.e6b.cel_to_fahr(0)
self.assertAlmostEqual(32, fahr, delta=.1)
def test_fahr_to_cel(self):
cel = self.e6b.fahr_to_cel(32)
self.assertAlmostEqual(0, cel, delta=.1)
def test_nautical_to_statute(self):
stat = self.e6b.nautical_to_statute(10)
self.assertAlmostEqual(11.51, stat, delta=.01)
def test_statute_to_nautical(self):
naut = self.e6b.statute_to_nautical(20)
self.assertAlmostEqual(17.37, naut, delta=.01)
if __name__ == '__main__':
unittest.main()
| mit | -1,729,810,929,808,543,700 | 26.258824 | 62 | 0.615019 | false |
cmbclh/vnpy1.7 | vnpy/trader/app/login/uiLoginWidget.py | 1 | 7055 | # encoding: UTF-8
'''
GUI widgets related to the login module
'''
import sys
sys.path.append('../')
#sys.path.append('D:\\tr\\vnpy-master\\vn.trader\\DAO')
sys.path.append('D:\\tr\\vnpy-1.7\\vnpy\\DAO')
sys.path.append('D:\\tr\\vnpy-1.7\\vnpy\\common')
import vnpy.DAO
import vnpy.common
from vnpy.DAO import *
import pandas as pd
import Tkinter
#from Tkinter import messagebox
from vnpy.trader.app.login.language import text
from vnpy.trader.uiBasicWidget import QtWidgets
TBUSER_COLUMNS = ['user_id','user_name','status','password','branch_no','open_date','cancel_date','passwd_date','op_group','op_rights','reserve1','dep_id','last_logon_date','last_logon_time','last_ip_address','fail_times','fail_date','reserve2','last_fail_ip']
########################################################################
class LoginSpinBox(QtWidgets.QLineEdit):#.QSpinBox):
"""调整参数用的数值框"""
#----------------------------------------------------------------------
def __init__(self, value):
"""Constructor"""
super(LoginSpinBox, self).__init__()
#self.setMinimum(0)
#self.setMaximum(1000000)
self.setText(value)
########################################################################
class LoginLine(QtWidgets.QFrame):
"""水平分割线"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(LoginLine, self).__init__()
self.setFrameShape(self.HLine)
self.setFrameShadow(self.Sunken)
########################################################################
class LoginEngineManager(QtWidgets.QWidget):
"""风控引擎的管理组件"""
#----------------------------------------------------------------------
def __init__(self, loginEngine, eventEngine, parent=None):
"""Constructor"""
super(LoginEngineManager, self).__init__(parent)
self.loginEngine = loginEngine
self.eventEngine = eventEngine
self.initUi()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
print self
self.setWindowTitle(text.LOGIN_MANAGER)
# Set up the widgets
self.userId = LoginSpinBox(self.loginEngine.userId)
self.password = LoginSpinBox(self.loginEngine.password)
buttonLogin = QtWidgets.QPushButton(text.LOGIN)
buttonLogout = QtWidgets.QPushButton(text.LOGOUT)
buttonSubmit = QtWidgets.QPushButton(text.SUBMIT)
Label = QtWidgets.QLabel
grid = QtWidgets.QGridLayout()
grid.addWidget(Label(text.USERID), 2, 0)
grid.addWidget(self.userId, 2, 1)
grid.addWidget(Label(text.PASSWORD), 3, 0)
grid.addWidget(self.password, 3, 1)
grid.addWidget(LoginLine(), 4, 0, 1, 2)
hbox = QtWidgets.QHBoxLayout()
hbox.addStretch()
hbox.addWidget(buttonSubmit)
hbox.addWidget(buttonLogin)
vbox = QtWidgets.QVBoxLayout()
vbox.addLayout(grid)
vbox.addLayout(hbox)
self.setLayout(vbox)
# Connect widget signals
buttonSubmit.clicked.connect(self.submit)
buttonLogin.clicked.connect(self.login)
# Set to a fixed size
self.setFixedSize(self.sizeHint())
# ----------------------------------------------------------------------
def login(self):
print (u'Login verification started: self.userId=%s, self.password=%s' % (self.userId, self.password))
userId = str(self.userId.text())
password = str(self.password.text())
print (u'Login verification started: userId=%s, password=%s' % (userId, password))
# The query below returns at most one active user record
sql = ' SELECT *' \
' from tbuser where user_id = \'%s\' and password = \'%s\' and status = 0 ' % (userId, password)
try:
ret = vnpy.DAO.getDataBySQL('vnpy', sql)
if ret.empty :
print (u'Login verification failed: user does not exist or the password is incorrect')
#QtWidgets.QMessageBox.information(self, "Login failed", "User does not exist or the password is incorrect, please retry!", QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
QtWidgets.QMessageBox.information(self, text.LOGINERROR,text.LOGINERRORINFO,
QtWidgets.QMessageBox.Retry)
#Tkinter.messagebox.showinfo('Login verification failed: user does not exist or the password is incorrect')
else:
print (u'Login verification succeeded')
QtWidgets.QMessageBox.information(self, text.LOGINSUSS, text.LOGINSUSSINFO, QtWidgets.QMessageBox.Ok)
self.close()
#Tkinter.messagebox.showinfo('Welcome')
except Exception as e:
print e
# ----------------------------------------------------------------------
def logout(self):
pass
# ----------------------------------------------------------------------
def submit(self):
userId = str(self.userId.text())
password = str(self.password.text())
print (u'Registration verification started: userId=%s, password=%s' % (userId, password))
# The query below returns at most one active user record
sql = ' SELECT user_id,status' \
' from tbuser where user_id = \'%s\' ' % (userId)
try:
ret = vnpy.DAO.getDataBySQL('vnpy', sql)
# If the user does not exist in the system, insert the registration record directly
if ret.empty:
print (u'No existing user record; registration can proceed')
userData = [userId, userId, 0, password, '', 0, 0, 0, '', ' ', ' ', '', 0, 0, '', 0, 0, ' ', '']
d = pd.DataFrame([userData], columns=TBUSER_COLUMNS)
try:
print("开始写入TBUSER中")
vnpy.DAO.writeData('vnpy', 'tbuser', d)
print (u'注册成功')
QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITSUSS, QtWidgets.QMessageBox.Ok)
self.close()
except Exception as e1:
                    print(u'Registration failed')
                    QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITFAIL, QtWidgets.QMessageBox.Retry)
                    print(e1)
            # If the user already exists, update the status and password to reactivate the account
else:
                # Not implemented yet
QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITFAIL, QtWidgets.QMessageBox.Ok)
self.close()
except Exception as e:
            print(e)
#QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITSUSS, QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
# ----------------------------------------------------------------------
def closeLoginEngineManager(self):
self.close()
pass | mit | 3,855,137,239,608,203,000 | 36.542857 | 260 | 0.507992 | false |
fast90/christian | modules/hq.py | 1 | 1507 | from datetime import datetime
class HQ(object):
def __init__(self):
self.people_in_hq = 0
self.keys_in_hq = 0
self.joined_users = []
self.hq_status = 'unknown'
self.status_since = datetime.now().strftime('%Y-%m-%d %H:%M')
self.is_clean = True
self.joined_keys = []
def update_time(self):
self.status_since = datetime.now().strftime('%Y-%m-%d %H:%M')
def hq_open(self):
self.hq_status = 'open'
self.update_time()
def hq_close(self):
self.hq_status = 'closed'
self.update_time()
self.people_in_hq = 0
        del self.joined_users[:]
        del self.joined_keys[:]
def hq_private(self):
self.hq_status = 'private'
self.update_time()
def hq_clean(self):
self.is_clean = True
def hq_dirty(self):
self.is_clean = False
    def hq_join(self, user):
        self.people_in_hq += 1
        self.joined_users.append(user)
    def hq_leave(self, user):
        self.people_in_hq -= 1
        self.joined_users.remove(user)
    def hq_keyjoin(self, user):
        self.keys_in_hq += 1
        self.joined_keys.append(user)
    def hq_keyleave(self, user):
        self.keys_in_hq -= 1
        self.joined_keys.remove(user)
def get_hq_status(self):
return ('HQ is {} since {}. {} Members are here'
.format(self.hq_status, self.status_since, self.people_in_hq))
def get_hq_clean(self):
return self.is_clean
| gpl-3.0 | 6,626,186,680,984,100,000 | 24.542373 | 78 | 0.558062 | false |
micbou/YouCompleteMe | python/ycm/client/completer_available_request.py | 2 | 1716 | # Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from ycm.client.base_request import BaseRequest, BuildRequestData
class CompleterAvailableRequest( BaseRequest ):
def __init__( self, filetypes ):
super( CompleterAvailableRequest, self ).__init__()
self.filetypes = filetypes
self._response = None
def Start( self ):
request_data = BuildRequestData()
request_data.update( { 'filetypes': self.filetypes } )
self._response = self.PostDataToHandler( request_data,
'semantic_completion_available' )
def Response( self ):
return self._response
def SendCompleterAvailableRequest( filetypes ):
request = CompleterAvailableRequest( filetypes )
# This is a blocking call.
request.Start()
return request.Response()
| gpl-3.0 | -4,551,042,928,777,884,700 | 33.32 | 78 | 0.726107 | false |
vivisect/synapse | synapse/cryotank.py | 1 | 49542 | import os
import types
import shutil
import struct
import logging
import threading
import contextlib
from functools import partial, wraps
from collections import defaultdict
import lmdb # type: ignore
import synapse.lib.cell as s_cell
import synapse.lib.lmdb as s_lmdb
import synapse.lib.queue as s_queue
import synapse.lib.config as s_config
import synapse.lib.msgpack as s_msgpack
import synapse.lib.threads as s_threads
import synapse.lib.datapath as s_datapath
import synapse.exc as s_exc
import synapse.glob as s_glob
import synapse.common as s_common
import synapse.eventbus as s_eventbus
import synapse.datamodel as s_datamodel
logger = logging.getLogger(__name__)
class CryoTank(s_config.Config):
'''
A CryoTank implements a stream of structured data.
'''
def __init__(self, dirn, conf=None):
s_config.Config.__init__(self, conf)
self.path = s_common.gendir(dirn)
path = s_common.gendir(self.path, 'cryo.lmdb')
mapsize = self.getConfOpt('mapsize')
self.lmdb = lmdb.open(path, writemap=True, max_dbs=128)
self.lmdb.set_mapsize(mapsize)
self.lmdb_items = self.lmdb.open_db(b'items')
self.lmdb_metrics = self.lmdb.open_db(b'metrics')
noindex = self.getConfOpt('noindex')
self.indexer = None if noindex else CryoTankIndexer(self)
with self.lmdb.begin() as xact:
self.items_indx = xact.stat(self.lmdb_items)['entries']
self.metrics_indx = xact.stat(self.lmdb_metrics)['entries']
def fini():
self.lmdb.sync()
self.lmdb.close()
self.onfini(fini)
@staticmethod
@s_config.confdef(name='cryotank')
def _cryotank_confdefs():
defs = (
('mapsize', {'type': 'int', 'doc': 'LMDB mapsize value', 'defval': s_lmdb.DEFAULT_MAP_SIZE}),
('noindex', {'type': 'bool', 'doc': 'Disable indexing', 'defval': 0}),
)
return defs
def last(self):
'''
Return the last item stored in this CryoTank.
'''
with self.lmdb.begin() as xact:
with xact.cursor(db=self.lmdb_items) as curs:
if not curs.last():
return None
indx = struct.unpack('>Q', curs.key())[0]
return indx, s_msgpack.un(curs.value())
def puts(self, items):
'''
Add the structured data from items to the CryoTank.
Args:
items (list): A list of objects to store in the CryoTank.
Returns:
int: The index that the item storage began at.
'''
itembyts = [s_msgpack.en(i) for i in items]
tick = s_common.now()
bytesize = sum([len(b) for b in itembyts])
with self.lmdb.begin(db=self.lmdb_items, write=True) as xact:
retn = self.items_indx
todo = []
for byts in itembyts:
todo.append((struct.pack('>Q', self.items_indx), byts))
self.items_indx += 1
with xact.cursor() as curs:
curs.putmulti(todo, append=True)
took = s_common.now() - tick
with xact.cursor(db=self.lmdb_metrics) as curs:
lkey = struct.pack('>Q', self.metrics_indx)
self.metrics_indx += 1
info = {'time': tick, 'count': len(items), 'size': bytesize, 'took': took}
curs.put(lkey, s_msgpack.en(info), append=True)
self.fire('cryotank:puts', numrecords=len(itembyts))
return retn
def metrics(self, offs, size=None):
'''
Yield metrics rows starting at offset.
Args:
offs (int): The index offset.
size (int): The maximum number of records to yield.
Yields:
((int, dict)): An index offset, info tuple for metrics.
'''
mink = struct.pack('>Q', offs)
with self.lmdb.begin() as xact:
with xact.cursor(db=self.lmdb_metrics) as curs:
if not curs.set_range(mink):
return
for i, (lkey, lval) in enumerate(curs):
if size is not None and i >= size:
return
indx = struct.unpack('>Q', lkey)[0]
item = s_msgpack.un(lval)
yield indx, item
def slice(self, offs, size):
'''
Yield a number of items from the CryoTank starting at a given offset.
Args:
offs (int): The index of the desired datum (starts at 0)
size (int): The max number of items to yield.
Notes:
This API performs msgpack unpacking on the bytes, and could be
slow to call remotely.
Yields:
((index, object)): Index and item values.
'''
lmin = struct.pack('>Q', offs)
with self.lmdb.begin() as xact:
with xact.cursor(db=self.lmdb_items) as curs:
if not curs.set_range(lmin):
return
for i, (lkey, lval) in enumerate(curs):
if i >= size:
return
indx = struct.unpack('>Q', lkey)[0]
yield indx, s_msgpack.un(lval)
def rows(self, offs, size):
'''
Yield a number of raw items from the CryoTank starting at a given offset.
Args:
offs (int): The index of the desired datum (starts at 0)
size (int): The max number of items to yield.
Yields:
((indx, bytes)): Index and msgpacked bytes.
'''
lmin = struct.pack('>Q', offs)
imax = offs + size
# time slice the items from the cryo tank
with self.lmdb.begin() as xact:
with xact.cursor(db=self.lmdb_items) as curs:
if not curs.set_range(lmin):
return
for lkey, lval in curs:
indx = struct.unpack('>Q', lkey)[0]
if indx >= imax:
break
yield indx, lval
def info(self):
'''
Returns information about the CryoTank instance.
Returns:
dict: A dict containing items and metrics indexes.
'''
return {'indx': self.items_indx, 'metrics': self.metrics_indx, 'stat': self.lmdb.stat()}
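# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch of exercising a CryoTank directly, assuming a writable directory path.
# The directory name and the sample records below are hypothetical.
def _example_cryotank_usage(dirn='/tmp/example_cryo'):
    tank = CryoTank(dirn)
    offs = tank.puts([{'foo': 'bar'}, {'foo': 'baz'}])  # index where storage began
    for indx, item in tank.slice(offs, 2):              # unpacked records
        print(indx, item)
    for indx, info in tank.metrics(0):                  # per-puts ingest metrics
        print(indx, info)
    tank.fini()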
class CryoCell(s_cell.Cell):
def postCell(self):
'''
CryoCell initialization routines.
'''
self.names = self.getCellDict('cryo:names')
self.confs = self.getCellDict('cryo:confs')
self.tanks = s_eventbus.BusRef()
for name, iden in self.names.items():
logger.info('Bringing tank [%s][%s] online', name, iden)
path = self.getCellPath('tanks', iden)
conf = self.confs.get(name)
tank = CryoTank(path, conf)
self.tanks.put(name, tank)
def initConfDefs(self):
super().initConfDefs()
self.addConfDefs((
('defvals', {'defval': {},
'ex': '{"mapsize": 1000000000}',
'doc': 'Default settings for cryotanks created by the cell.',
'asloc': 'tank_defaults'}),
))
def finiCell(self):
'''
Fini handlers for the CryoCell
'''
self.tanks.fini()
def handlers(self):
'''
CryoCell message handlers.
'''
cryo_handlers = {
'cryo:init': self._onCryoInit,
'cryo:list': self._onCryoList,
'cryo:puts': self._onCryoPuts,
'cryo:dele': self._onCryoDele,
'cryo:last': partial(self._onGeneric, CryoTank.last),
'cryo:rows': partial(self._onGeneric, CryoTank.rows),
'cryo:slice': partial(self._onGeneric, CryoTank.slice),
'cryo:metrics': partial(self._onGeneric, CryoTank.metrics),
}
indexer_calls = {
'cryo:indx:add': CryoTankIndexer.addIndex,
'cryo:indx:del': CryoTankIndexer.delIndex,
'cryo:indx:pause': CryoTankIndexer.pauseIndex,
'cryo:indx:resume': CryoTankIndexer.resumeIndex,
'cryo:indx:stat': CryoTankIndexer.getIndices,
'cryo:indx:querynormvalu': CryoTankIndexer.queryNormValu,
'cryo:indx:querynormrecords': CryoTankIndexer.queryNormRecords,
'cryo:indx:queryrows': CryoTankIndexer.queryRows
}
cryo_handlers.update({k: partial(self._onCryoIndex, v) for k, v in indexer_calls.items()})
return cryo_handlers
def _standard_return(self, chan, subfunc, *args, **kwargs):
'''
Calls a function and returns the return value or exception back through the channel
'''
try:
rv = subfunc(*args, **kwargs)
except Exception as e:
retn = s_common.getexcfo(e)
return chan.tx((False, retn))
if isinstance(rv, types.GeneratorType):
chan.setq()
chan.tx((True, True))
genr = s_common.chunks(rv, 1000)
chan.txwind(genr, 100, timeout=30)
return
return chan.tx((True, rv))
@s_glob.inpool
def _onGeneric(self, method, chan, mesg):
'''
        Generic handler that looks up the tank by name and passes the request on to the given CryoTank method
'''
cmdstr, kwargs = mesg
name = kwargs.pop('name')
tank = self.tanks.get(name)
with chan:
if tank is None:
return chan.tx((False, ('NoSuchName', {'name': name})))
return self._standard_return(chan, method, tank, **kwargs)
@s_glob.inpool
def _onCryoIndex(self, subfunc, chan, mesg):
cmdstr, kwargs = mesg
name = kwargs.pop('name')
tank = self.tanks.get(name)
with chan:
if tank is None:
return chan.tx((False, ('NoSuchName', {'name': name})))
indexer = tank.indexer
if indexer is None:
return chan.tx((False, ('IndexingDisabled', {'name': name})))
return self._standard_return(chan, subfunc, indexer, **kwargs)
def genCryoTank(self, name, conf=None):
'''
        Generate a new CryoTank with a given name or get a reference to an existing CryoTank.
Args:
name (str): Name of the CryoTank.
Returns:
CryoTank: A CryoTank instance.
'''
tank = self.tanks.get(name)
if tank is not None:
return tank
iden = s_common.guid()
logger.info('Creating new tank: %s', name)
path = self.getCellPath('tanks', iden)
mergeconf = self.tank_defaults.copy()
if conf is not None:
mergeconf.update(conf)
tank = CryoTank(path, mergeconf)
self.names.set(name, iden)
self.confs.set(name, conf)
self.tanks.put(name, tank)
return tank
def getCryoList(self):
'''
Get a list of (name, info) tuples for the CryoTanks.
Returns:
list: A list of tufos.
'''
return [(name, tank.info()) for (name, tank) in self.tanks.items()]
def _onCryoList(self, chan, mesg):
chan.txfini((True, self.getCryoList()))
@s_glob.inpool
def _onCryoDele(self, chan, mesg):
name = mesg[1].get('name')
logger.info('Deleting tank: %s' % (name,))
with chan:
tank = self.tanks.pop(name) # type: CryoTank
if tank is None:
return chan.tx((True, False))
self.names.pop(name)
tank.fini()
shutil.rmtree(tank.path, ignore_errors=True)
return chan.tx((True, True))
@s_glob.inpool
def _onCryoPuts(self, chan, mesg):
name = mesg[1].get('name')
chan.setq()
chan.tx(True)
with chan:
size = 0
tank = self.genCryoTank(name)
for items in chan.rxwind(timeout=30):
tank.puts(items)
size += len(items)
chan.txok(size)
@s_glob.inpool
def _onCryoInit(self, chan, mesg):
with chan:
tank = self.tanks.get(mesg[1].get('name'))
if tank:
return chan.tx((True, False))
return self._standard_return(chan, lambda **kwargs: bool(self.genCryoTank(**kwargs)), **mesg[1])
class CryoClient:
'''
Client-side helper for interacting with a CryoCell which hosts CryoTanks.
Args:
        sess: An active session used to communicate with the CryoCell.
'''
_chunksize = 10000
def _remotecall(self, name, cmd_str, timeout=None, **kwargs):
'''
Handles all non-generator remote calls
'''
kwargs['name'] = name
ok, retn = self.sess.call((cmd_str, kwargs), timeout=timeout)
return s_common.reqok(ok, retn)
def _genremotecall(self, name, cmd_str, timeout=None, **kwargs):
'''
Handles all generator function remote calls
'''
kwargs['name'] = name
with self.sess.task((cmd_str, kwargs), timeout=timeout) as chan:
ok, retn = chan.next(timeout=timeout)
s_common.reqok(ok, retn)
for bloc in chan.rxwind(timeout=timeout):
for item in bloc:
yield item
def __init__(self, sess):
self.sess = sess
def puts(self, name, items, timeout=None):
'''
Add data to the named remote CryoTank by consuming from items.
Args:
name (str): The name of the remote CryoTank.
items (iter): An iterable of data items to load.
timeout (float/int): The maximum timeout for an ack.
Returns:
None
'''
with self.sess.task(('cryo:puts', {'name': name})) as chan:
if not chan.next(timeout=timeout):
return False
genr = s_common.chunks(items, self._chunksize)
chan.txwind(genr, 100, timeout=timeout)
return chan.next(timeout=timeout)
def last(self, name, timeout=None):
'''
Return the last entry in the named CryoTank.
Args:
name (str): The name of the remote CryoTank.
timeout (int): Request timeout
Returns:
((int, object)): The last entry index and object from the CryoTank.
'''
return self._remotecall(name, cmd_str='cryo:last', timeout=timeout)
def delete(self, name, timeout=None):
'''
Delete a named CryoTank.
Args:
name (str): The name of the remote CryoTank.
timeout (int): Request timeout
Returns:
bool: True if the CryoTank was deleted, False if it was not deleted.
'''
return self._remotecall(name, cmd_str='cryo:dele', timeout=timeout)
def list(self, timeout=None):
'''
Get a list of the remote CryoTanks.
Args:
timeout (int): Request timeout
Returns:
tuple: A tuple containing name, info tufos for the remote CryoTanks.
'''
ok, retn = self.sess.call(('cryo:list', {}), timeout=timeout)
return s_common.reqok(ok, retn)
def slice(self, name, offs, size, timeout=None):
'''
Slice and return a section from the named CryoTank.
Args:
name (str): The name of the remote CryoTank.
offs (int): The offset to begin the slice.
size (int): The number of records to slice.
timeout (int): Request timeout
Yields:
(int, obj): (indx, item) tuples for the sliced range.
'''
return self._genremotecall(name, offs=offs, size=size, cmd_str='cryo:slice', timeout=timeout)
def rows(self, name, offs, size, timeout=None):
'''
Retrieve raw rows from a section of the named CryoTank.
Args:
name (str): The name of the remote CryoTank.
offs (int): The offset to begin the row retrieval from.
size (int): The number of records to retrieve.
timeout (int): Request timeout.
Notes:
            This returns msgpack encoded records. It is the caller's
            responsibility to decode them.
Yields:
(int, bytes): (indx, bytes) tuples for the rows in range.
'''
return self._genremotecall(name, offs=offs, size=size, cmd_str='cryo:rows', timeout=timeout)
def metrics(self, name, offs, size=None, timeout=None):
'''
Carve a slice of metrics data from the named CryoTank.
Args:
name (str): The name of the remote CryoTank.
offs (int): The index offset.
timeout (int): Request timeout
Returns:
tuple: A tuple containing metrics tufos for the named CryoTank.
'''
return self._genremotecall(name, offs=offs, size=size, cmd_str='cryo:metrics', timeout=timeout)
def init(self, name, conf=None, timeout=None):
'''
Create a new named Cryotank.
Args:
name (str): Name of the Cryotank to make.
conf (dict): Additional configable options for the Cryotank.
timeout (int): Request timeout
Returns:
True if the tank was created, False if the tank existed or
there was an error during CryoTank creation.
'''
return self._remotecall(name, conf=conf, cmd_str='cryo:init', timeout=timeout)
def addIndex(self, name, prop, syntype, datapaths, timeout=None):
'''
Add an index to the cryotank
Args:
name (str): name of the Cryotank.
prop (str): the name of the property this will be stored as in the normalized record
syntype (str): the synapse type this will be interpreted as
datapaths(Iterable[str]): datapath specs against which the raw record is run to extract a single field
that is passed to the type normalizer. These will be tried in order until one succeeds. At least one
must be present.
timeout (Optional[float]): the maximum timeout for an ack
Returns:
None
Note:
Additional datapaths will only be tried if prior datapaths are not present, and *not* if
the normalization fails.
'''
if not len(datapaths):
raise s_exc.BadOperArg(mesg='datapaths must have at least one entry')
return self._remotecall(name, prop=prop, syntype=syntype, datapaths=datapaths, cmd_str='cryo:indx:add',
timeout=timeout)
def delIndex(self, name, prop, timeout=None):
'''
Delete an index
Args:
name (str): name of the Cryotank
prop (str): the (normalized) property name
timeout (Optional[float]): the maximum timeout for an ack
Returns:
None
'''
return self._remotecall(name, prop=prop, cmd_str='cryo:indx:del', timeout=timeout)
def pauseIndex(self, name, prop=None, timeout=None):
'''
Temporarily stop indexing one or all indices
Args:
name (str): name of the Cryotank
prop: (Optional[str]): the index to stop indexing, or if None, indicate to stop all indices
timeout (Optional[float]): the maximum timeout for an ack
Returns:
None
Note:
Pausing is not persistent. Restarting the process will resume indexing.
'''
return self._remotecall(name, prop=prop, cmd_str='cryo:indx:pause', timeout=timeout)
def resumeIndex(self, name, prop=None, timeout=None):
'''
Undo a pauseIndex
Args:
name (str): name of the Cryotank
prop (Optional[str]): the index to start indexing, or if None, indicate to resume all indices
timeout (Optional[float]): the maximum timeout for an ack
Returns:
None
'''
return self._remotecall(name, prop=prop, cmd_str='cryo:indx:resume', timeout=timeout)
def getIndices(self, name, timeout=None):
'''
Get information about all the indices
Args:
name (str): name of the Cryotank
timeout (Optional[float]): the maximum timeout for an ack
Returns:
List[Dict[str: Any]]: all the indices with progress and statistics
'''
return self._remotecall(name, cmd_str='cryo:indx:stat', timeout=timeout)
def queryNormValu(self, name, prop, valu=None, exact=False, timeout=None):
'''
Query for normalized individual property values
Args:
name (str): name of the Cryotank
prop (str): The name of the indexed property
valu (Optional[Union[int, str]]): The normalized value. If not present, all records with prop present,
sorted by prop will be returned. It will be considered a prefix if exact is False.
exact (bool): Indicates that the result must match exactly. Conversely, if False, indicates a prefix match.
timeout (Optional[float]): the maximum timeout for an ack
Returns:
Iterable[Tuple[int, Union[str, int]]]: A generator of offset, normalized value tuples.
'''
return self._genremotecall(name, prop=prop, valu=valu, exact=exact, cmd_str='cryo:indx:querynormvalu',
timeout=timeout)
def queryNormRecords(self, name, prop, valu=None, exact=False, timeout=None):
'''
Query for normalized property values grouped together in dicts
Args:
name (str): name of the Cryotank
prop (str): The name of the indexed property
valu (Optional[Union[int, str]]): The normalized value. If not present, all records with prop present,
sorted by prop will be returned. It will be considered a prefix if exact is False.
exact (bool): Indicates that the result must match exactly. Conversely, if False, indicates a prefix match.
timeout (Optional[float]): the maximum timeout for an ack
Returns:
Iterable[Tuple[int, Dict[str, Union[str, int]]]]: A generator of offset, dictionary tuples
'''
return self._genremotecall(name, prop=prop, valu=valu, exact=exact, cmd_str='cryo:indx:querynormrecords',
timeout=timeout)
def queryRows(self, name, prop, valu=None, exact=False, timeout=None):
'''
Query for raw (i.e. from the cryotank itself) records
Args:
name (str): name of the Cryotank
prop (str): The name of the indexed property
valu (Optional[Union[int, str]]): The normalized value. If not present, all records with prop present,
sorted by prop will be returned. It will be considered a prefix if exact is False.
exact (bool): Indicates that the result must match exactly. Conversely, if False, indicates a prefix match.
timeout (Optional[float]): The maximum timeout for an ack
Returns:
Iterable[Tuple[int, bytes]]: A generator of tuple (offset, messagepack encoded) raw records
'''
return self._genremotecall(name, prop=prop, valu=valu, exact=exact, cmd_str='cryo:indx:queryrows',
timeout=timeout)
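# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch of driving a remote CryoCell through CryoClient, assuming `sess` is an
# already established session to the cell. The tank name, property name and datapath below
# are hypothetical.
def _example_cryoclient_usage(sess):
    client = CryoClient(sess)
    client.init('example:tank')                            # create the tank if it does not exist
    client.puts('example:tank', [{'fqdn': 'woot.com'}])    # stream records into the tank
    for indx, item in client.slice('example:tank', 0, 10):
        print(indx, item)
    client.addIndex('example:tank', 'fqdn', 'inet:fqdn', ['fqdn'])
    for indx, valu in client.queryNormValu('example:tank', 'fqdn'):
        print(indx, valu)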
# TODO: what to do with subprops returned from getTypeNorm
class _MetaEntry:
''' Describes a single CryoTank index in the system. '''
def __init__(self, propname: str, syntype: str, datapaths) -> None:
'''
Makes a MetaEntry
Args:
propname: The name of the key in the normalized dictionary
syntype: The synapse type name against which the data will be normalized
            datapaths (Iterable[str]): One or more datapath strings that will be used to find the field in a raw record
'''
self.propname = propname
self.syntype = syntype
self.datapaths = tuple(s_datapath.DataPath(d) for d in datapaths)
def en(self):
'''
Encodes a MetaEntry for storage
'''
return s_msgpack.en(self.asdict())
def asdict(self):
'''
Returns a MetaEntry as a dictionary
'''
return {'propname': self.propname,
'syntype': self.syntype,
'datapaths': tuple(d.path for d in self.datapaths)}
# Big-endian 64-bit integer encoder
_Int64be = struct.Struct('>Q')
class _IndexMeta:
'''
Manages persistence of CryoTank index metadata with an in-memory copy
"Schema":
    b'indices' key has msgpack encoded dict of
        { 'present': {8238483: {'propname': 'foo:bar', 'syntype': type, 'datapaths': (datapath, datapath2)}, ...},
          'deleting': [8238483, ...]
        }
    b'progress' key has msgpack encoded dict of
        { 8238483: {nextoffset, ngood, nnormfail}, ... }
_present_ contains the encoding information about the current indices
_deleting_ contains the indices currently being deleted (but aren't done)
_progress_ contains how far each index has gotten, how many successful props were indexed (which might be different
because of missing properties), and how many normalizations failed. It is separate because it gets updated a lot
more.
'''
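    # For example (hypothetical values), the two persisted keys might decode to:
    #   b'indices'  -> {'present': {8238483: <msgpack-encoded _MetaEntry for 'foo:bar'>}, 'deleting': []}
    #   b'progress' -> {8238483: {'nextoffset': 1000, 'ngood': 998, 'nnormfail': 2}}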
def __init__(self, dbenv: lmdb.Environment) -> None:
'''
Creates metadata for all the indices.
Args:
dbenv (lmdb.Environment): the lmdb instance in which to store the metadata.
Returns:
None
'''
self._dbenv = dbenv
# The table in the database file (N.B. in LMDB speak, this is called a database)
self._metatbl = dbenv.open_db(b'meta')
is_new_db = False
with dbenv.begin(db=self._metatbl, buffers=True) as txn:
indices_enc = txn.get(b'indices')
progress_enc = txn.get(b'progress')
if indices_enc is None or progress_enc is None:
if indices_enc is None and progress_enc is None:
is_new_db = True
indices_enc = s_msgpack.en({'present': {}, 'deleting': []})
progress_enc = s_msgpack.en({})
else:
raise s_exc.CorruptDatabase('missing meta information in index meta') # pragma: no cover
indices = s_msgpack.un(indices_enc)
# The details about what the indices are actually indexing: the datapath and type.
self.indices = {k: _MetaEntry(**s_msgpack.un(v)) for k, v in indices.get('present', {}).items()}
self.deleting = list(indices.get('deleting', ()))
# Keeps track (non-persistently) of which indices have been paused
self.asleep = defaultdict(bool) # type: ignore
# How far each index has progressed as well as statistics
self.progresses = s_msgpack.un(progress_enc)
if not all(p in self.indices for p in self.deleting):
raise s_exc.CorruptDatabase(
'index meta table: deleting entry with unrecognized property name') # pragma: no cover
if not all(p in self.indices for p in self.progresses):
raise s_exc.CorruptDatabase(
'index meta table: progress entry with unrecognized property name') # pragma: no cover
if is_new_db:
self.persist()
def persist(self, progressonly=False, txn=None):
'''
Persists the index info to the database
Args:
progressonly (bool): if True, only persists the progress (i.e. more dynamic) information
txn (Optional[lmdb.Transaction]): if not None, will use that transaction to record data. txn is
not committed.
Returns:
None
'''
        d = {'deleting': self.deleting,
'present': {k: metaentry.en() for k, metaentry in self.indices.items()}}
with contextlib.ExitStack() as stack:
if txn is None:
txn = stack.enter_context(self._dbenv.begin(db=self._metatbl, buffers=True, write=True))
if not progressonly:
txn.put(b'indices', s_msgpack.en(d), db=self._metatbl)
txn.put(b'progress', s_msgpack.en(self.progresses), db=self._metatbl)
def lowestProgress(self):
'''
Returns:
int: The next offset that should be indexed, based on active indices.
'''
nextoffsets = [p['nextoffset'] for iid, p in self.progresses.items() if not self.asleep[iid]]
return min(nextoffsets) if nextoffsets else s_lmdb.MAX_INT_VAL
def iidFromProp(self, prop):
'''
Retrieve the random index ID from the property name
Args:
            prop (str): The name of the indexed property
Returns:
int: the index id for the propname, None if not found
'''
return next((k for k, idx in self.indices.items() if idx.propname == prop), None)
def addIndex(self, prop, syntype, datapaths):
'''
Add an index to the cryotank
Args:
prop (str): the name of the property this will be stored as in the normalized record
syntype (str): the synapse type this will be interpreted as
datapaths (Iterable[str]): datapaths that will be tried in order.
Returns:
None
Note:
Additional datapaths will only be tried if prior datapaths are not present, and *not* if
the normalization fails.
'''
if self.iidFromProp(prop) is not None:
raise s_exc.DupIndx(mesg='Index already exists', index=prop)
if not len(datapaths):
raise s_exc.BadOperArg(mesg='datapaths must have at least one entry')
s_datamodel.tlib.reqDataType(syntype)
iid = int.from_bytes(os.urandom(8), 'little')
self.indices[iid] = _MetaEntry(propname=prop, syntype=syntype, datapaths=datapaths)
self.progresses[iid] = {'nextoffset': 0, 'ngood': 0, 'nnormfail': 0}
self.persist()
def delIndex(self, prop):
'''
Delete an index
Args:
prop (str): the (normalized) property name
Returns:
None
'''
iid = self.iidFromProp(prop)
if iid is None:
raise s_exc.NoSuchIndx(mesg='No such index', index=prop)
del self.indices[iid]
self.deleting.append(iid)
# remove the progress entry in case a new index with the same propname gets added later
del self.progresses[iid]
self.persist()
def pauseIndex(self, prop):
'''
Temporarily stop indexing one or all indices
Args:
prop: (Optional[str]): the index to stop indexing, or if None, indicate to stop all indices
Returns:
None
Note:
Pausing is not persistent. Restarting the process will resume indexing.
'''
for iid, idx in self.indices.items():
if prop is None or prop == idx.propname:
self.asleep[iid] = True
def resumeIndex(self, prop):
'''
Undo a pauseIndex
Args:
prop (Optional[str]): the index to start indexing, or if None, indicate to resume all indices
Returns:
None
'''
for iid, idx in self.indices.items():
if prop is None or prop == idx.propname:
self.asleep[iid] = False
def markDeleteComplete(self, iid):
'''
Indicates that deletion of a single index is complete
Args:
iid (int): The index ID to mark as deleted
'''
self.deleting.remove(iid)
self.persist()
_Int64le = struct.Struct('<Q')
def _iid_en(iid):
'''
Encode a little endian 64-bit integer
'''
return _Int64le.pack(iid)
def _iid_un(iid):
'''
Decode a little endian 64-bit integer
'''
return _Int64le.unpack(iid)[0]
def _inWorker(callback):
'''
    Queue the decorated function to the indexing worker to run in its thread
Args:
callback: the function to wrap
Returns:
the wrapped function
(Just like inpool for the worker)
'''
@wraps(callback)
def wrap(self, *args, **kwargs):
with s_threads.RetnWait() as retn:
self._workq.put((retn, callback, (self, ) + args, kwargs))
succ, rv = retn.wait(timeout=self.MAX_WAIT_S)
if succ:
if isinstance(rv, Exception):
raise rv
return rv
raise s_exc.TimeOut()
return wrap
class CryoTankIndexer:
'''
Manages indexing of a single cryotank's records
This implements a lazy indexer that indexes a cryotank in a separate thread.
Cryotank entries are msgpack-encoded values. An index consists of a property name, one or more datapaths (i.e.
what field out of the entry), and a synapse type. The type specifies the function that normalizes the output of
the datapath query into a string or integer.
Indices can be added and deleted asynchronously from the indexing thread via CryotankIndexer.addIndex and
CryotankIndexer.delIndex.
Indexes can be queried with queryNormValu, queryNormRecords, queryRows.
To harmonize with LMDB requirements, writing only occurs on a singular indexing thread. Reading indices takes
place in the caller's thread. Both reading and writing index metadata (that is, information about which indices
are running) take place on the indexer's thread.
Note:
The indexer cannot detect when a type has changed from underneath itself. Operators must explicitly delete
and re-add the index to avoid mixed normalized data.
'''
MAX_WAIT_S = 10
def __init__(self, cryotank):
'''
Create an indexer
Args:
cryotank: the cryotank to index
Returns:
None
'''
self.cryotank = cryotank
ebus = cryotank
self._worker = threading.Thread(target=self._workerloop, name='CryoTankIndexer')
path = s_common.gendir(cryotank.path, 'cryo_index.lmdb')
cryotank_map_size = cryotank.lmdb.info()['map_size']
self._dbenv = lmdb.open(path, writemap=True, metasync=False, max_readers=8, max_dbs=4,
map_size=cryotank_map_size)
# iid, v -> offset table
self._idxtbl = self._dbenv.open_db(b'indices', dupsort=True)
# offset, iid -> normalized prop
self._normtbl = self._dbenv.open_db(b'norms')
self._to_delete = {} # type: Dict[str, int]
self._workq = s_queue.Queue()
# A dict of propname -> MetaEntry
self._meta = _IndexMeta(self._dbenv)
self._next_offset = self._meta.lowestProgress()
self._chunk_sz = 1000 # < How many records to read at a time
self._remove_chunk_sz = 1000 # < How many index entries to remove at a time
ebus.on('cryotank:puts', self._onData)
self._worker.start()
def _onfini():
self._workq.done()
self._worker.join(self.MAX_WAIT_S)
self._dbenv.close()
ebus.onfini(_onfini)
def _onData(self, unused):
'''
        Wake up the index worker if it doesn't already have a reason to be awake
'''
if 0 == len(self._workq):
self._workq.put((None, lambda: None, None, None))
def _removeSome(self):
'''
Make some progress on removing deleted indices
'''
left = self._remove_chunk_sz
for iid in self._meta.deleting:
if not left:
break
iid_enc = _iid_en(iid)
with self._dbenv.begin(db=self._idxtbl, buffers=True, write=True) as txn, txn.cursor() as curs:
if curs.set_range(iid_enc):
for k, offset_enc in curs.iternext():
if k[:len(iid_enc)] != iid_enc:
break
if not curs.delete():
raise s_exc.CorruptDatabase('delete failure') # pragma: no cover
txn.delete(offset_enc, iid_enc, db=self._normtbl)
left -= 1
if not left:
break
if not left:
break
self._meta.markDeleteComplete(iid)
def _normalize_records(self, raw_records):
'''
Yield stream of normalized fields
Args:
raw_records(Iterable[Tuple[int, Dict[int, str]]]) generator of tuples of offset/decoded raw cryotank
record
Returns:
Iterable[Tuple[int, int, Union[str, int]]]: generator of tuples of offset, index ID, normalized property
value
'''
for offset, record in raw_records:
self._next_offset = offset + 1
dp = s_datapath.initelem(s_msgpack.un(record))
for iid, idx in ((k, v) for k, v in self._meta.indices.items() if not self._meta.asleep[k]):
if self._meta.progresses[iid]['nextoffset'] > offset:
continue
try:
self._meta.progresses[iid]['nextoffset'] = offset + 1
for datapath in idx.datapaths:
field = dp.valu(datapath)
if field is None:
continue
# TODO : what to do with subprops?
break
else:
# logger.debug('Datapaths %s yield nothing for offset %d',
# [d.path for d in idx.datapaths], offset)
continue
normval, _ = s_datamodel.getTypeNorm(idx.syntype, field)
except (s_exc.NoSuchType, s_exc.BadTypeValu):
# logger.debug('Norm fail', exc_info=True)
self._meta.progresses[iid]['nnormfail'] += 1
continue
self._meta.progresses[iid]['ngood'] += 1
yield offset, iid, normval
def _writeIndices(self, rows):
'''
Persist actual indexing to disk
Args:
rows(Iterable[Tuple[int, int, Union[str, int]]]): generators of tuples of offset, index ID, normalized
property value
Returns:
int: the next cryotank offset that should be indexed
'''
count = -1
with self._dbenv.begin(db=self._idxtbl, buffers=True, write=True) as txn:
for count, (offset, iid, normval) in enumerate(rows):
offset_enc = _Int64be.pack(offset)
iid_enc = _iid_en(iid)
valkey_enc = s_lmdb.encodeValAsKey(normval)
txn.put(iid_enc + valkey_enc, offset_enc)
txn.put(offset_enc + iid_enc, s_msgpack.en(normval), db=self._normtbl)
self._meta.persist(progressonly=True, txn=txn)
return count + 1
def _workerloop(self):
'''
Actually do the indexing
        Runs as a separate thread.
'''
stillworktodo = True
last_callback = 'None'
while True:
# Run the outstanding commands
recalc = False
while True:
try:
job = self._workq.get(timeout=0 if stillworktodo else None)
stillworktodo = True
retn, callback, args, kwargs = job
try:
if retn is not None:
last_callback = callback.__name__
retn.retn(callback(*args, **kwargs))
recalc = True
except Exception as e:
if retn is None:
raise
else:
# Not using errx because I want the exception object itself
retn.retn(e)
except s_exc.IsFini:
return
except s_exc.TimeOut:
break
if recalc:
# Recalculate the next offset to index, since we may have a new index
self._next_offset = self._meta.lowestProgress()
record_tuples = self.cryotank.rows(self._next_offset, self._chunk_sz)
norm_gen = self._normalize_records(record_tuples)
rowcount = self._writeIndices(norm_gen)
self._removeSome()
if not rowcount and not self._meta.deleting:
if stillworktodo is True:
self.cryotank.fire('cryotank:indexer:noworkleft:' + last_callback)
last_callback = 'None'
stillworktodo = False
else:
stillworktodo = True
@_inWorker
def addIndex(self, prop, syntype, datapaths):
'''
Add an index to the cryotank
Args:
prop (str): the name of the property this will be stored as in the normalized record
syntype (str): the synapse type this will be interpreted as
datapaths(Iterable[str]): datapath specs against which the raw record is run to extract a single field
that is passed to the type normalizer. These will be tried in order until one succeeds. At least one
must be present.
Returns:
None
Note:
Additional datapaths will only be tried if prior datapaths are not present, and *not* if
the normalization fails.
'''
return self._meta.addIndex(prop, syntype, datapaths)
@_inWorker
def delIndex(self, prop):
'''
Delete an index
Args:
prop (str): the (normalized) property name
Returns:
None
'''
return self._meta.delIndex(prop)
@_inWorker
def pauseIndex(self, prop=None):
'''
Temporarily stop indexing one or all indices.
Args:
prop: (Optional[str]): the index to stop indexing, or if None, indicate to stop all indices
Returns:
None
Note:
Pausing is not persistent. Restarting the process will resume indexing.
'''
return self._meta.pauseIndex(prop)
@_inWorker
def resumeIndex(self, prop=None):
'''
Undo a pauseIndex
Args:
prop: (Optional[str]): the index to start indexing, or if None, indicate to resume all indices
Returns:
None
'''
return self._meta.resumeIndex(prop)
@_inWorker
def getIndices(self):
'''
Get information about all the indices
Args:
None
Returns:
List[Dict[str: Any]]: all the indices with progress and statistics
'''
idxs = {iid: dict(metaentry.asdict()) for iid, metaentry in self._meta.indices.items()}
for iid in idxs:
idxs[iid].update(self._meta.progresses.get(iid, {}))
return list(idxs.values())
def _iterrows(self, prop, valu, exact=False):
'''
Query against an index.
Args:
prop (str): The name of the indexed property
valu (Optional[Union[int, str]]): The normalized value. If not present, all records with prop present,
                sorted by prop will be returned. It will be considered a prefix if exact is False.
            exact (bool): Indicates that the result must match exactly. Conversely, if False, indicates a prefix match.
Returns:
            Iterable[Tuple[int, bytes, bytes, lmdb.Transaction]]: a generator of tuples of the offset, the encoded
                offset, the encoded index ID, and the LMDB read transaction.
        Note:
            Ordering of tuples disregards everything after the first 128 bytes of a property.
'''
iid = self._meta.iidFromProp(prop)
if iid is None:
raise s_exc.NoSuchIndx(mesg='No such index', index=prop)
iidenc = _iid_en(iid)
islarge = valu is not None and isinstance(valu, str) and len(valu) >= s_lmdb.LARGE_STRING_SIZE
if islarge and not exact:
raise s_exc.BadOperArg(mesg='prefix search valu cannot exceed 128 characters')
if islarge and exact:
key = iidenc + s_lmdb.encodeValAsKey(valu)
elif valu is None:
key = iidenc
else:
key = iidenc + s_lmdb.encodeValAsKey(valu, isprefix=not exact)
with self._dbenv.begin(db=self._idxtbl, buffers=True) as txn, txn.cursor() as curs:
if exact:
rv = curs.set_key(key)
else:
rv = curs.set_range(key)
if not rv:
return
while True:
rv = []
curkey, offset_enc = curs.item()
if (not exact and not curkey[:len(key)] == key) or (exact and curkey != key):
return
offset = _Int64be.unpack(offset_enc)[0]
yield (offset, offset_enc, iidenc, txn)
if not curs.next():
return
def queryNormValu(self, prop, valu=None, exact=False):
'''
Query for normalized individual property values
Args:
prop (str): The name of the indexed property
valu (Optional[Union[int, str]]): The normalized value. If not present, all records with prop present,
sorted by prop will be returned. It will be considered a prefix if exact is False.
exact (bool): Indicates that the result must match exactly. Conversely, if False, indicates a prefix match.
Returns:
Iterable[Tuple[int, Union[str, int]]]: A generator of offset, normalized value tuples.
'''
if not exact and valu is not None and isinstance(valu, str) and len(valu) >= s_lmdb.LARGE_STRING_SIZE:
raise s_exc.BadOperArg(mesg='prefix search valu cannot exceed 128 characters')
for (offset, offset_enc, iidenc, txn) in self._iterrows(prop, valu, exact):
rv = txn.get(bytes(offset_enc) + iidenc, None, db=self._normtbl)
if rv is None:
raise s_exc.CorruptDatabase('Missing normalized record') # pragma: no cover
yield offset, s_msgpack.un(rv)
def queryNormRecords(self, prop, valu=None, exact=False):
'''
Query for normalized property values grouped together in dicts
Args:
prop (str): The name of the indexed property
valu (Optional[Union[int, str]]): The normalized value. If not present, all records with prop present,
sorted by prop will be returned. It will be considered a prefix if exact is False.
exact (bool): Indicates that the result must match exactly. Conversely, if False, indicates a prefix match.
Returns:
Iterable[Tuple[int, Dict[str, Union[str, int]]]]: A generator of offset, dictionary tuples
'''
if not exact and valu is not None and isinstance(valu, str) and len(valu) >= s_lmdb.LARGE_STRING_SIZE:
raise s_exc.BadOperArg(mesg='prefix search valu cannot exceed 128 characters')
for offset, offset_enc, _, txn in self._iterrows(prop, valu, exact):
norm = {}
olen = len(offset_enc)
with txn.cursor(db=self._normtbl) as curs:
if not curs.set_range(offset_enc):
raise s_exc.CorruptDatabase('Missing normalized record') # pragma: no cover
while True:
curkey, norm_enc = curs.item()
if curkey[:olen] != offset_enc:
break
iid = _iid_un(curkey[olen:])
# this is racy with the worker, but it is still safe
idx = self._meta.indices.get(iid)
if idx is not None:
norm[idx.propname] = s_msgpack.un(norm_enc)
if not curs.next():
break
yield offset, norm
def queryRows(self, prop, valu=None, exact=False):
'''
Query for raw (i.e. from the cryotank itself) records
Args:
prop (str): The name of the indexed property
valu (Optional[Union[int, str]]): The normalized value. If not present, all records with prop present,
sorted by prop will be returned. It will be considered a prefix if exact is False.
exact (bool): Indicates that the result must match exactly. Conversely, if False, indicates a prefix match.
Returns:
Iterable[Tuple[int, bytes]]: A generator of tuple (offset, messagepack encoded) raw records
'''
if not exact and valu is not None and isinstance(valu, str) and len(valu) >= s_lmdb.LARGE_STRING_SIZE:
raise s_exc.BadOperArg(mesg='prefix search valu cannot exceed 128 characters')
for offset, _, _, txn in self._iterrows(prop, valu, exact):
yield next(self.cryotank.rows(offset, 1))
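# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch of indexing a local CryoTank, assuming a writable directory path.
# The property name, synapse type and datapath below are hypothetical, and query results
# only appear once the background indexing thread has caught up.
def _example_indexer_usage(dirn='/tmp/example_cryo'):
    tank = CryoTank(dirn)
    tank.puts([{'fqdn': 'woot.com'}, {'fqdn': 'vertex.link'}])
    indexer = tank.indexer                      # present unless the 'noindex' option was set
    indexer.addIndex('fqdn', 'inet:fqdn', ['fqdn'])
    for info in indexer.getIndices():           # progress and statistics per index
        print(info)
    for offset, valu in indexer.queryNormValu('fqdn'):
        print(offset, valu)
    tank.fini()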
| apache-2.0 | -7,920,170,919,440,636,000 | 35.214912 | 120 | 0.568669 | false |
mdhaman/superdesk-core | tests/es_utils_test.py | 1 | 2861 |
from superdesk.tests import TestCase
from superdesk import es_utils
class ESUtilsTestCase(TestCase):
def test_filter2query(self):
"""Check that a saved_searches style filter is converted correctly to Elastic Search DSL"""
filter_ = {"query": {"spike": "exclude", "notgenre": '["Article (news)"]'}}
expected = {
"query": {
"bool": {"must": [], "must_not": [{"term": {"state": "spiked"}}, {"term": {"package_type": "takes"}}]}
},
"post_filter": {"bool": {"must": [], "must_not": [{"terms": {"genre.name": ["Article (news)"]}}]}},
}
with self.app.app_context():
__, query = es_utils.filter2query(filter_)
self.assertEqual(query, expected)
def test_filter2query_date(self):
"""Check that date a converted correctly to Elastic Search DSL"""
filter_ = {
"query": {
"spike": "exclude",
"firstcreatedfrom": "now-1M/M",
"firstcreatedto": "now-1M/M",
"firstpublished": "last_day",
"versioncreatedfrom": "01/02/2018",
"versioncreatedto": "11/12/2018",
}
}
expected = {
"query": {
"bool": {"must": [], "must_not": [{"term": {"state": "spiked"}}, {"term": {"package_type": "takes"}}]}
},
"post_filter": {
"bool": {
"must": [
{
"range": {
"firstcreated": {"lte": "now-1M/M", "gte": "now-1M/M"},
"versioncreated": {
"lte": "2018-12-11T00:00:00+01:00",
"gte": "2018-02-01T23:59:59.999999+01:00",
},
"firstpublished": {"lte": "now-1d/d", "gte": "now-1d/d"},
}
}
],
"must_not": [],
}
},
}
with self.app.app_context():
__, query = es_utils.filter2query(filter_)
self.assertEqual(query, expected)
def test_filter2query_ingest_provider(self):
"""Check that ingest provider is handler correctly"""
filter_ = {
"query": {
"repo": "ingest",
"ingest_provider": "5c505c8f0d6f137d69cebc99",
"spike": "exclude",
"params": "{}",
}
}
expected = {'bool': {'must': [{'term': {'ingest_provider': '5c505c8f0d6f137d69cebc99'}}], 'must_not': []}}
with self.app.app_context():
__, query = es_utils.filter2query(filter_)
self.assertEqual(query['post_filter'], expected)
| agpl-3.0 | 1,550,914,024,326,518,300 | 37.146667 | 118 | 0.431667 | false |
Tufin/pytos | pytos/securetrack/xml_objects/rest/nat_rules.py | 1 | 13490 | import logging
from pytos.common.base_types import XML_List, XML_Object_Base, Comparable
from pytos.common.definitions.xml_tags import Elements
from pytos.common.logging.definitions import XML_LOGGER_NAME
from pytos.common.functions import get_xml_int_value, get_xml_text_value, get_xml_node, create_tagless_xml_objects_list, str_to_bool
from pytos.securetrack.xml_objects.base_types import Base_Object
logger = logging.getLogger(XML_LOGGER_NAME)
class NatRules(XML_List):
def __init__(self, nat_rules):
super().__init__(Elements.NAT_RULES, nat_rules)
@classmethod
def from_xml_node(cls, xml_node):
rules = []
for nat_rule in xml_node.iter(tag=Elements.NAT_RULE):
rules.append(NatRule.from_xml_node(nat_rule))
return cls(rules)
class NatRule(XML_Object_Base, Comparable):
    def __init__(self, binding, num_id, order, uid, auto_nat, disabled, dst_nat_method, enable_net4tonet6, enable_route_lookup,
orig_dst_network, orig_service, orig_src_network, egress_interface, rule_number, service_nat_method,
src_nat_method, translated_service, translated_dst_network, translated_src_network, nat_type):
self.binding = binding
self.id = num_id
self.order = order
self.uid = uid
self.autoNat = auto_nat
self.disabled = disabled
self.dstNatMethod = dst_nat_method
self.enable_net4tonet6 = enable_net4tonet6
self.enable_route_lookup = enable_route_lookup
self.orig_dst_network = orig_dst_network
self.orig_service = orig_service
self.orig_src_network = orig_src_network
self.egress_interface = egress_interface
self.ruleNumber = rule_number
self.serviceNatMethod = service_nat_method
self.srcNatMethod = src_nat_method
self.translated_service = translated_service
self.translated_dst_network = translated_dst_network
self.translated_src_network = translated_src_network
self.type = nat_type
super().__init__(Elements.NAT_RULE)
def _key(self):
hash_keys = [self.id, self.uid]
if self.binding:
try:
hash_keys.append(self.binding.uid)
except AttributeError:
pass
return tuple(hash_keys)
def __str__(self):
return "ORIGINAL: (src={} dst={} srv={}); TRANSLATED: (src={} dst={} srv={})".format(
self.orig_src_network,
self.orig_dst_network,
self.orig_service,
self.translated_src_network,
self.translated_dst_network,
self.translated_service
)
def is_enabled(self):
return str_to_bool(self.disabled)
@classmethod
def from_xml_node(cls, xml_node):
num_id = get_xml_int_value(xml_node, Elements.ID)
order = get_xml_text_value(xml_node, Elements.ORDER)
uid = get_xml_text_value(xml_node, Elements.UID)
auto_nat = get_xml_text_value(xml_node, Elements.AUTONAT)
disabled = get_xml_text_value(xml_node, Elements.DISABLED)
dst_nat_method = get_xml_text_value(xml_node, Elements.DST_NAT_METHOD)
enable_net4tonet6 = get_xml_text_value(xml_node, Elements.ENABLE_NET_4_TO_NET_6)
enable_route_lookup = get_xml_text_value(xml_node, Elements.ENABLE_ROUTE_LOOKUP)
rule_number = get_xml_text_value(xml_node, Elements.RULENUMBER)
service_nat_method = get_xml_text_value(xml_node, Elements.SERVICENATMETHOD)
src_nat_method = get_xml_text_value(xml_node, Elements.SRCNATMETHOD)
nat_type = get_xml_text_value(xml_node, Elements.TYPE)
binding = create_tagless_xml_objects_list(xml_node, Elements.BINDING, NatRuleBinding)[0]
orig_dst_network = create_tagless_xml_objects_list(xml_node, Elements.ORIG_DST_NETWORK, OrigDstNetwork)[0]
orig_service = create_tagless_xml_objects_list(xml_node, Elements.ORIG_SERVICE, OrigService)[0]
orig_src_network = create_tagless_xml_objects_list(xml_node, Elements.ORIG_SRC_NETWORK, OrigSrcNetwork)[0]
egress_interface_node = get_xml_node(xml_node, Elements.ENGRESS_INTERFACE)
egress_interface = EgressInterface.from_xml_node(egress_interface_node) if egress_interface_node else None
translated_service = create_tagless_xml_objects_list(xml_node, Elements.TRANSLATED_SERVICE, TranslatedService)[0]
translated_dst_network = create_tagless_xml_objects_list(xml_node, Elements.TRANSLATED_DST_NETWORK, TranslatedDstNetwork)[0]
translated_src_network = create_tagless_xml_objects_list(xml_node, Elements.TRANSLATED_SRC_NETWORK, TranslatedSrcNetwork)[0]
        return cls(binding, num_id, order, uid, auto_nat, disabled, dst_nat_method, enable_net4tonet6, enable_route_lookup,
orig_dst_network, orig_service, orig_src_network, egress_interface, rule_number, service_nat_method,
src_nat_method, translated_service, translated_dst_network, translated_src_network, nat_type)
class NatRuleBinding(XML_Object_Base):
def __init__(self, default, postnat_iface, prenat_iface, rule_count, security_rule_count, uid):
self.default = default
self.postnat_iface = postnat_iface
self.prenat_iface = prenat_iface
self.rule_count = rule_count
self.security_rule_count = security_rule_count
self.uid = uid
super().__init__(Elements.BINDING)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
default = get_xml_text_value(xml_node, Elements.DEFAULT)
postnat_iface = get_xml_text_value(xml_node, Elements.POSTNAT_IFACE)
prenat_iface = get_xml_text_value(xml_node, Elements.PRENAT_IFACE)
rule_count = get_xml_text_value(xml_node, Elements.RULE_COUNT)
security_rule_count = get_xml_text_value(xml_node, Elements.SECURITY_RULE_COUNT)
uid = get_xml_text_value(xml_node, Elements.UID)
return cls(default, postnat_iface, prenat_iface, rule_count, security_rule_count, uid)
class OrigDstNetwork(Base_Object):
def __init__(self, id, uid, display_name, name):
super().__init__(Elements.ORIG_DST_NETWORK, name, display_name, id, uid)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
id = get_xml_int_value(xml_node, Elements.ID)
uid = get_xml_text_value(xml_node, Elements.UID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
return cls(id, uid, display_name, name)
class OrigService(Base_Object):
def __init__(self, id, uid, display_name, name):
super().__init__(Elements.DST_SERVICE, name, display_name, id, uid)
@classmethod
def from_xml_node(cls, xml_node):
id = get_xml_int_value(xml_node, Elements.ID)
uid = get_xml_text_value(xml_node, Elements.UID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
return cls(id, uid, display_name, name)
class OrigSrcNetwork(Base_Object):
def __init__(self, id, uid, display_name, name):
super().__init__(Elements.ORIG_SRC_NETWORK, name, display_name, id, uid)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
id = get_xml_int_value(xml_node, Elements.ID)
uid = get_xml_text_value(xml_node, Elements.UID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
return cls(id, uid, display_name, name)
class TranslatedService(Base_Object):
def __init__(self, id, uid, display_name, name):
super().__init__(Elements.TRANSLATED_SERVICE, name, display_name, id, uid)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
id = get_xml_int_value(xml_node, Elements.ID)
uid = get_xml_text_value(xml_node, Elements.UID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
return cls(id, uid, display_name, name)
class TranslatedSrcNetwork(Base_Object):
def __init__(self, id, uid, display_name, name):
super().__init__(Elements.TRANSLATED_SRC_NETWORK, name, display_name, id, uid)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
id = get_xml_int_value(xml_node, Elements.ID)
uid = get_xml_text_value(xml_node, Elements.UID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
return cls(id, uid, display_name, name)
class TranslatedDstNetwork(Base_Object):
def __init__(self, id, uid, display_name, name, dm_inline_members):
super().__init__(Elements.TRANSLATED_DST_NETWORK, name, display_name, id, uid)
if dm_inline_members is not None:
self.dm_inline_members = dm_inline_members
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
id = get_xml_int_value(xml_node, Elements.ID)
uid = get_xml_text_value(xml_node, Elements.UID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
dm_inline_members_node = get_xml_node(xml_node, Elements.DM_INLINE_MEMBRES, True)
if dm_inline_members_node:
dm_inline_members = XML_List.from_xml_node_by_tags(xml_node, Elements.DM_INLINE_MEMBRES, Elements.MEMBER,
DmInlineMember)
else:
dm_inline_members = None
return cls(id, uid, display_name, name, dm_inline_members)
class DmInlineMember(Base_Object):
def __init__(self, id, uid, display_name, name):
super().__init__(Elements.MEMBER, name, display_name, id, uid)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
id = get_xml_int_value(xml_node, Elements.ID)
uid = get_xml_text_value(xml_node, Elements.UID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
return cls(id, uid, display_name, name)
class EgressInterface(XML_Object_Base):
def __init__(self, name, id, direction, device_id, acl_name, is_global, interface_ips):
self.name = name
self.id = id
self.direction = direction
self.device_id = device_id
self.acl_name = acl_name
self.is_global = is_global
self.interface_ips = interface_ips
super().__init__(Elements.ENGRESS_INTERFACE)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
name = get_xml_text_value(xml_node, Elements.NAME)
id = get_xml_int_value(xml_node, Elements.ID)
direction = get_xml_text_value(xml_node, Elements.DIRECTION)
device_id = get_xml_text_value(xml_node, Elements.DEVICE_ID)
acl_name = get_xml_text_value(xml_node, Elements.ACL_NAME)
is_global = get_xml_text_value(xml_node, Elements.GLOBAL)
interface_ips_node = get_xml_node(xml_node, Elements.INTERFACE_IPS, True)
if interface_ips_node:
interface_ips = XML_List.from_xml_node_by_tags(xml_node, Elements.INTERFACE_IPS, Elements.INTERFACE_IP,
NatInterfaceIP)
else:
interface_ips = None
return cls(name, id, direction, device_id, acl_name, is_global, interface_ips)
class NatInterfaceIP(XML_Object_Base):
def __init__(self, ip, netmask):
self.ip = ip
self.netmask = netmask
super().__init__(Elements.INTERFACE_IP)
@classmethod
def from_xml_node(cls, xml_node):
ip = get_xml_text_value(xml_node, Elements.IP)
netmask = get_xml_text_value(xml_node, Elements.NETMASK)
return cls(ip, netmask) | apache-2.0 | -5,468,314,753,821,417,000 | 43.820598 | 132 | 0.646256 | false |
Texju/DIT-Machine_Learning | Codes/validation.py | 1 | 1983 | # -*- coding: utf-8 -*-
"""
Created on Wed May 24 15:00:15 2017
@author: Julien Couillard & Jean Thevenet
"""
from sklearn import cross_validation
from sklearn import metrics
import numpy
class MLValidation:
"""This class calculates the preciseness of our tree against a set of data"""
def __init__(self, tree):
self.__tree = tree
self.__targets = []
self.__predictions = []
def test(self, data):
""" Testing the model """
# Train our model
instances_train, target_train = self.__tree.prepareData(data.Training)
self.__tree.Tree.fit(instances_train, target_train)
# Test the model
instances_test, target_test = self.__tree.prepareData(data.Testing)
self.__targets = target_test
#Use the model to make predictions for the test set queries
self.__predictions = self.__tree.Tree.predict(instances_test)
def test_KFoldCrossValidation(self, data, k):
instances_train, target_train = self.__tree.prepareData(data.Raw)
scores=cross_validation.cross_val_score(self.__tree.Tree, instances_train, target_train, cv=k)
return scores
def testNaiveAlwaysYes(self, data):
""" Test our targets against a matrix that always return - 50000"""
self.test(data)
self.__predictions[:] = " - 50000."
def confusionMatrix(self):
if len(self.__predictions) != 0:
return metrics.confusion_matrix(self.__targets, self.__predictions)
def accuracy(self):
return metrics.accuracy_score(self.__targets, self.__predictions, normalize=True)
def accuracy_harmonic(self):
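        # f1_score needs a binary positive label, so map the income classes to
        # yes/no (" - 50000." -> "yes", " 50000+." -> "no") before scoring.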
t = self.__targets.replace(" - 50000.","yes")
t = t.replace(" 50000+.","no")
p = numpy.copy(self.__predictions)
p[p == " - 50000."] = "yes"
p[p == " 50000+."] = "no"
return metrics.f1_score(t, p, pos_label="yes") | gpl-3.0 | -9,109,253,405,910,331,000 | 32.627119 | 102 | 0.599597 | false |
brandonw/photo-album | photo_album/rotate_and_thumbs.py | 1 | 1245 | import os, sys
from PIL import Image, ExifTags
size = (128, 128)
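# For every image in the directory given as argv[1]: rotate the file in place
# according to its EXIF orientation tag (if present), then save a 128x128
# thumbnail next to it as <name>.thumb<ext>; existing thumbnails are skipped.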
for infile in os.listdir(sys.argv[1]):
inpath = os.path.join(sys.argv[1], infile)
pieces = os.path.splitext(inpath)
outpath = pieces[0] + ".thumb" + pieces[1]
if (inpath != outpath and not os.path.exists(outpath) and
'thumb' not in infile):
try:
image = Image.open(inpath)
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
e = image._getexif()
if e is not None:
exif = dict(e.items())
if orientation in exif:
if exif[orientation] == 3:
image=image.transpose(Image.ROTATE_180)
elif exif[orientation] == 6:
image = image.transpose(Image.ROTATE_270)
elif exif[orientation] == 8:
image = image.transpose(Image.ROTATE_90)
image.save(inpath)
image.thumbnail(size, Image.ANTIALIAS)
image.save(outpath, 'JPEG')
except IOError as ex:
print('cannot create thumbnail for ' + infile + ' -- ' + ex.strerror)
| bsd-3-clause | 8,823,710,597,784,660,000 | 36.727273 | 81 | 0.526104 | false |
innovation-cat/DeepLearningBook | cifar10 classification/py3/softmax.py | 1 | 4468 | # coding: utf-8
#
# softmax.py
#
# Author: Huang Anbu
# Date: 2017.3
#
# Description: Implementation of softmax classification
#
# Copyright©2017. All Rights Reserved.
# ===============================================================================================
from __future__ import print_function, division
from basiclib import *
# Model definition
class SoftmaxLayer:
def __init__ (self, input, n_input, n_output):
self.input = input
self.n_input = n_input
self.n_output = n_output
        # Initialize the weight parameters
self.W = theano.shared(
value = numpy.zeros(shape=(n_input, n_output)).astype(theano.config.floatX), name = "W", borrow = True
)
self.b = theano.shared(
value = numpy.zeros(shape=(n_output, )).astype(theano.config.floatX), name = 'b', borrow = True
)
self.params = [self.W, self.b]
        # Output probability matrix (softmax over classes)
self.p_y_given_x = T.nnet.softmax(T.dot(self.input, self.W)+self.b)
        # Predicted class (argmax over the class probabilities)
self.p_pred = T.argmax(self.p_y_given_x, axis=1)
def cross_entropy(self, y):
        # Cross-entropy loss function
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def get_cost_updates(self, y, lr, reg, optimizer_fun):
cost = self.cross_entropy(y) + 0.5*reg*((self.W**2).sum())
try:
updates = optimizer_fun(cost, self.params, lr)
except:
print("Error: no optimizer function")
else:
return (cost, updates)
def error_rate(self, y):
        # Error rate
return T.mean(T.neq(self.p_pred, y))
if __name__ == "__main__":
    # Load the input data
train_x, train_y = load_cifar10_dataset(r"./dataset/cifar-10-batches-py/data_batch_*")
test_x, test_y = load_cifar10_dataset(r"./dataset/cifar-10-batches-py/test_batch")
train_x = train_x / 255.0
test_x = test_x / 255.0
train_set_size, col = train_x.shape
test_set_size, _ = test_x.shape
    # Define the tensor variables
x = T.matrix('x').astype(theano.config.floatX)
y = T.ivector('y')
index = T.iscalar('index')
lr = T.scalar('lr', dtype=theano.config.floatX)
reg = T.scalar('reg', dtype=theano.config.floatX)
batch_size = options['batch_size']
n_train_batch = train_set_size//batch_size
n_test_batch = test_set_size//batch_size
model = SoftmaxLayer(x, col, options['n_output'])
cost, updates = model.get_cost_updates(y, lr, reg, optimizer[options["optimizer"]])
    # Build the training function
train_model = theano.function(inputs = [x, y, lr, reg], outputs = cost, updates = updates)
    # Build the test (error-rate) functions
train_err = theano.function(inputs = [x, y, lr, reg], outputs = model.error_rate(y), on_unused_input = 'ignore')
test_err = theano.function(inputs = [x, y, lr, reg], outputs = model.error_rate(y), on_unused_input = 'ignore')
idx = numpy.arange(train_set_size)
train_num = 0
best_err = 1.0
error_output = open("softmax.txt", "w")
with open("model_softmax.npz", "wb") as fout:
for epoch in range(options["n_epoch"]):
numpy.random.shuffle(idx)
new_train_x = [train_x[i] for i in idx]
new_train_y = [train_y[i] for i in idx]
for n_batch_index in range(n_train_batch):
c = train_model(
new_train_x[n_batch_index*batch_size:(n_batch_index+1)*batch_size],
new_train_y[n_batch_index*batch_size:(n_batch_index+1)*batch_size],
0.0001, 0.0
)
train_num = train_num + 1
if train_num%options["print_freq"]==0:
print("train num: %d, cost: %lf"%(train_num, c))
if train_num%options["valid_freq"]==0:
train_errors = [train_err(train_x[n_train_index*batch_size:(n_train_index+1)*batch_size], train_y[n_train_index*batch_size:(n_train_index+1)*batch_size], 0.00000001, 0.0) for n_train_index in range(n_train_batch)]
test_errors = [test_err(test_x[n_test_index*batch_size:(n_test_index+1)*batch_size], test_y[n_test_index*batch_size:(n_test_index+1)*batch_size], 0.00000001, 0.0) for n_test_index in range(n_test_batch)]
if numpy.mean(test_errors) < best_err:
best_err = numpy.mean(test_errors)
params = dict([(p.name, p.get_value()) for p in model.params])
numpy.savez(fout, params)
print("train num: %d, best train error: %lf, best test error: %lf"%(train_num, numpy.mean(train_errors), numpy.mean(test_errors)))
print("epoch %d end" % epoch)
test_errors = [test_err(test_x[n_test_index*batch_size:(n_test_index+1)*batch_size], test_y[n_test_index*batch_size:(n_test_index+1)*batch_size], 0.00000001, 0.0) for n_test_index in range(n_test_batch)]
print("%lf" % numpy.mean(test_errors), file=error_output) | mit | -4,965,939,120,042,824,000 | 33.393701 | 218 | 0.63499 | false |
Thierry46/CalcAl | database/DatabaseReaderFactory.py | 1 | 3131 | # -*- coding: utf-8 -*-
"""
************************************************************************************
Class : DatabaseReaderFactory
Author : Thierry Maillard (TMD)
Date : 13/6/2016 - 17/12/2016
    Role : Return a database reader according to the given parameters.
Licence : GPLv3
Copyright (c) 2016 - Thierry Maillard
This file is part of CalcAl project.
CalcAl project is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CalcAl project is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CalcAl project. If not, see <http://www.gnu.org/licenses/>.
************************************************************************************
"""
import os.path
class DatabaseReaderFactory():
""" Class used to get a database reader using the design pattern Factory.
"""
    # Static method decorator: getInstance() can be used
    # without instantiating the class
@staticmethod
def getInstance(configApp, dirProject, typeDatabase, connDB, dbname):
""" return a database reader """
databaseReader = None
# Test if reader plugin exists
readerBaseName = typeDatabase
if typeDatabase.startswith("Ciqual"):
readerBaseName = "Ciqual"
readersPath = os.path.join(dirProject,
configApp.get("Resources", "ReadersDir"),
readerBaseName + "_Reader.py")
if not os.path.isfile(readersPath):
raise ImportError(_("Reader for base") + " " + typeDatabase + " " + _("not found") +
" !\n" + _("Please contact support team") + " : " +
configApp.get("Version", "EmailSupport1") + " " + _("or") + " " +
configApp.get("Version", "EmailSupport2"))
if typeDatabase.startswith("Ciqual"):
from . import Ciqual_Reader
databaseReader = Ciqual_Reader.Ciqual_Reader(configApp, dirProject,
connDB, dbname, typeDatabase)
elif typeDatabase == "USDA_28":
from . import USDA_28_Reader
databaseReader = USDA_28_Reader.USDA_28_Reader(configApp, dirProject,
connDB, dbname)
else:
raise ImportError(_("Reader for base") + " " + typeDatabase + " " +
_("can not be loaded") +
" !\n" + _("Please contact support team") + " : " +
configApp.get("Version", "EmailSupport1") + " " + _("or") + " " +
configApp.get("Version", "EmailSupport2"))
return databaseReader
| gpl-3.0 | 1,505,146,703,561,911,800 | 44.376812 | 96 | 0.554136 | false |
sthesing/Podstatty | db.py | 1 | 4858 | # -*- coding: utf8 -*-
## Copyright (c) 2013 Stefan Thesing
##
##This file is part of Podstatty.
##
##Podstatty is free software: you can redistribute it and/or modify
##it under the terms of the GNU General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##Podstatty is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU General Public License for more details.
##
##You should have received a copy of the GNU General Public License
##along with Podstatty. If not, see http://www.gnu.org/licenses/.
from storm.locals import Storm, Int, Unicode, ReferenceSet
import requests
class Db:
"""
A class intended to provide handy control over the database.
"""
def __init__(self, store, base_url):
self.store = store
self.base_url = base_url
def add_file(self, filename, exclude_strings):
"""
Processes a prepared logfile and stores the data into the
database.
"""
log = open(filename)
date = filename.split("access_log_")[1]
date = date.replace("_filtered.txt", "")
if self.store.find(Stats, Stats.date_time_string == unicode(date)).count():
print "A logfile for this date has already been processed."
return None
stats =[]
for line in log:
# In the settings file, users can specify strings that are
# used as filter criteria. If the line contains this string,
# it won't be processed.
# In the beginning, we assume the line will be processed.
line_shall_be_processed = True
# 'exclude_strings' is a list of the filter criteria.
# If the line contains one of those strings, the line will
# not be processed.
for string in exclude_strings:
if string in line:
line_shall_be_processed = False
if line_shall_be_processed:
split_line = line.split()
stat = Stats(unicode(split_line[0]), int(split_line[1]), unicode(date))
stats.append(stat)
urls = []
for stat in stats:
if not stat.url in urls:
urls.append(stat.url)
for url in urls:
new_stat = Stats(url, 0, unicode(date))
for stat in stats:
if stat.url == url:
new_stat.traffic = new_stat.traffic+stat.traffic
self.store.add(new_stat)
#check if all URLs are already in table "filesizes", if not,
#get the filesize and write it into that table
self.check_url(url)
self.store.flush()
self.store.commit()
def check_url(self, url):
"""
Checks if the filesize of the file found behind this url is
already stored in the database. If not, it tries to retrieve
        the filesize by making an HTTP HEAD request and stores it into
the database.
"""
#if the url is not yet in the "Filesizes" table
if not self.store.find(Filesizes, Filesizes.url == url).count():
# Get the filesize from the server
# TODO Implement error routine
r = requests.head(self.base_url + url)
# Files no longer present on the server are removed, for now.
# TODO Maybe add an "else"-condition here and ask the user what to do?
# What about files that are no longer there but you still want to
# have them in your statistics?
if not (r.status_code == 404):
size = int(r.headers['Content-Length'])
# Write the URL and it's filesize to database
self.store.add(Filesizes(url, size))
class Stats(Storm):
"""
The table containing the actual numbers
'CREATE TABLE stats (id INTEGER PRIMARY KEY, url VARCHAR,
traffic INTEGER, date_time_string VARCHAR)'
"""
__storm_table__ = "stats"
id = Int(primary=True)
url = Unicode()
traffic = Int()
date_time_string = Unicode()
def __init__(self, url, traffic, date_time_string):
self.url = url
self.traffic = traffic
self.date_time_string = date_time_string
class Filesizes(Storm):
"""
The table containing the filesizes for each URL
'CREATE TABLE filesizes (url VARCHAR PRIMARY KEY, filesize INTEGER)'
"""
__storm_table__ = "filesizes"
url = Unicode(primary=True)
filesize = Int()
def __init__(self, url, filesize):
self.url = url
self.filesize = filesize
| gpl-3.0 | -6,791,800,694,305,659,000 | 37.251969 | 87 | 0.594072 | false |
SabaFar/plc | examples/plc-ccphy-example.py | 1 | 4082 | # -*- Mode:Python; -*-
# /*
# * Copyright (c) 2010 INRIA
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation;
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# *
# * Authors: Alexander Schloegl <[email protected]>
# */
# Chase combining phy example
import ns.plc
import ns.core
import ns.spectrum
import ns.network
def startTx(phy,p):
phy.StartTx(p)
def sendRedundancy(phy):
phy.SendRedundancy()
def receiveSuccess(packet):
print "\n*** Packet received ***\n"
def main(dummy_argv):
## Enable logging
ns.core.LogComponentEnableAll(ns.core.LOG_PREFIX_TIME)
ns.core.LogComponentEnable('PLC_Phy', ns.core.LOG_LEVEL_FUNCTION)
# ns.core.LogComponentEnable('PLC_LinkPerformanceModel', ns.core.LOG_LEVEL_LOGIC)
# ns.core.LogComponentEnable('PLC_Interference', ns.core.LOG_LEVEL_LOGIC)
## Enable packet printing
ns.network.Packet.EnablePrinting()
## Define spectrum model
sm = ns.plc.PLC_SpectrumModelHelper().GetG3SpectrumModel()
## Define time model, mains frequency: 60Hz, OFDM symbol duration: 2240us
ns.plc.PLC_Time.SetTimeModel(60, ns.core.MicroSeconds(2240))
## Define transmit power spectral density
txPsd = ns.spectrum.SpectrumValue(sm)
txPsd += 1e-8;
## Create nodes
n1 = ns.plc.PLC_Node()
n2 = ns.plc.PLC_Node()
n1.SetPosition(0,0,0)
n2.SetPosition(1000,0,0)
n1.SetName('Node1')
n2.SetName('Node2')
nodes = [n1,n2]
## Create cable type
cable = ns.plc.PLC_NAYY50SE_Cable(sm)
## Link nodes
ns.plc.PLC_Line(cable,n1,n2)
## Setup channel
channelHelper = ns.plc.PLC_ChannelHelper()
channelHelper.Install(nodes)
channel = channelHelper.GetChannel()
## Create outlets
o1 = ns.plc.PLC_Outlet(n1)
o2 = ns.plc.PLC_Outlet(n2)
## Create PHYs
phy1 = ns.plc.PLC_ChaseCombiningPhy()
phy2 = ns.plc.PLC_ChaseCombiningPhy()
## Define RX/TX impedances
txImp = ns.plc.PLC_ConstImpedance(sm, 50)
rxImp = ns.plc.PLC_ConstImpedance(sm, 50)
## Create interfaces
phy1.CreateInterfaces(o1, txPsd, txImp, rxImp)
phy2.CreateInterfaces(o2, txPsd, txImp, rxImp)
## Set background noise
noiseFloor = ns.plc.PLC_ColoredNoiseFloor(-140,38.75,-0.72,sm).GetNoisePsd()
noiseFloor += 1e-7
phy1.SetNoiseFloor(noiseFloor)
phy2.SetNoiseFloor(noiseFloor)
## Set modulation and coding scheme
phy1.SetHeaderModulationAndCodingScheme(ns.plc.BPSK_1_2)
phy2.SetHeaderModulationAndCodingScheme(ns.plc.BPSK_1_2)
phy1.SetPayloadModulationAndCodingScheme(ns.plc.QAM64_16_21)
phy2.SetPayloadModulationAndCodingScheme(ns.plc.QAM64_16_21)
## Aggregate RX-Interfaces to ns3 nodes
phy1.GetRxInterface().AggregateObject(ns.network.Node())
phy2.GetRxInterface().AggregateObject(ns.network.Node())
## Set the function to be called after successful packet reception by phy2
phy2.SetReceiveSuccessCallback(receiveSuccess)
## Calculate channels
channel.InitTransmissionChannels()
channel.CalcTransmissionChannels()
## Create packet to send
p = ns.network.Packet(128)
## Schedule chase combining transmissions
ns.core.Simulator.Schedule(ns.core.Seconds(1), startTx, phy1, p)
for i in range(1,11):
ns.core.Simulator.Schedule(ns.core.Seconds(i), sendRedundancy, phy1)
## Start simulation
ns.core.Simulator.Run()
## Cleanup simulation
ns.core.Simulator.Destroy()
if __name__ == '__main__':
import sys
main(sys.argv)
| gpl-3.0 | -16,086,139,063,123,222 | 29.691729 | 84 | 0.695492 | false |
mquad/DNN_Lab_UPF | exercise/img_classification/logreg_raw.py | 1 | 4523 | # -*- coding: utf-8 -*-
# @Author: massimo
# @Date: 2016-03-10 17:09:49
# @Last Modified by: massimo
# @Last Modified time: 2016-03-11 15:54:15
import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict
import gzip
import cPickle as pkl # python 2.x
import time
def load_gz(gz_path):
dataset = pkl.load(gzip.open(gz_path, 'rb'))
return dataset
def floatX(arr):
return np.asarray(arr, dtype=theano.config.floatX)
def init_weights(shape, sigma=0.01, name=''):
if sigma == 0:
W_bound = np.sqrt(6. / (shape[0] + shape[1]))
return theano.shared(floatX(np.random.uniform(low=-W_bound, high=W_bound, size=shape)), borrow=True, name=name)
return theano.shared(floatX(np.random.randn(*shape) * sigma), borrow=True, name=name)
def softmax(x):
# TODO: WRITE YOUR DEFINITION OF SOFTMAX HERE
pass
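    # One possible answer to the TODO above (not part of the original exercise
    # skeleton): a numerically stable softmax over the rows of x.
    e_x = T.exp(x - x.max(axis=1, keepdims=True))
    return e_x / e_x.sum(axis=1, keepdims=True)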
# import the MNIST dataset
# it has been already split into train, validation and test sets
train_data, valid_data, test_data = load_gz('../../data/mnist.pkl.gz')
train_x, train_y = train_data[0], train_data[1].astype(np.int32)
valid_x, valid_y = valid_data[0], valid_data[1].astype(np.int32)
test_x, test_y = test_data[0], test_data[1].astype(np.int32)
# define our model parameters here
n_inputs = train_x.shape[1] # number of input features
n_classes = 10 # number of output classes (it depends on the task, 10 for MNIST)
learning_rate = 0.1 # learning rate used in Stochastic Gradient Descent
batch_size = 128 # number of samples per minibatch
epochs = 10 # number of training epochs (i.e. number of passes over the entire training set)
# compute the number of minibateches
num_train_batches = -(-train_x.shape[0] // batch_size)
num_valid_batches = -(-valid_x.shape[0] // batch_size)
num_test_batches = -(-test_x.shape[0] // batch_size)
np.random.seed(2**30)
####################################################################
# TODO: write the Theano code for the Logistic Regression classifier
# You have to do the following:
# 1) define the input variables as Theano Tensors
# 2) define weights as Theano SharedVariables
# 3) define the symbolic operation to compute the predicted class probability
# the predicted output class of Logistic Regression
# 4) define the categorical cross-entropy loss
# 5) compute the gradients of each parameter w.r.t. the cross-entropy loss
# 6) define the sgd updates
# 7) finally, define the loss, training and prediction functions (call them loss_fn, train_fn and pred_fn)
# PUT YOUR CODE HERE
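# --- Reference solution sketch (not part of the original exercise skeleton). ---
# --- One possible way to fill in steps 1-7; it relies on the helpers defined ---
# --- above (floatX, init_weights, softmax) and the hyperparameters set earlier. ---
# 1) symbolic inputs
x = T.matrix('x')
y = T.ivector('y')
# 2) model parameters as shared variables
W = init_weights((n_inputs, n_classes), name='W')
b = theano.shared(floatX(np.zeros(n_classes)), name='b', borrow=True)
# 3) predicted class probabilities and predicted class
p_y_given_x = softmax(T.dot(x, W) + b)
y_pred = T.argmax(p_y_given_x, axis=1)
# 4) categorical cross-entropy loss
loss = -T.mean(T.log(p_y_given_x)[T.arange(y.shape[0]), y])
# 5) gradients of the loss w.r.t. each parameter
params = [W, b]
grads = T.grad(loss, params)
# 6) plain SGD updates
updates = OrderedDict((p, p - learning_rate * g) for p, g in zip(params, grads))
# 7) compiled loss, training and prediction functions
loss_fn = theano.function([x, y], loss)
train_fn = theano.function([x, y], loss, updates=updates)
pred_fn = theano.function([x], y_pred)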
############################################################
# This is a simple check for correctness of the entire model
# With an untrained model, predictions should be almost uniformly distributed over each class
# Thus the expected loss is -log(1/n_classes) = log(n_classes) ~ 2.3
# This simple check can reveal errors in your loss function or in other parts of your model (e.g, the softmax normalization,...)
expected = np.log(len(np.unique(train_y)))
actual = loss_fn(train_x, train_y)
print 'Expected initial loss: ', expected
print 'Actual initial loss: ', actual
# randomly shuffle the training data
shuffle_idx = np.random.permutation(train_x.shape[0])
train_x = train_x[shuffle_idx]
train_y = train_y[shuffle_idx]
print 'Training started'
t0 = time.time()
for e in range(epochs):
avg_cost = 0
for bidx in range(num_train_batches):
batch_x = train_x[bidx * batch_size: (bidx + 1) * batch_size]
batch_y = train_y[bidx * batch_size: (bidx + 1) * batch_size]
batch_cost = train_fn(batch_x, batch_y)
avg_cost += batch_cost
avg_cost /= num_train_batches
print 'Epoch: {} Loss: {:.8f}'.format(e + 1, avg_cost)
print 'Training completed in {:.2f} sec'.format(time.time() - t0)
# compute the validation accuracy (you should get values around 92%)
hits = 0
for bidx in range(num_valid_batches):
batch_x = valid_x[bidx * batch_size: (bidx + 1) * batch_size]
batch_y = valid_y[bidx * batch_size: (bidx + 1) * batch_size]
batch_y_pred = pred_fn(batch_x)
hits += np.sum(batch_y_pred == batch_y)
accuracy = np.float32(hits) / valid_y.shape[0]
print 'Valid. accuracy: {:.4f}'.format(accuracy)
# compute the test accuracy (you should get values around 92%)
hits = 0
for bidx in range(num_test_batches):
batch_x = test_x[bidx * batch_size: (bidx + 1) * batch_size]
batch_y = test_y[bidx * batch_size: (bidx + 1) * batch_size]
batch_y_pred = pred_fn(batch_x)
hits += np.sum(batch_y_pred == batch_y)
accuracy = np.float32(hits) / test_y.shape[0]
print 'Test. accuracy: {:.4f}'.format(accuracy)
| mit | 2,625,091,526,244,373,500 | 37 | 128 | 0.687749 | false |
ayepezv/GAD_ERP | addons/website_sale/models/product.py | 1 | 8355 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools, _
class ProductStyle(models.Model):
_name = "product.style"
name = fields.Char(string='Style Name', required=True)
html_class = fields.Char(string='HTML Classes')
class ProductPricelist(models.Model):
_inherit = "product.pricelist"
code = fields.Char(string='E-commerce Promotional Code')
class ProductPublicCategory(models.Model):
_name = "product.public.category"
_inherit = ["website.seo.metadata"]
_description = "Website Product Category"
_order = "sequence, name"
name = fields.Char(required=True, translate=True)
parent_id = fields.Many2one('product.public.category', string='Parent Category', index=True)
child_id = fields.One2many('product.public.category', 'parent_id', string='Children Categories')
sequence = fields.Integer(help="Gives the sequence order when displaying a list of product categories.")
# NOTE: there is no 'default image', because by default we don't show
# thumbnails for categories. However if we have a thumbnail for at least one
# category, then we display a default image on the other, so that the
# buttons have consistent styling.
# In this case, the default image is set by the js code.
image = fields.Binary(attachment=True, help="This field holds the image used as image for the category, limited to 1024x1024px.")
image_medium = fields.Binary(string='Medium-sized image', attachment=True,
help="Medium-sized image of the category. It is automatically "
"resized as a 128x128px image, with aspect ratio preserved. "
"Use this field in form views or some kanban views.")
image_small = fields.Binary(string='Small-sized image', attachment=True,
help="Small-sized image of the category. It is automatically "
"resized as a 64x64px image, with aspect ratio preserved. "
"Use this field anywhere a small image is required.")
@api.model
def create(self, vals):
tools.image_resize_images(vals)
return super(ProductPublicCategory, self).create(vals)
@api.multi
def write(self, vals):
tools.image_resize_images(vals)
return super(ProductPublicCategory, self).write(vals)
@api.constrains('parent_id')
def check_parent_id(self):
if not self._check_recursion():
raise ValueError(_('Error ! You cannot create recursive categories.'))
@api.multi
def name_get(self):
res = []
for category in self:
names = [category.name]
parent_category = category.parent_id
while parent_category:
names.append(parent_category.name)
parent_category = parent_category.parent_id
res.append((category.id, ' / '.join(reversed(names))))
return res
class ProductTemplate(models.Model):
_inherit = ["product.template", "website.seo.metadata", 'website.published.mixin', 'rating.mixin']
_order = 'website_published desc, website_sequence desc, name'
_name = 'product.template'
_mail_post_access = 'read'
website_message_ids = fields.One2many(
'mail.message', 'res_id',
domain=lambda self: ['&', ('model', '=', self._name), ('message_type', '=', 'comment')],
string='Website Comments',
)
website_description = fields.Html('Description for the website', sanitize=False, translate=True)
alternative_product_ids = fields.Many2many('product.template', 'product_alternative_rel', 'src_id', 'dest_id',
string='Suggested Products', help='Appear on the product page')
accessory_product_ids = fields.Many2many('product.product', 'product_accessory_rel', 'src_id', 'dest_id',
string='Accessory Products', help='Appear on the shopping cart')
website_size_x = fields.Integer('Size X', default=1)
website_size_y = fields.Integer('Size Y', default=1)
website_style_ids = fields.Many2many('product.style', string='Styles')
website_sequence = fields.Integer('Website Sequence', help="Determine the display order in the Website E-commerce",
default=lambda self: self._default_website_sequence())
public_categ_ids = fields.Many2many('product.public.category', string='Website Product Category',
help="Those categories are used to group similar products for e-commerce.")
availability = fields.Selection([
('empty', 'Display Nothing'),
('in_stock', 'In Stock'),
('warning', 'Warning'),
], "Availability", default='empty', help="This field is used to display a availability banner with a message on the ecommerce")
availability_warning = fields.Text("Availability Warning", translate=True)
def _default_website_sequence(self):
self._cr.execute("SELECT MIN(website_sequence) FROM %s" % self._table)
min_sequence = self._cr.fetchone()[0]
return min_sequence and min_sequence - 1 or 10
def set_sequence_top(self):
self.website_sequence = self.sudo().search([], order='website_sequence desc', limit=1).website_sequence + 1
def set_sequence_bottom(self):
self.website_sequence = self.sudo().search([], order='website_sequence', limit=1).website_sequence - 1
def set_sequence_up(self):
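        # Swap website_sequence with the next higher-sequenced template in the
        # same published state; if there is none, move this template to the top.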
previous_product_tmpl = self.sudo().search(
[('website_sequence', '>', self.website_sequence), ('website_published', '=', self.website_published)],
order='website_sequence', limit=1)
if previous_product_tmpl:
previous_product_tmpl.website_sequence, self.website_sequence = self.website_sequence, previous_product_tmpl.website_sequence
else:
self.set_sequence_top()
def set_sequence_down(self):
next_prodcut_tmpl = self.search([('website_sequence', '<', self.website_sequence), ('website_published', '=', self.website_published)], order='website_sequence desc', limit=1)
if next_prodcut_tmpl:
next_prodcut_tmpl.website_sequence, self.website_sequence = self.website_sequence, next_prodcut_tmpl.website_sequence
else:
return self.set_sequence_bottom()
@api.multi
def _compute_website_url(self):
super(ProductTemplate, self)._compute_website_url()
for product in self:
product.website_url = "/shop/product/%s" % (product.id,)
@api.multi
def display_price(self, pricelist, qty=1, public=False, **kw):
self.ensure_one()
return self.product_variant_ids and self.product_variant_ids[0].display_price(pricelist, qty=qty, public=public) or 0
class Product(models.Model):
_inherit = "product.product"
@api.multi
def website_publish_button(self):
self.ensure_one()
return self.product_tmpl_id.website_publish_button()
@api.multi
def display_price(self, pricelist, qty=1, public=False, **kw):
self.ensure_one()
partner = self.env.user.partner_id
context = {
'pricelist': pricelist.id,
'quantity': qty,
'partner': partner
}
ret = self.env.user.has_group('sale.group_show_price_subtotal') and 'total_excluded' or 'total_included'
taxes = partner.property_account_position_id.map_tax(self.taxes_id)
return taxes.compute_all(public and self.lst_price or self.with_context(context).price, pricelist.currency_id, qty, product=self, partner=partner)[ret]
class ProductAttribute(models.Model):
_inherit = "product.attribute"
type = fields.Selection([('radio', 'Radio'), ('select', 'Select'), ('color', 'Color'), ('hidden', 'Hidden')], default='radio')
class ProductAttributeValue(models.Model):
_inherit = "product.attribute.value"
html_color = fields.Char(string='HTML Color Index', oldname='color', help="Here you can set a "
"specific HTML color index (e.g. #ff0000) to display the color on the website if the "
"attibute type is 'Color'.")
| gpl-3.0 | 7,746,958,657,846,388,000 | 47.017241 | 183 | 0.641412 | false |
zstackio/zstack-woodpecker | integrationtest/vm/vpc/test_vpc_ospf_stub_md5.py | 1 | 3923 | '''
@author: Hengguo Ge
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.vpc_operations as vpc_ops
import os
import time
from itertools import izip
VPC1_VLAN, VPC1_VXLAN = ['l3VlanNetwork2', "l3VxlanNetwork12"]
VPC2_VLAN, VPC2_VXLAN = ["l3VlanNetwork3", "l3VxlanNetwork13"]
vpc_l3_list = [(VPC1_VLAN, VPC1_VXLAN), (VPC2_VLAN, VPC2_VXLAN)]
vpc_name_list = ['vpc1', 'vpc2']
case_flavor = dict(vm1_l3_vlan_vm2_l3_vlan=dict(vm1l3=VPC1_VLAN, vm2l3=VPC2_VLAN),
vm1_l3_vxlan_vm2_l3_vxlan=dict(vm1l3=VPC1_VXLAN, vm2l3=VPC2_VXLAN),
vm1_l3_vlan_vm2_l3_vxlan=dict(vm1l3=VPC1_VLAN, vm2l3=VPC2_VXLAN),
)
ospf_area_id = '1.1.1.1'
ospf_area_type = 'Stub'
area_auth_type = 'MD5'
area_auth_password = '11111111'
md5_key_id = 10
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
vr_list = []
vpc1_l3_uuid = []
vpc2_l3_uuid = []
vr_uuid = []
public_net = 'public network'
def test():
flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
test_util.test_dsc("create vpc vrouter and attach vpc l3 to vpc")
for vpc_name in vpc_name_list:
vr_list.append(test_stub.create_vpc_vrouter(vpc_name))
for vr, l3_list in izip(vr_list, vpc_l3_list):
test_stub.attach_l3_to_vpc_vr(vr, l3_list)
for vpc1_l3 in vpc_l3_list[0]:
vpc1_l3_uuid.append(test_lib.lib_get_l3_by_name(vpc1_l3).uuid)
for vpc2_l3 in vpc_l3_list[1]:
vpc2_l3_uuid.append(test_lib.lib_get_l3_by_name(vpc2_l3).uuid)
vpc1_l3_uuid.append(test_lib.lib_get_l3_by_name(public_net).uuid)
vpc2_l3_uuid.append(test_lib.lib_get_l3_by_name(public_net).uuid)
vm1, vm2 = [test_stub.create_vm_with_random_offering(vm_name='vpc_vm_{}'.format(name), l3_name=name) for name in
(flavor['vm1l3'], flavor['vm2l3'])]
[test_obj_dict.add_vm(vm) for vm in (vm1, vm2)]
[vm.check() for vm in (vm1, vm2)]
[test_stub.run_command_in_vm(vm.get_vm(), 'iptables -F') for vm in (vm1, vm2)]
test_util.test_dsc("disable snat before create ospf")
for vr in vr_list:
vpc_ops.set_vpc_vrouter_network_service_state(vr.inv.uuid, networkService='SNAT', state='disable')
test_util.test_dsc("test vm1 and vm2 connectivity without ospf")
test_stub.check_icmp_between_vms(vm1, vm2, expected_result='FAIL')
test_util.test_dsc("create ospf")
vpc_ops.create_vrouter_ospf_area(areaId=ospf_area_id, areaType=ospf_area_type, areaAuth=area_auth_type, password=area_auth_password, keyId=md5_key_id)
area_uuid = test_lib.lib_get_ospf_area_by_area_id(areaId=ospf_area_id).uuid
test_util.test_dsc("add vpc to ospf")
for vr in vr_list:
vr_uuid.append(vr.inv.uuid)
for vpc_l3 in vpc1_l3_uuid:
vpc_ops.add_vrouter_networks_to_ospf_area(vr_uuid[0], [vpc_l3], area_uuid)
for vpc_l3 in vpc2_l3_uuid:
vpc_ops.add_vrouter_networks_to_ospf_area(vr_uuid[1], [vpc_l3], area_uuid)
time.sleep(60)
test_util.test_dsc("check ospf neighbor state")
for vr in vr_uuid:
if vpc_ops.get_vrouter_ospf_neighbor(vr):
if 'Full' not in vpc_ops.get_vrouter_ospf_neighbor(vr)[0]['state']:
print vpc_ops.get_vrouter_ospf_neighbor(vr)[0]['state']
test_util.test_fail('cannot form ospf neighbor, test fail')
else:
test_util.test_fail('cannot form ospf neighbor, test fail')
test_util.test_dsc("test vm1 and vm2 connectivity with ospf")
test_stub.check_icmp_between_vms(vm1, vm2, expected_result='PASS')
test_lib.lib_error_cleanup(test_obj_dict)
test_stub.remove_all_vpc_vrouter()
test_stub.delete_all_ospf_area()
def env_recover():
test_lib.lib_error_cleanup(test_obj_dict)
test_stub.remove_all_vpc_vrouter()
test_stub.delete_all_ospf_area() | apache-2.0 | -9,100,616,342,536,122,000 | 35.333333 | 154 | 0.665817 | false |
poldracklab/cogat | cognitive/apps/atlas/forms.py | 1 | 18517 | from django import forms
from django.core.exceptions import ValidationError
from django.urls import reverse
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Div, Field, HTML, Layout, Reset, Submit
from cognitive.apps.atlas.query import Assertion, Disorder, Task, Battery, ConceptClass, Concept
import cognitive.apps.atlas.query as query
def set_field_html_name(cls, new_name):
"""
    This creates a wrapper around the normal widget rendering,
allowing for a custom field name (new_name).
"""
old_render = cls.widget.render
def _widget_render_wrapper(name, value, attrs=None):
return old_render(new_name, value, attrs)
cls.widget.render = _widget_render_wrapper
class TaskForm(forms.Form):
term_name = forms.CharField(required=True)
definition_text = forms.CharField(required=True)
class ConceptForm(forms.Form):
name = forms.CharField(required=True, label="Term:")
definition_text = forms.CharField(required=True, widget=forms.Textarea(),
label="Your Definition:")
concept_class = ConceptClass()
choices = [(x['id'], "-yes- " + str(x['name']))
for x in concept_class.all()]
choices.insert(0, (None, "-no-"))
cc_label = "In your opinion, does this concept belong to a larger class of concepts?"
concept_class = forms.ChoiceField(
choices=choices, label=cc_label, required=False)
def __init__(self, concept_id, *args, **kwargs):
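        # When the form is rendered (rather than submitted), pre-select the
        # concept class from the concept's existing CLASSIFIEDUNDER relation.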
if not args or not args[0].get('submit'):
concept = Concept()
con_class = concept.get_relation(concept_id, "CLASSIFIEDUNDER",
label="concept_class")
if con_class:
args[0]['concept_class'] = con_class[0]['id']
super(ConceptForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_action = reverse(
'update_concept', kwargs={'uid': concept_id, })
self.helper.layout = Layout(
Div(
Field('name'),
Field('definition_text'),
Field('concept_class'),
Submit('submit', 'Submit'),
Reset('concept-cancel', 'Cancel', type="reset"),
css_class="formline",
)
)
class ContrastForm(forms.Form):
name = forms.CharField(required=True)
description = forms.CharField(required=True)
class ConditionForm(forms.Form):
condition_text = forms.CharField(required=True)
condition_description = forms.CharField(required=True)
class WeightForm(forms.Form):
weight = forms.FloatField()
def __init__(self, cond_id, label, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.weight_name = cond_id
self.fields['weight'].label = label
set_field_html_name(self.fields['weight'], self.weight_name)
def clean_weight(self):
data = self.data['weight']
if not data:
raise ValidationError('Missing input')
return data
class ImplementationForm(forms.Form):
implementation_uri = forms.URLField(required=True)
implementation_name = forms.CharField(required=True)
implementation_description = forms.CharField(required=True)
def __init__(self, *args, **kwargs):
super(ImplementationForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('implementation-cancel', 'Cancel'))
class ExternalDatasetForm(forms.Form):
dataset_name = forms.CharField(required=True)
dataset_uri = forms.URLField(required=True)
def __init__(self, *args, **kwargs):
super(ExternalDatasetForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('dataset-cancel', 'Cancel'))
class IndicatorForm(forms.Form):
type = forms.CharField(required=True)
def __init__(self, *args, **kwargs):
super(IndicatorForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('indicator-cancel', 'Cancel'))
class CitationForm(forms.Form):
citation_url = forms.URLField(required=True)
citation_comment = forms.CharField(required=False)
citation_desc = forms.CharField(required=True)
citation_authors = forms.CharField(required=False)
citation_type = forms.CharField(required=False)
citation_pubname = forms.CharField(required=False)
citation_pubdate = forms.CharField(required=False)
citation_pmid = forms.CharField(required=False)
citation_source = forms.CharField(required=False)
doi = forms.CharField(required=False)
def __init__(self, *args, **kwargs):
super(CitationForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('citation-cancel', 'Cancel'))
class DisorderForm(forms.Form):
name = forms.CharField(required=True)
definition = forms.CharField(required=True, widget=forms.Textarea())
def __init__(self, *args, **kwargs):
super(DisorderForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('disorder-cancel', 'Cancel'))
class TheoryAssertionForm(forms.Form):
def __init__(self, *args, **kwargs):
super(TheoryAssertionForm, self).__init__(*args, **kwargs)
assertions = Assertion()
choices = [(x['id'], x['name']) for x in assertions.all()]
self.fields['assertions'] = forms.ChoiceField(choices=choices)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('theory-assertion-cancel', 'Cancel'))
class TaskDisorderForm(forms.Form):
def __init__(self, task_id, *args, **kwargs):
super(TaskDisorderForm, self).__init__(*args, **kwargs)
disorders = Disorder()
behaviors = query.Behavior()
traits = query.Trait()
tasks = Task()
contrasts = tasks.get_relation(task_id, "HASCONTRAST")
cont_choices = [(x['id'], x['name']) for x in contrasts]
self.fields['contrasts'] = forms.ChoiceField(choices=cont_choices)
pheno_choices = []
pheno_choices.extend(
[(x['id'], ''.join([x['name'], " (Disorder)"])) for x in disorders.all()])
pheno_choices.extend(
[(x['id'], ''.join([x['name'], " (Behavior)"])) for x in behaviors.all()])
pheno_choices.extend(
[(x['id'], ''.join([x['name'], " (Trait)"])) for x in traits.all()])
self.fields['disorders'] = forms.ChoiceField(choices=pheno_choices)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('task-disorder-cancel', 'Cancel'))
class TaskConceptForm(forms.Form):
def __init__(self, task_id, *args, **kwargs):
super(TaskConceptForm, self).__init__(*args, **kwargs)
concept = Concept()
tasks = Task()
contrasts = tasks.get_relation(task_id, "HASCONTRAST")
cont_choices = [(x['id'], x['name']) for x in contrasts]
self.fields['concept-contrasts'] = forms.ChoiceField(
choices=cont_choices)
concept_choices = [(x['id'], x['name']) for x in concept.all()]
self.fields['concept'] = forms.ChoiceField(choices=concept_choices)
self.helper = FormHelper()
self.helper.attrs = {'id': 'concept-form'}
self.helper.form_class = "hidden"
self.helper.form_action = reverse('add_task_concept',
kwargs={'uid': task_id})
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('task-concept-cancel', 'Cancel'))
class TheoryForm(forms.Form):
label = "Enter the name of the theory collection you wish to add: "
name = forms.CharField(required=True, label=label)
def __init__(self, *args, **kwargs):
super(TheoryForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.attrs = {'id': 'theory-form'}
self.helper.form_class = "hidden"
self.helper.form_action = reverse('add_theory')
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('theory-cancel', 'Cancel'))
class BatteryForm(forms.Form):
label = "Enter the name of the task collection you wish to add: "
name = forms.CharField(required=True, label=label)
def __init__(self, *args, **kwargs):
super(BatteryForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.attrs = {'id': 'battery-form'}
self.helper.form_class = "hidden"
self.helper.form_action = reverse('add_battery')
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('battery-cancel', 'Cancel', type="button"))
class ConceptTaskForm(forms.Form):
def __init__(self, *args, **kwargs):
super(ConceptTaskForm, self).__init__(*args, **kwargs)
tasks = Task()
choices = [(x['id'], x['name']) for x in tasks.all()]
self.fields['tasks'] = forms.ChoiceField(choices=choices)
self.helper = FormHelper()
self.helper.form_class = "hidden"
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('battery-cancel', 'Cancel', type="button"))
class BatteryBatteryForm(forms.Form):
def __init__(self, *args, **kwargs):
super(BatteryBatteryForm, self).__init__(*args, **kwargs)
batteries = Battery()
choices = [(x['id'], x['name']) for x in batteries.all()]
self.fields['batteries'] = forms.ChoiceField(choices=choices)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('battery-cancel', 'Cancel', type="button"))
class BatteryTaskForm(forms.Form):
def __init__(self, *args, **kwargs):
super(BatteryTaskForm, self).__init__(*args, **kwargs)
tasks = Task()
choices = [(x['id'], x['name']) for x in tasks.all()]
self.fields['tasks'] = forms.ChoiceField(choices=choices)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('battery-task-cancel', 'Cancel',
type="button"))
class ConceptContrastForm(forms.Form):
def __init__(self, task_id, concept_id, *args, **kwargs):
super(ConceptContrastForm, self).__init__(*args, **kwargs)
tasks = Task()
contrasts = tasks.get_relation(task_id, "HASCONTRAST")
choices = [(x['id'], x['name']) for x in contrasts]
self.fields['contrasts'] = forms.ChoiceField(choices=choices)
self.helper = FormHelper()
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('battery-cancel', 'Cancel', type="button"))
self.helper.form_action = reverse('add_concept_contrast',
kwargs={'uid': concept_id, 'tid': task_id})
class DisorderDisorderForm(forms.Form):
    ''' form for relating disorders to other disorders '''
type = forms.ChoiceField(
choices=[('parent', 'Parent'), ('child', 'Child')])
def __init__(self, name=None, *args, **kwargs):
super(DisorderDisorderForm, self).__init__(*args, **kwargs)
name = (name if name is not None else '')
disorders = Disorder()
type_choices = [
('parent', '{} is a kind of <selected disorder>'.format(name)),
('child', '<selected disorder> is a kind of {}'.format(name))
]
dis_choices = [(x['id'], x['name']) for x in disorders.all()]
self.fields['type'] = forms.ChoiceField(choices=type_choices)
self.fields['disorders'] = forms.ChoiceField(choices=dis_choices)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('disorder-disorder-cancel', 'Cancel'))
class ExternalLinkForm(forms.Form):
''' an external link for a node. For disorders this link may describe the
disorder in more detail'''
uri = forms.URLField(
required=True, label="Enter the full URL for the link")
def __init__(self, *args, **kwargs):
super(ExternalLinkForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('link-cancel', 'Cancel'))
class ConceptClassForm(forms.Form):
name = forms.CharField()
def __init__(self, *args, **kwargs):
super(ConceptClassForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('concept-class-cancel', 'Cancel'))
self.helper.form_action = reverse('add_concept_class')
class DisambiguationForm(forms.Form):
term1_name = forms.CharField(label="")
term1_name_ext = forms.CharField(label="")
term1_definition = forms.CharField(required=True, widget=forms.Textarea(),
label="Original Term Description")
term2_name = forms.CharField(label="")
term2_name_ext = forms.CharField(label="")
term2_definition = forms.CharField(required=True, widget=forms.Textarea(),
label="New Term Description")
def __init__(self, label, uid, term=None, *args, **kwargs):
super(DisambiguationForm, self).__init__(*args, **kwargs)
if term is not None:
self.initial = {
'term1_name': term['name'],
'term2_name': term['name'],
'term1_definition': term['definition_text']
}
self.helper = FormHelper()
self.helper.add_input(Reset('disambiguate_cancel_button', 'Cancel'))
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.form_action = reverse('add_disambiguation',
kwargs={'label': label, 'uid': uid})
self.helper.layout = Layout(
Div(
Div(
Field('term1_name', css_class='disam-name'),
HTML('('),
Field('term1_name_ext', css_class='disam-name-ext'),
HTML(')'),
css_class='name-ext-inputs'
),
Field('term1_definition', css_class='disam-def'),
Div(
Field('term2_name', css_class='disam-name'),
HTML('('),
Field('term2_name_ext', css_class='disam-name-ext'),
HTML(')'),
css_class='name-ext-inputs'
),
Field('term2_definition', css_class='disam-def'),
css_class='popstar',
)
)
class PhenotypeForm(forms.Form):
name = forms.CharField(required=True, label="Phenotype Name:")
definition = forms.CharField(required=True, widget=forms.Textarea(),
label="Description:")
choices = (("disorder", "Disorder"),
("trait", "Trait"), ("behavior", "Behavior"))
type = forms.ChoiceField(
choices=choices, label="Phenotype classification", required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('phenotype-cancel-button', 'Cancel'))
self.helper.form_action = reverse('add_phenotype')
class TraitForm(forms.Form):
name = forms.CharField(required=True, label="Phenotype Name:")
definition = forms.CharField(required=True, widget=forms.Textarea(),
label="Description:")
def __init__(self, uid, trait=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if trait is not None:
self.initial = {
'name': trait['name'],
'definition': trait['definition']
}
self.helper = FormHelper()
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('trait_cancel_button', 'Cancel'))
self.helper.form_action = reverse('update_trait', kwargs={'uid': uid})
class BehaviorForm(forms.Form):
name = forms.CharField(required=True, label="Phenotype Name:")
definition = forms.CharField(required=True, widget=forms.Textarea(),
label="Description:")
def __init__(self, uid, behavior=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if behavior is not None:
self.initial = {
'name': behavior['name'],
'definition': behavior['definition']
}
self.helper = FormHelper()
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('behavior_cancel_button', 'Cancel'))
self.helper.form_action = reverse(
'update_behavior', kwargs={'uid': uid})
class DoiForm(forms.Form):
doi = forms.CharField(required=True, label="DOI:")
def __init__(self, uid, label, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('doi-cancel-button', 'Cancel'))
self.helper.form_action = reverse('add_citation_doi',
kwargs={'label': label, 'uid': uid})
| mit | -3,327,605,317,850,539,000 | 38.736052 | 96 | 0.596155 | false |
CSAILVision/sceneparsing | evaluationCode/utils_eval.py | 1 | 1826 | import numpy as np
# This function takes the prediction and label of a single image, returns intersection and union areas for each class
# To compute over many images do:
# for i in range(Nimages):
# (area_intersection[:,i], area_union[:,i]) = intersectionAndUnion(imPred[i], imLab[i])
# IoU = 1.0 * np.sum(area_intersection, axis=1) / np.sum(np.spacing(1)+area_union, axis=1)
def intersectionAndUnion(imPred, imLab, numClass):
imPred = np.asarray(imPred)
imLab = np.asarray(imLab)
# Remove classes from unlabeled pixels in gt image.
# We should not penalize detections in unlabeled portions of the image.
imPred = imPred * (imLab>0)
# Compute area intersection:
intersection = imPred * (imPred==imLab)
(area_intersection,_) = np.histogram(intersection, bins=numClass, range=(1, numClass))
# Compute area union:
(area_pred,_) = np.histogram(imPred, bins=numClass, range=(1, numClass))
(area_lab,_) = np.histogram(imLab, bins=numClass, range=(1, numClass))
area_union = area_pred + area_lab - area_intersection
return (area_intersection, area_union)
# This function takes the prediction and label of a single image, returns pixel-wise accuracy
# To compute over many images do:
# for i = range(Nimages):
# (pixel_accuracy[i], pixel_correct[i], pixel_labeled[i]) = pixelAccuracy(imPred[i], imLab[i])
# mean_pixel_accuracy = 1.0 * np.sum(pixel_correct) / (np.spacing(1) + np.sum(pixel_labeled))
def pixelAccuracy(imPred, imLab):
imPred = np.asarray(imPred)
imLab = np.asarray(imLab)
# Remove classes from unlabeled pixels in gt image.
# We should not penalize detections in unlabeled portions of the image.
pixel_labeled = np.sum(imLab>0)
pixel_correct = np.sum((imPred==imLab)*(imLab>0))
pixel_accuracy = 1.0 * pixel_correct / pixel_labeled
return (pixel_accuracy, pixel_correct, pixel_labeled)
| bsd-3-clause | -2,552,586,764,081,443,000 | 42.47619 | 117 | 0.727273 | false |
hexid/WordGenerator | GenerateChain.py | 1 | 1975 | #!/bin/env python
# usage: depth inFile [outFile]
def generateChain(depth, inFile):
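    # Build a character-level Markov chain of the given depth from the words in
    # inFile: a tree of nested dicts keyed by the preceding characters, whose
    # leaves count how often each next character (or the '.' end marker) occurs.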
import collections, re
numChar, endChar = '#', '.'
regexWord = re.compile('^[a-z]+$')
depthRange = range(depth - 1)
padStr = ' ' * (depth - 1)
chars = collections.deque(maxlen = depth) # limit to depth chars
def NestedDict(): return collections.defaultdict(NestedDict)
rootNode = NestedDict() # create a tree of dictionaries
rootNode['depth'] = depth # set the depth of the chain
curNode, curChar = None, None
with open(inFile, 'r') as f:
for word in f.read().split():
if regexWord.match(word):
chars.extend(padStr) # reset chars for the new word
for curChar in "%s%s" % (word, endChar):
chars.append(curChar) # add the next character
curNode = rootNode # start at the root of the tree
for n in depthRange: # traverse down the tree
curNode = curNode[chars[n]]
# increment the total for the leaves on the branch
curNode[numChar] = curNode.get(numChar, 0) + 1
# increment the total for the current leaf
curNode[curChar] = curNode.get(curChar, 0) + 1
return rootNode
def writeToFile(chain, outFile):
with open(outFile, 'w') as f:
import json # write the json data to outFile
# the json data will be sorted and compressed to save space
f.write(json.dumps(chain, sort_keys=True, separators=(',',':')))
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('depth', metavar='depth', type=int, help='The length of any given chain')
parser.add_argument('inFile', type=str, help='Input dictionary file')
parser.add_argument('outFile', type=str, nargs='?', default='_markov.json', help='Output JSON file (default = _markov.json)')
(args, unknown) = parser.parse_known_args()
chain = generateChain(args.depth, args.inFile)
writeToFile(chain, args.outFile)
if __name__ == "__main__":
main()
| mit | -1,638,003,219,230,873,600 | 36.980769 | 127 | 0.658734 | false |
smartforceplus/SmartForceplus | .local/share/Odoo/addons/8.0/builder/models/demo/base.py | 1 | 5437 | import json
import pickle
import os
import random
from openerp import models, api, fields, _
class GeneratorInterface(models.AbstractModel):
_name = 'builder.ir.model.demo.generator.base'
_description = 'Generator Interface'
@api.multi
def get_generator(self, field):
raise NotImplementedError
@api.multi
def action_save(self):
return {'type': 'ir.actions.act_window_close'}
_demo_data = {}
@api.model
def get_demo_data(self, filename=None, dataFormat='json'):
if filename is None:
filename = "{name}.json".format(name=self.subclass_model)
if filename not in self._demo_data:
fullname = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', filename))
if os.path.exists(fullname):
try:
if dataFormat == 'json':
self._demo_data[filename] = json.loads(open(fullname).read())
else:
self._demo_data[filename] = open(fullname).read()
except Exception, e:
return {}
return self._demo_data.get(filename, {})
class Generator(models.Model):
_name = 'builder.ir.model.demo.generator'
_description = 'Generic Generator'
_inherit = ['ir.mixin.polymorphism.superclass', 'builder.ir.model.demo.generator.base']
_order = 'module_id asc, model_id asc'
_target_type = 'char'
model_id = fields.Many2one('builder.ir.model', ondelete='cascade')
module_id = fields.Many2one('builder.ir.module.module', 'Module', related='model_id.module_id', ondelete='cascade',
store=True)
type = fields.Char('Type', compute='_compute_type')
target_fields_type = fields.Char('Target Fields Type', compute='_compute_target_fields_type')
field_ids = fields.Many2many(
comodel_name='builder.ir.model.fields',
relation='builder_model_demo_fields_rel',
column1='generator_id',
column2='field_id',
string='Fields',
)
field_names = fields.Char('Field Names', compute='_compute_field_names', store=True)
allow_nulls = fields.Boolean('Allow Null Values', help='If the field is not required allow to generate null values for them.')
_defaults = {
'subclass_model': lambda s, c, u, cxt=None: s._name
}
@api.multi
def generate_null_values(self, field):
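        # If nulls are allowed, optional fields get a null value with probability
        # 1 / (demo_records + 1), so roughly one generated record stays empty.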
if self.allow_nulls and not field.required:
return random.random() <= (1.0 / (self.model_id.demo_records + 1))
return False
@api.one
@api.depends('subclass_model')
def _compute_type(self):
data = dict(self.get_generators())
self.type = data.get(self.subclass_model, _('Unknown'))
@api.one
@api.depends('field_ids.name')
def _compute_field_names(self):
self.field_names = ', '.join([field.name for field in self.field_ids])
@api.one
@api.depends('subclass_model')
def _compute_target_fields_type(self):
self.target_fields_type = self.env[self.subclass_model]._model._target_type
@api.model
def get_generators(self):
ms = self.env['ir.model'].search([
('model', 'ilike', 'builder.ir.model.demo.generator.%'),
('model', 'not in', ['builder.ir.model.demo.generator.base', 'builder.ir.model.demo.generator'])
])
return [
(model.model, model.name)
for model in ms
]
@api.one
def get_generator(self, field):
return self.get_instance().get_generator(field)
@api.multi
def action_open_view(self):
model = self._model
action = model.get_formview_action(self.env.cr, self.env.uid, self.ids, self.env.context)
action.update({'target': 'new'})
return action
class IrModel(models.Model):
_name = 'builder.ir.model'
_inherit = ['builder.ir.model']
demo_records = fields.Integer('Demo Records')
demo_data_ids = fields.One2many(
comodel_name='builder.ir.model.demo.generator',
inverse_name='model_id',
string='Demo Data',
copy=True,
)
demo_xml_id_sample = fields.Text(compute='_compute_demo_xml_id_sample', store=True)
@api.one
@api.depends('demo_records', 'model')
def _compute_demo_xml_id_sample(self):
tmpl = '{model}_'.format(model=self.model.lower().replace('.', '_')) + '{id}' if self.model else 'model_'
self.demo_xml_id_sample = pickle.dumps([tmpl.format(id=i) for i in xrange(self.demo_records)])
@api.multi
def demo_xml_id(self, index):
return pickle.loads(self.demo_xml_id_sample)[index]
_field_generators = None
@property
def field_generators(self, reload=False):
if not self._field_generators or reload:
result = {}
for generator in self.demo_data_ids:
for field in generator.field_ids:
if field.name not in result:
result[field.name] = generator.instance.get_generator(field)
self._field_generators = result
return self._field_generators
class IrModule(models.Model):
_name = 'builder.ir.module.module'
_inherit = ['builder.ir.module.module']
demo_data_ids = fields.One2many(
comodel_name='builder.ir.model.demo.generator',
inverse_name='module_id',
string='Demo Data',
copy=True,
)
| agpl-3.0 | 1,007,184,603,457,756,800 | 32.98125 | 130 | 0.606401 | false |
akariv/redash | redash/handlers/data_sources.py | 1 | 4792 | from flask import make_response, request
from flask_restful import abort
from funcy import project
from redash import models
from redash.utils.configuration import ConfigurationContainer, ValidationError
from redash.permissions import require_admin, require_permission, require_access, view_only
from redash.query_runner import query_runners, get_configuration_schema_for_query_runner_type
from redash.handlers.base import BaseResource, get_object_or_404
class DataSourceTypeListResource(BaseResource):
@require_admin
def get(self):
return [q.to_dict() for q in sorted(query_runners.values(), key=lambda q: q.name())]
class DataSourceResource(BaseResource):
@require_admin
def get(self, data_source_id):
data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
return data_source.to_dict(all=True)
@require_admin
def post(self, data_source_id):
data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
req = request.get_json(True)
schema = get_configuration_schema_for_query_runner_type(req['type'])
if schema is None:
abort(400)
try:
data_source.options.set_schema(schema)
data_source.options.update(req['options'])
except ValidationError:
abort(400)
data_source.type = req['type']
data_source.name = req['name']
data_source.save()
return data_source.to_dict(all=True)
@require_admin
def delete(self, data_source_id):
data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
data_source.delete_instance(recursive=True)
return make_response('', 204)
class DataSourceListResource(BaseResource):
@require_permission('list_data_sources')
def get(self):
if self.current_user.has_permission('admin'):
data_sources = models.DataSource.all(self.current_org)
else:
data_sources = models.DataSource.all(self.current_org, groups=self.current_user.groups)
response = {}
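        # Deduplicate sources shared by several groups; a source is view-only
        # only when every one of the user's groups grants view-only access.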
for ds in data_sources:
if ds.id in response:
continue
d = ds.to_dict()
d['view_only'] = all(project(ds.groups, self.current_user.groups).values())
response[ds.id] = d
return sorted(response.values(), key=lambda d: d['id'])
@require_admin
def post(self):
req = request.get_json(True)
required_fields = ('options', 'name', 'type')
for f in required_fields:
if f not in req:
abort(400)
schema = get_configuration_schema_for_query_runner_type(req['type'])
if schema is None:
abort(400)
config = ConfigurationContainer(req['options'], schema)
if not config.is_valid():
abort(400)
datasource = models.DataSource.create_with_group(org=self.current_org,
name=req['name'],
type=req['type'],
options=config)
self.record_event({
'action': 'create',
'object_id': datasource.id,
'object_type': 'datasource'
})
return datasource.to_dict(all=True)
class DataSourceSchemaResource(BaseResource):
def get(self, data_source_id):
data_source = get_object_or_404(models.DataSource.get_by_id_and_org, data_source_id, self.current_org)
require_access(data_source.groups, self.current_user, view_only)
schema = data_source.get_schema()
return schema
class DataSourcePauseResource(BaseResource):
@require_admin
def post(self, data_source_id):
data_source = get_object_or_404(models.DataSource.get_by_id_and_org, data_source_id, self.current_org)
data = request.get_json(force=True, silent=True)
if data:
reason = data.get('reason')
else:
reason = request.args.get('reason')
data_source.pause(reason)
data_source.save()
self.record_event({
'action': 'pause',
'object_id': data_source.id,
'object_type': 'datasource'
})
return data_source.to_dict()
@require_admin
def delete(self, data_source_id):
data_source = get_object_or_404(models.DataSource.get_by_id_and_org, data_source_id, self.current_org)
data_source.resume()
data_source.save()
self.record_event({
'action': 'resume',
'object_id': data_source.id,
'object_type': 'datasource'
})
return data_source.to_dict()
| bsd-2-clause | 5,638,463,786,395,855,000 | 32.51049 | 110 | 0.603297 | false |
hiidef/hiicart | hiicart/tests/paypal_express.py | 1 | 2736 | import base
from hiicart.gateway.paypal_express.gateway import PaypalExpressCheckoutGateway
STORE_SETTINGS = {
'API_USERNAME': 'sdk-three_api1.sdk.com',
'API_PASSWORD': 'QFZCWN5HZM8VBG7Q',
'API_SIGNATURE': 'A-IzJhZZjhg29XQ2qnhapuwxIDzyAZQ92FRP5dqBzVesOkzbdUONzmOU',
'RETURN_URL': 'http://goodsietest.com/return_url',
'CANCEL_URL': 'http://goodsietest.com/cancel_url',
'FINALIZE_URL': 'http://goodsietest.com/finalize_url',
'COMPLETE_URL': 'http://goodsietest.com/complete_url'
}
class PaypalExpressCheckoutTestCase(base.HiiCartTestCase):
"""Paypal Express Checkout tests"""
def test_submit(self):
"""Submit a cart to express checkout."""
self.cart.hiicart_settings.update(STORE_SETTINGS)
self.assertEquals(self.cart.state, "OPEN")
result = self.cart.submit("paypal_express", False, {'request': None})
self.assertEqual(result.type, "url")
self.assertNotEqual(result.url, None)
self.assertEqual(self.cart.state, "SUBMITTED")
def test_submit_recurring(self):
"""Test submitting a cart with recurring items to express checkout"""
self.cart.hiicart_settings.update(STORE_SETTINGS)
self._add_recurring_item()
self.assertEquals(self.cart.state, "OPEN")
result = self.cart.submit("paypal_express", False, {'request': None})
self.assertEqual(result.type, "url")
self.assertEqual(self.cart.state, "SUBMITTED")
token = result.session_args['hiicart_paypal_express_token']
self.assertNotEqual(token, None)
def test_update_cart_details(self):
pp_params = {
'PAYMENTREQUEST_0_SHIPTONAME': 'Dmitri Shostakovich',
'PAYMENTREQUEST_0_SHIPTOSTREET': '321 Blast Off Lane',
'PAYMENTREQUEST_0_SHIPTOSTREET2': 'Apt 456',
'PAYMENTREQUEST_0_SHIPTOCITY': 'New Moscow',
'PAYMENTREQUEST_0_SHIPTOSTATE': 'AK',
'PAYMENTREQUEST_0_SHIPTOZIP': '90210',
'PAYMENTREQUEST_0_SHIPTOCOUNTRYCODE': 'US'
}
self.cart.hiicart_settings.update(STORE_SETTINGS)
gateway = PaypalExpressCheckoutGateway(self.cart)
gateway._update_cart_details(pp_params)
self.assertEqual(self.cart.ship_first_name, 'Dmitri')
self.assertEqual(self.cart.ship_last_name, 'Shostakovich')
self.assertEqual(self.cart.ship_street1, '321 Blast Off Lane')
self.assertEqual(self.cart.ship_street2, 'Apt 456')
self.assertEqual(self.cart.ship_city, 'New Moscow')
self.assertEqual(self.cart.ship_state, 'AK')
self.assertEqual(self.cart.ship_postal_code, '90210')
self.assertEqual(self.cart.ship_country, 'US')
| mit | -8,636,027,602,864,837,000 | 43.129032 | 84 | 0.659357 | false |
marmarek/qubes-core-admin | qubes/vm/adminvm.py | 1 | 9608 | #
# The Qubes OS Project, https://www.qubes-os.org/
#
# Copyright (C) 2010-2015 Joanna Rutkowska <[email protected]>
# Copyright (C) 2013-2015 Marek Marczykowski-Górecki
# <[email protected]>
# Copyright (C) 2014-2015 Wojtek Porczyk <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#
''' This module contains the AdminVM implementation '''
import asyncio
import subprocess
import libvirt
import qubes
import qubes.exc
import qubes.vm
from qubes.vm.qubesvm import _setter_kbd_layout
class AdminVM(qubes.vm.BaseVM):
'''Dom0'''
dir_path = None
name = qubes.property('name',
default='dom0', setter=qubes.property.forbidden)
qid = qubes.property('qid',
default=0, type=int, setter=qubes.property.forbidden)
uuid = qubes.property('uuid',
default='00000000-0000-0000-0000-000000000000',
setter=qubes.property.forbidden)
default_dispvm = qubes.VMProperty('default_dispvm',
load_stage=4,
allow_none=True,
default=(lambda self: self.app.default_dispvm),
doc='Default VM to be used as Disposable VM for service calls.')
include_in_backups = qubes.property('include_in_backups',
default=True, type=bool,
doc='If this domain is to be included in default backup.')
updateable = qubes.property('updateable',
default=True,
type=bool,
setter=qubes.property.forbidden,
doc='True if this machine may be updated on its own.')
# for changes in keyboard_layout, see also the same property in QubesVM
keyboard_layout = qubes.property(
'keyboard_layout',
type=str,
setter=_setter_kbd_layout,
default='us++',
doc='Keyboard layout for this VM')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._qdb_connection = None
self._libvirt_domain = None
if not self.app.vmm.offline_mode:
self.start_qdb_watch()
def __str__(self):
return self.name
def __lt__(self, other):
# order dom0 before anything
return self.name != other.name
@property
def attached_volumes(self):
return []
@property
def xid(self):
'''Always ``0``.
.. seealso:
:py:attr:`qubes.vm.qubesvm.QubesVM.xid`
'''
return 0
@qubes.stateless_property
def icon(self): # pylint: disable=no-self-use
"""freedesktop icon name, suitable for use in
:py:meth:`PyQt4.QtGui.QIcon.fromTheme`"""
return 'adminvm-black'
@property
def libvirt_domain(self):
'''Libvirt object for dom0.
.. seealso:
:py:attr:`qubes.vm.qubesvm.QubesVM.libvirt_domain`
'''
if self._libvirt_domain is None:
self._libvirt_domain = self.app.vmm.libvirt_conn.lookupByID(0)
return self._libvirt_domain
@staticmethod
def is_running():
'''Always :py:obj:`True`.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.is_running`
'''
return True
@staticmethod
def is_halted():
'''Always :py:obj:`False`.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.is_halted`
'''
return False
@staticmethod
def get_power_state():
'''Always ``'Running'``.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.get_power_state`
'''
return 'Running'
@staticmethod
def get_mem():
'''Get current memory usage of Dom0.
Unit is KiB.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.get_mem`
'''
# return psutil.virtual_memory().total/1024
with open('/proc/meminfo') as file:
for line in file:
if line.startswith('MemTotal:'):
return int(line.split(':')[1].strip().split()[0])
raise NotImplementedError()
def get_mem_static_max(self):
'''Get maximum memory available to Dom0.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.get_mem_static_max`
'''
if self.app.vmm.offline_mode:
# default value passed on xen cmdline
return 4096
try:
return self.app.vmm.libvirt_conn.getInfo()[1]
except libvirt.libvirtError as e:
self.log.warning('Failed to get memory limit for dom0: %s', e)
return 4096
def get_cputime(self):
'''Get total CPU time burned by Dom0 since start.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.get_cputime`
'''
try:
return self.libvirt_domain.info()[4]
except libvirt.libvirtError as e:
self.log.warning('Failed to get CPU time for dom0: %s', e)
return 0
def verify_files(self):
'''Always :py:obj:`True`
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.verify_files`
''' # pylint: disable=no-self-use
return True
def start(self, start_guid=True, notify_function=None,
mem_required=None):
'''Always raises an exception.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.start`
''' # pylint: disable=unused-argument,arguments-differ
raise qubes.exc.QubesVMNotHaltedError(
self, 'Cannot start Dom0 fake domain!')
def suspend(self):
'''Does nothing.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.suspend`
'''
raise qubes.exc.QubesVMError(self, 'Cannot suspend Dom0 fake domain!')
def shutdown(self):
'''Does nothing.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.shutdown`
'''
raise qubes.exc.QubesVMError(self, 'Cannot shutdown Dom0 fake domain!')
def kill(self):
'''Does nothing.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.kill`
'''
raise qubes.exc.QubesVMError(self, 'Cannot kill Dom0 fake domain!')
@property
def untrusted_qdb(self):
'''QubesDB handle for this domain.'''
if self._qdb_connection is None:
import qubesdb # pylint: disable=import-error
self._qdb_connection = qubesdb.QubesDB(self.name)
return self._qdb_connection
async def run_service(self, service, source=None, user=None,
filter_esc=False, autostart=False, gui=False, **kwargs):
'''Run service on this VM
:param str service: service name
:param qubes.vm.qubesvm.QubesVM source: source domain as presented to
this VM
:param str user: username to run service as
:param bool filter_esc: filter escape sequences to protect terminal \
emulator
:param bool autostart: if :py:obj:`True`, machine will be started if \
it is not running
:param bool gui: when autostarting, also start gui daemon
:rtype: asyncio.subprocess.Process
.. note::
User ``root`` is redefined to ``SYSTEM`` in the Windows agent code
'''
# pylint: disable=unused-argument
source = 'dom0' if source is None else self.app.domains[source].name
if filter_esc:
raise NotImplementedError(
'filter_esc=True not supported on calls to dom0')
if user is None:
user = 'root'
await self.fire_event_async('domain-cmd-pre-run', pre_event=True,
start_guid=gui)
if user != 'root':
cmd = ['runuser', '-u', user, '--']
else:
cmd = []
cmd.extend([
qubes.config.system_path['qrexec_rpc_multiplexer'],
service,
source,
'name',
self.name,
])
return (await asyncio.create_subprocess_exec(
*cmd,
**kwargs))
async def run_service_for_stdio(self, *args, input=None, **kwargs):
'''Run a service, pass an optional input and return (stdout, stderr).
Raises an exception if return code != 0.
*args* and *kwargs* are passed verbatim to :py:meth:`run_service`.
.. warning::
There are some combinations if stdio-related *kwargs*, which are
not filtered for problems originating between the keyboard and the
chair.
''' # pylint: disable=redefined-builtin
kwargs.setdefault('stdin', subprocess.PIPE)
kwargs.setdefault('stdout', subprocess.PIPE)
kwargs.setdefault('stderr', subprocess.PIPE)
p = await self.run_service(*args, **kwargs)
# this one is actually a tuple, but there is no need to unpack it
stdouterr = await p.communicate(input=input)
if p.returncode:
raise subprocess.CalledProcessError(p.returncode,
args[0], *stdouterr)
return stdouterr
| lgpl-2.1 | 412,797,591,439,523,840 | 29.791667 | 79 | 0.596128 | false |
danielsunzhongyuan/my_leetcode_in_python | lowest_common_ancestor_of_a_binary_tree_236.py | 1 | 1928 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
# Solution One: 99ms
# stack = [root]
# parent = {root: None}
# while p not in parent or q not in parent:
# node = stack.pop()
# if node.left:
# parent[node.left] = node
# stack.append(node.left)
# if node.right:
# parent[node.right] = node
# stack.append(node.right)
# ancestor_of_p = []
# while p:
# ancestor_of_p.append(p)
# p = parent[p]
# while q not in ancestor_of_p:
# q = parent[q]
# return q
# Solution Two:
if root in (None, p, q):
return root
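        # Recurse into both subtrees: if p and q turn up in different subtrees,
        # root is their lowest common ancestor; otherwise the non-None side
        # already holds the answer.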
left, right = (self.lowestCommonAncestor(kid, p, q) for kid in (root.left, root.right))
return root if left and right else left or right
def lowestCommonAncestor2(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
stack = [root]
parent = {root: None}
while p not in parent or q not in parent:
node = stack.pop()
if node.left:
parent[node.left] = node
stack.append(node.left)
if node.right:
parent[node.right] = node
stack.append(node.right)
ancestor_of_p = []
while p:
ancestor_of_p.append(p)
p = parent[p]
while q not in ancestor_of_p:
q = parent[q]
return q
| apache-2.0 | -4,629,820,916,250,762,000 | 28.661538 | 95 | 0.48029 | false |
beyondvalence/scratch | chp01.py | 1 | 1697 | # !/usr/bin/env python27
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 2 16:15:29 2016
@author: waynetliu
"""
#%%
from __future__ import division
users = [
{ "id": 0, "name": "Hero" },
{ "id": 1, "name": "Dunn" },
{ "id": 2, "name": "Sue" },
{ "id": 3, "name": "Chi" },
{ "id": 4, "name": "Thor" },
{ "id": 5, "name": "Clive" },
{ "id": 6, "name": "Hicks" },
{ "id": 7, "name": "Devin" },
{ "id": 8, "name": "Kate" },
{ "id": 9, "name": "Klein" }
]
print users
#%%
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
#%%
for user in users:
user["friends"]=[]
#%% appends together, not separately
for i, j in friendships:
users[i]["friends"].append(users[j]) # adds j as a friend of i
users[j]["friends"].append(users[i]) # adds i as a friend of j
print"\n", "users", "\n\n", users
#%%
# for first two friendship tuples
# hero - dunn, sue
# dunn - hero
# sue - hero (dunn)
def number_of_friends(user):
"""how many friends does user have?"""
return len(users["friends"]) # length of friends_id list
total_connections = sum(number_of_friends(user) for user in users) # 24
print "total connections: ", total_connections
num_users = len(users)
avg_connections = total_connections / num_users # 24 / 10 == 2.4
# create a list of (ids, number of friends)
num_friends_by_id = [(users["id"], number_of_friends(user))
for user in users]
num_friends_by_id = sorted(num_friends_by_id,
                           key=lambda (user_id, num_friends): num_friends,
                           reverse=True)
print "\n", num_friends_by_id
| bsd-2-clause | 5,344,159,609,228,425,000 | 27.283333 | 71 | 0.524455 | false |
arcyfelix/ML-DL-AI | Supervised Learning/GANs/GAN.py | 1 | 3364 | # -*- coding: utf-8 -*-
""" GAN Example
Use a generative adversarial network (GAN) to generate digit images from a
noise distribution.
References:
- Generative adversarial nets. I Goodfellow, J Pouget-Abadie, M Mirza,
B Xu, D Warde-Farley, S Ozair, Y. Bengio. Advances in neural information
processing systems, 2672-2680.
Links:
- [GAN Paper](https://arxiv.org/pdf/1406.2661.pdf).
"""
from __future__ import division, print_function, absolute_import
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tflearn
# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data()
image_dim = 784 # 28*28 pixels
z_dim = 200 # Noise data points
total_samples = len(X)
# Generator
def generator(x, reuse=False):
with tf.variable_scope('Generator', reuse=reuse):
x = tflearn.fully_connected(x, 256, activation='relu')
x = tflearn.fully_connected(x, image_dim, activation='sigmoid')
return x
# Discriminator
def discriminator(x, reuse=False):
with tf.variable_scope('Discriminator', reuse=reuse):
x = tflearn.fully_connected(x, 256, activation='relu')
x = tflearn.fully_connected(x, 1, activation='sigmoid')
return x
# Build Networks
gen_input = tflearn.input_data(shape=[None, z_dim], name='input_noise')
disc_input = tflearn.input_data(shape=[None, 784], name='disc_input')
gen_sample = generator(gen_input)
disc_real = discriminator(disc_input)
disc_fake = discriminator(gen_sample, reuse=True)
# Define Loss
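# The discriminator maximizes log D(x) + log(1 - D(G(z))); the generator
# minimizes -log D(G(z)) (the non-saturating heuristic from the GAN paper).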
disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))
gen_loss = -tf.reduce_mean(tf.log(disc_fake))
# Build Training Ops for both Generator and Discriminator.
# Each network optimization should only update its own variable, thus we need
# to retrieve each network variables (with get_layer_variables_by_scope) and set
# 'placeholder=None' because we do not need to feed any target.
gen_vars = tflearn.get_layer_variables_by_scope('Generator')
gen_model = tflearn.regression(gen_sample, placeholder=None, optimizer='adam',
loss=gen_loss, trainable_vars=gen_vars,
batch_size=64, name='target_gen', op_name='GEN')
disc_vars = tflearn.get_layer_variables_by_scope('Discriminator')
disc_model = tflearn.regression(disc_real, placeholder=None, optimizer='adam',
loss=disc_loss, trainable_vars=disc_vars,
batch_size=64, name='target_disc', op_name='DISC')
# Define GAN model, that output the generated images.
gan = tflearn.DNN(gen_model)
# Training
# Generate noise to feed to the generator
z = np.random.uniform(-1., 1., size=[total_samples, z_dim])
# Start training, feed both noise and real images.
gan.fit(X_inputs={gen_input: z, disc_input: X},
Y_targets=None,
n_epoch=100)
# Generate images from noise, using the generator network.
f, a = plt.subplots(2, 10, figsize=(10, 4))
for i in range(10):
for j in range(2):
# Noise input.
z = np.random.uniform(-1., 1., size=[1, z_dim])
# Generate image from noise. Extend to 3 channels for matplot figure.
temp = [[ii, ii, ii] for ii in list(gan.predict([z])[0])]
a[j][i].imshow(np.reshape(temp, (28, 28, 3)))
f.show()
plt.draw()
plt.waitforbuttonpress() | apache-2.0 | 2,824,382,926,397,684,000 | 36.388889 | 82 | 0.678062 | false |