repo_name (string, 5–100 chars) | path (string, 4–299 chars) | copies (string, 990 classes) | size (string, 4–7 chars) | content (string, 666–1.03M chars) | license (string, 15 classes) | hash (int64) | line_mean (float64, 3.17–100) | line_max (int64, 7–1k) | alpha_frac (float64, 0.25–0.98) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
Pehrsons/boxee-tv4play | tests/testApi4.py | 1 | 4782 | #encoding:utf-8
# Must be run from same folder for relative import path to work!
# Run with python 2.4 for full boxee compatibility
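# Illustrative invocation (interpreter name and working directory are assumptions
# about the local setup, following the two notes above):
#   cd tests && python2.4 testApi4.py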
import sys
sys.path.append("../tv.boxeeplay.tv4play2/")
from api4 import Api4Client as ApiClient
from api4_mc import category_to_list_item, show_to_list_item, episode_to_list_item, episode_list_item_to_playable
from logger import Level, SetEnabledPlus
from pirateplay import NoSuitableStreamError, NoStreamsError, pirateplayable_item
import itertools
import ip_info
import unittest
class IntegrationTestApi4(unittest.TestCase):
def setUp(self):
SetEnabledPlus(Level.DEBUG, True)
self.client = ApiClient()
def test_fetch_categories(self):
categories = []
categories.extend(self.client.categories)
self.assertTrue(len(categories) > 3)
def test_convert_categories(self):
mc_categories = itertools.imap(category_to_list_item, self.client.categories)
mc_cat_list = []
mc_cat_list.extend(mc_categories)
self.assertEquals(len(self.client.categories), len(mc_cat_list))
for cat in mc_cat_list:
self.assertCategory(cat)
def test_fetch_shows(self):
categories = self.client.categories
shows = []
shows.extend(self.client.get_shows(categories[2]))
self.assertTrue(len(shows) > 0)
def test_convert_shows(self):
categories = self.client.categories
shows = []
shows.extend(itertools.imap(show_to_list_item, self.client.get_shows(categories[2])))
for show in shows:
self.assertShow(show)
def test_fetch_episodes(self):
categories = self.client.categories
shows = []
shows.extend(self.client.get_shows(categories[3]))
episodes = []
episodes.extend(itertools.islice(self.client.get_episodes(shows[7]), 1000))
self.assertTrue(len(episodes) > 0)
def test_convert_episodes(self):
categories = self.client.categories
shows = []
shows.extend(self.client.get_shows(categories[4]))
episodes = []
episodes.extend(itertools.islice(self.client.get_episodes(shows[7]), 150))
mc_episodes = []
mc_episodes.extend(itertools.imap(episode_to_list_item, episodes))
for episode in mc_episodes:
self.assertEpisode(episode)
def test_latest_full_episodes(self):
episodes = []
episodes.extend(itertools.islice(self.client.get_latest_full_episodes(), 140))
self.assertEquals(len(episodes), 140)
mc_episodes = []
mc_episodes.extend(itertools.imap(episode_to_list_item, episodes))
for episode in mc_episodes:
self.assertEpisode(episode)
self.assertEquals(episode.GetProperty("episode"), "true")
try:
episode = episode_list_item_to_playable(episode)
self.assertString(pirateplayable_item(episode).GetPath())
except NoSuitableStreamError, e:
print str(e)
self.assertTrue(False)
except NoStreamsError, e:
print str(e)
self.assertTrue(False)
def test_latest_episodes(self):
episodes = []
episodes.extend(itertools.islice(self.client.get_latest_episodes(), 40))
self.assertEquals(len(episodes), 40)
mc_episodes = []
mc_episodes.extend(itertools.imap(episode_to_list_item, episodes))
for episode in mc_episodes:
self.assertEpisode(episode)
def assertCategory(self, category):
self.assertString(category.GetTitle())
self.assertString(category.GetLabel())
self.assertString(category.GetProperty("id"))
def assertShow(self, show):
self.assertString(show.GetTitle())
self.assertString(show.GetLabel())
self.assertString(show.GetDescription())
self.assertString(show.GetProperty("id"))
self.assertString(show.GetProperty("category"))
def assertEpisode(self, episode):
self.assertString(episode.GetTitle())
self.assertString(episode.GetLabel())
self.assertString(episode.GetDescription())
self.assertString(episode.GetStudio())
self.assertString(episode.GetProperty("category"))
self.assertTrue(episode.GetProperty("category") != "undefined")
self.assertString(episode.GetProperty("show"))
self.assertTrue(episode.GetProperty("show") != "undefined")
self.assertString(episode.GetProperty("id"))
def assertString(self, obj):
self.assertTrue(isinstance(obj, basestring) or isinstance(obj, unicode))
def test_geo_ip(self):
self.assertString(ip_info.get_country_name())
self.assertString(ip_info.get_country_code())
if __name__ == '__main__':
unittest.main()
| mit | 3,438,662,765,736,854,000 | 36.952381 | 113 | 0.659557 | false |
xccui/flink | flink-python/pyflink/table/tests/test_calc.py | 3 | 8853 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import array
import datetime
from decimal import Decimal
from pyflink.common import Row
from pyflink.table import DataTypes, BatchTableEnvironment, EnvironmentSettings
from pyflink.table.expressions import row
from pyflink.table.tests.test_types import ExamplePoint, PythonOnlyPoint, ExamplePointUDT, \
PythonOnlyUDT
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase
class StreamTableCalcTests(PyFlinkStreamTableTestCase):
def test_select(self):
t = self.t_env.from_elements([(1, 'hi', 'hello')], ['a', 'b', 'c'])
result = t.select(t.a + 1, t.b, t.c)
query_operation = result._j_table.getQueryOperation()
self.assertEqual('[plus(a, 1), b, c]',
query_operation.getProjectList().toString())
def test_alias(self):
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
t = t.alias("d, e, f")
result = t.select(t.d, t.e, t.f)
table_schema = result._j_table.getQueryOperation().getTableSchema()
self.assertEqual(['d', 'e', 'f'], list(table_schema.getFieldNames()))
def test_where(self):
t_env = self.t_env
t = t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.where((t.a > 1) & (t.b == 'Hello'))
query_operation = result._j_table.getQueryOperation()
self.assertEqual("and("
"greaterThan(a, 1), "
"equals(b, 'Hello'))",
query_operation.getCondition().toString())
def test_filter(self):
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.filter((t.a > 1) & (t.b == 'Hello'))
query_operation = result._j_table.getQueryOperation()
self.assertEqual("and("
"greaterThan(a, 1), "
"equals(b, 'Hello'))",
query_operation.getCondition().toString())
def test_from_element(self):
t_env = self.t_env
field_names = ["a", "b", "c", "d", "e", "f", "g", "h",
"i", "j", "k", "l", "m", "n", "o", "p", "q", "r"]
field_types = [DataTypes.BIGINT(), DataTypes.DOUBLE(), DataTypes.STRING(),
DataTypes.STRING(), DataTypes.DATE(),
DataTypes.TIME(),
DataTypes.TIMESTAMP(3),
DataTypes.INTERVAL(DataTypes.SECOND(3)),
DataTypes.ARRAY(DataTypes.DOUBLE()),
DataTypes.ARRAY(DataTypes.DOUBLE(False)),
DataTypes.ARRAY(DataTypes.STRING()),
DataTypes.ARRAY(DataTypes.DATE()),
DataTypes.DECIMAL(38, 18),
DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.DOUBLE())]),
DataTypes.MAP(DataTypes.STRING(), DataTypes.DOUBLE()),
DataTypes.BYTES(), ExamplePointUDT(),
PythonOnlyUDT()]
schema = DataTypes.ROW(
list(map(lambda field_name, field_type: DataTypes.FIELD(field_name, field_type),
field_names,
field_types)))
table_sink = source_sink_utils.TestAppendSink(field_names, field_types)
t_env.register_table_sink("Results", table_sink)
t = t_env.from_elements(
[(1, 1.0, "hi", "hello", datetime.date(1970, 1, 2), datetime.time(1, 0, 0),
datetime.datetime(1970, 1, 2, 0, 0),
datetime.timedelta(days=1, microseconds=10),
[1.0, None], array.array("d", [1.0, 2.0]),
["abc"], [datetime.date(1970, 1, 2)], Decimal(1), Row("a", "b")(1, 2.0),
{"key": 1.0}, bytearray(b'ABCD'), ExamplePoint(1.0, 2.0),
PythonOnlyPoint(3.0, 4.0))],
schema)
t.execute_insert("Results").wait()
actual = source_sink_utils.results()
expected = ['1,1.0,hi,hello,1970-01-02,01:00:00,1970-01-02 00:00:00.0,'
'86400000,[1.0, null],[1.0, 2.0],[abc],[1970-01-02],'
'1,1,2.0,{key=1.0},[65, 66, 67, 68],[1.0, 2.0],[3.0, 4.0]']
self.assert_equals(actual, expected)
def test_from_element_expression(self):
t_env = self.t_env
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.FLOAT()]
schema = DataTypes.ROW(
list(map(lambda field_name, field_type: DataTypes.FIELD(field_name, field_type),
field_names,
field_types)))
table_sink = source_sink_utils.TestAppendSink(field_names, field_types)
t_env.register_table_sink("Results", table_sink)
t = t_env.from_elements([row(1, 'abc', 2.0), row(2, 'def', 3.0)], schema)
t.execute_insert("Results").wait()
actual = source_sink_utils.results()
expected = ['1,abc,2.0', '2,def,3.0']
self.assert_equals(actual, expected)
def test_blink_from_element(self):
t_env = BatchTableEnvironment.create(environment_settings=EnvironmentSettings
.new_instance().use_blink_planner()
.in_batch_mode().build())
field_names = ["a", "b", "c", "d", "e", "f", "g", "h",
"i", "j", "k", "l", "m", "n", "o", "p", "q"]
field_types = [DataTypes.BIGINT(), DataTypes.DOUBLE(), DataTypes.STRING(),
DataTypes.STRING(), DataTypes.DATE(),
DataTypes.TIME(),
DataTypes.TIMESTAMP(3),
DataTypes.INTERVAL(DataTypes.SECOND(3)),
DataTypes.ARRAY(DataTypes.DOUBLE()),
DataTypes.ARRAY(DataTypes.DOUBLE(False)),
DataTypes.ARRAY(DataTypes.STRING()),
DataTypes.ARRAY(DataTypes.DATE()),
DataTypes.DECIMAL(38, 18),
DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.DOUBLE())]),
DataTypes.MAP(DataTypes.STRING(), DataTypes.DOUBLE()),
DataTypes.BYTES(),
PythonOnlyUDT()]
schema = DataTypes.ROW(
list(map(lambda field_name, field_type: DataTypes.FIELD(field_name, field_type),
field_names,
field_types)))
table_sink = source_sink_utils.TestAppendSink(field_names, field_types)
t_env.register_table_sink("Results", table_sink)
t = t_env.from_elements(
[(1, 1.0, "hi", "hello", datetime.date(1970, 1, 2), datetime.time(1, 0, 0),
datetime.datetime(1970, 1, 2, 0, 0),
datetime.timedelta(days=1, microseconds=10),
[1.0, None], array.array("d", [1.0, 2.0]),
["abc"], [datetime.date(1970, 1, 2)], Decimal(1), Row("a", "b")(1, 2.0),
{"key": 1.0}, bytearray(b'ABCD'),
PythonOnlyPoint(3.0, 4.0))],
schema)
t.execute_insert("Results").wait()
actual = source_sink_utils.results()
expected = ['1,1.0,hi,hello,1970-01-02,01:00:00,1970-01-02 00:00:00.0,'
'86400000,[1.0, null],[1.0, 2.0],[abc],[1970-01-02],'
'1.000000000000000000,1,2.0,{key=1.0},[65, 66, 67, 68],[3.0, 4.0]']
self.assert_equals(actual, expected)
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 | -9,219,673,020,569,421,000 | 47.911602 | 92 | 0.533604 | false |
himanshuo/osf.io | website/addons/s3/tests/test_view.py | 1 | 17088 | import mock
from nose.tools import * # noqa
import httplib as http
from boto.exception import S3ResponseError
from framework.auth import Auth
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, AuthUserFactory
from website.addons.s3.utils import validate_bucket_name
from website.util import api_url_for
from utils import create_mock_wrapper
from faker import Faker
fake = Faker()
class MockS3Bucket(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
class TestS3ViewsConfig(OsfTestCase):
def setUp(self):
super(TestS3ViewsConfig, self).setUp()
self.user = AuthUserFactory()
self.consolidated_auth = Auth(user=self.user)
self.auth = ('test', self.user.api_keys[0]._primary_key)
self.project = ProjectFactory(creator=self.user)
self.project.add_addon('s3', auth=self.consolidated_auth)
self.project.creator.add_addon('s3')
self.user_settings = self.user.get_addon('s3')
self.user_settings.access_key = 'We-Will-Rock-You'
self.user_settings.secret_key = 'Idontknowanyqueensongs'
self.user_settings.save()
self.node_settings = self.project.get_addon('s3')
self.node_settings.bucket = 'Sheer-Heart-Attack'
self.node_settings.user_settings = self.project.creator.get_addon('s3')
self.node_settings.save()
self.node_url = '/api/v1/project/{0}/'.format(self.project._id)
@mock.patch('website.addons.s3.views.config.does_bucket_exist')
@mock.patch('website.addons.s3.views.config.adjust_cors')
def test_s3_settings_no_bucket(self, mock_cors, mock_does_bucket_exist):
mock_does_bucket_exist.return_value = False
mock_cors.return_value = True
url = self.project.api_url + 's3/settings/'
rv = self.app.post_json(url, {}, expect_errors=True, auth=self.user.auth)
assert_true('trouble' in rv.body)
@mock.patch('website.addons.s3.views.config.does_bucket_exist')
@mock.patch('website.addons.s3.views.config.adjust_cors')
@mock.patch('website.addons.s3.utils.get_bucket_drop_down')
def test_s3_set_bucket(self, mock_cors, mock_exist, mock_dropdown):
mock_cors.return_value = True
mock_exist.return_value = True
mock_dropdown.return_value = ['mybucket']
url = self.project.api_url_for('s3_node_settings')
self.app.post_json(
url, {'s3_bucket': 'hammertofall'}, auth=self.user.auth,
)
self.project.reload()
self.node_settings.reload()
assert_equal(self.node_settings.bucket, 'hammertofall')
assert_equal(self.project.logs[-1].action, 's3_bucket_linked')
def test_s3_set_bucket_no_settings(self):
user = AuthUserFactory()
self.project.add_contributor(user, save=True)
url = self.project.api_url + 's3/settings/'
res = self.app.post_json(
url, {'s3_bucket': 'hammertofall'}, auth=user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_s3_set_bucket_no_auth(self):
user = AuthUserFactory()
user.add_addon('s3')
self.project.add_contributor(user, save=True)
url = self.project.api_url + 's3/settings/'
res = self.app.post_json(
url, {'s3_bucket': 'hammertofall'}, auth=user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_s3_set_bucket_already_authed(self):
user = AuthUserFactory()
user.add_addon('s3')
user_settings = user.get_addon('s3')
user_settings.access_key = 'foo'
user_settings.secret_key = 'bar'
user_settings.save()
self.project.add_contributor(user, save=True)
url = self.project.api_url + 's3/settings/'
res = self.app.post_json(
url, {'s3_bucket': 'hammertofall'}, auth=user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
@mock.patch('website.addons.s3.api.S3Wrapper.from_addon')
def test_s3_set_bucket_registered(self, mock_from_addon):
mock_from_addon.return_value = create_mock_wrapper()
registration = self.project.register_node(
None, self.consolidated_auth, '', ''
)
url = registration.api_url + 's3/settings/'
res = self.app.post_json(
url, {'s3_bucket': 'hammertofall'}, auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
@mock.patch('website.addons.s3.views.config.has_access')
@mock.patch('website.addons.s3.views.config.create_osf_user')
def test_user_settings(self, mock_user, mock_access):
mock_access.return_value = True
mock_user.return_value = (
'osf-user-12345',
{
'access_key_id': 'scout',
'secret_access_key': 'ssshhhhhhhhh'
}
)
url = '/api/v1/settings/s3/'
self.app.post_json(
url,
{
'access_key': 'scout',
'secret_key': 'Atticus'
},
auth=self.user.auth
)
self.user_settings.reload()
assert_equals(self.user_settings.access_key, 'scout')
@mock.patch('website.addons.s3.model.AddonS3UserSettings.remove_iam_user')
def test_s3_remove_user_settings(self, mock_access):
mock_access.return_value = True
self.user_settings.access_key = 'to-kill-a-mocking-bucket'
self.user_settings.secret_key = 'itsasecret'
self.user_settings.save()
url = api_url_for('s3_remove_user_settings')
self.app.delete(url, auth=self.user.auth)
self.user_settings.reload()
assert_equals(self.user_settings.access_key, None)
assert_equals(self.user_settings.secret_key, None)
assert_equals(mock_access.call_count, 1)
@mock.patch('website.addons.s3.model.AddonS3UserSettings.remove_iam_user')
def test_s3_remove_user_settings_none(self, mock_access):
self.user_settings.access_key = None
self.user_settings.secret_key = None
self.user_settings.save()
url = api_url_for('s3_remove_user_settings')
self.app.delete(url, auth=self.user.auth)
self.user_settings.reload()
assert_equals(mock_access.call_count, 0)
@mock.patch('website.addons.s3.views.config.has_access')
def test_user_settings_no_auth(self, mock_access):
mock_access.return_value = False
url = '/api/v1/settings/s3/'
rv = self.app.post_json(url, {}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http.BAD_REQUEST)
@mock.patch('website.addons.s3.utils.get_bucket_drop_down')
@mock.patch('website.addons.s3.views.config.has_access')
@mock.patch('website.addons.s3.views.config.create_osf_user')
def test_node_settings_no_user_settings(self, mock_user, mock_access, mock_dropdown):
self.node_settings.user_settings = None
self.node_settings.save()
url = self.node_url + 's3/authorize/'
mock_access.return_value = True
mock_user.return_value = (
'osf-user-12345',
{
'access_key_id': 'scout',
'secret_access_key': 'ssshhhhhhhhh'
}
)
mock_dropdown.return_value = ['mybucket']
self.app.post_json(url, {'access_key': 'scout', 'secret_key': 'ssshhhhhhhhh'}, auth=self.user.auth)
self.user_settings.reload()
assert_equals(self.user_settings.access_key, 'scout')
@mock.patch('website.addons.s3.utils.get_bucket_drop_down')
def test_node_settings_no_user_settings_ui(self, mock_dropdown):
mock_dropdown.return_value = ['mybucket']
self.node_settings.user_settings.access_key = None
self.node_settings.user_settings = None
self.node_settings.save()
url = self.project.url + 'settings/'
rv = self.app.get(url, auth=self.user.auth)
assert_true('<label for="s3Addon">Access Key</label>' in rv.body)
@mock.patch('website.addons.s3.utils.get_bucket_list')
def test_s3_bucket_list(self, mock_bucket_list):
fake_buckets = [
MockS3Bucket(name=fake.domain_word())
for i in range(10)
]
mock_bucket_list.return_value = fake_buckets
url = self.node_settings.owner.api_url_for('s3_bucket_list')
ret = self.app.get(url, auth=self.user.auth)
assert_equals(ret.json, {'buckets': [bucket.name for bucket in fake_buckets]})
def test_s3_remove_node_settings_owner(self):
url = self.node_settings.owner.api_url_for('s3_remove_node_settings')
ret = self.app.delete(url, auth=self.user.auth)
assert_equal(ret.json['has_bucket'], False)
assert_equal(ret.json['node_has_auth'], False)
def test_s3_remove_node_settings_unauthorized(self):
url = self.node_settings.owner.api_url_for('s3_remove_node_settings')
ret = self.app.delete(url, auth=None, expect_errors=True)
assert_equal(ret.status_code, 401)
def test_s3_get_node_settings_owner(self):
url = self.node_settings.owner.api_url_for('s3_get_node_settings')
res = self.app.get(url, auth=self.user.auth)
expected_bucket = self.node_settings.bucket
expected_node_has_auth = True
expected_user_is_owner = True
result = res.json['result']
assert_equal(result['bucket'], expected_bucket)
assert_equal(result['node_has_auth'], expected_node_has_auth)
assert_equal(result['user_is_owner'], expected_user_is_owner)
def test_s3_get_node_settings_not_owner(self):
url = self.node_settings.owner.api_url_for('s3_get_node_settings')
non_owner = AuthUserFactory()
self.project.add_contributor(non_owner, save=True, permissions=['write'])
res = self.app.get(url, auth=non_owner.auth)
expected_bucket = self.node_settings.bucket
expected_node_has_auth = True
expected_user_is_owner = False
result = res.json['result']
assert_equal(result['bucket'], expected_bucket)
assert_equal(result['node_has_auth'], expected_node_has_auth)
assert_equal(result['user_is_owner'], expected_user_is_owner)
def test_s3_get_node_settings_unauthorized(self):
url = self.node_settings.owner.api_url_for('s3_get_node_settings')
unauthorized = AuthUserFactory()
ret = self.app.get(url, auth=unauthorized.auth, expect_errors=True)
assert_equal(ret.status_code, 403)
@mock.patch('website.addons.s3.views.config.add_s3_auth')
def test_s3_authorize_node_valid(self, mock_add):
mock_add.return_value = True
url = self.project.api_url_for('s3_authorize_node')
cred = {
'access_key': fake.password(),
'secret_key': fake.password(),
}
res = self.app.post_json(url, cred, auth=self.user.auth)
assert_equal(res.json['node_has_auth'], True)
def test_s3_authorize_node_invalid(self):
url = self.project.api_url_for('s3_authorize_node')
cred = {
'access_key': fake.password(),
}
res = self.app.post_json(url, cred, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
@mock.patch('website.addons.s3.views.config.add_s3_auth')
def test_s3_authorize_node_malformed(self, mock_add):
mock_add.return_value = False
url = self.project.api_url_for('s3_authorize_node')
cred = {
'access_key': fake.password(),
'secret_key': fake.password(),
}
res = self.app.post_json(url, cred, auth=self.user.auth, expect_errors=True)
assert_equal(res.json['message'], 'Incorrect credentials')
assert_equal(res.status_code, 400)
@mock.patch('website.addons.s3.views.config.add_s3_auth')
def test_s3_authorize_node_unauthorized(self, mock_add):
mock_add.return_value = True
url = self.project.api_url_for('s3_authorize_node')
cred = {
'access_key': fake.password(),
'secret_key': fake.password(),
}
unauthorized = AuthUserFactory()
res = self.app.post_json(url, cred, auth=unauthorized.auth, expect_errors=True)
assert_equal(res.status_code, 403)
@mock.patch('website.addons.s3.views.config.add_s3_auth')
def test_s3_authorize_user_valid(self, mock_add):
mock_add.return_value = True
url = self.project.api_url_for('s3_authorize_user')
cred = {
'access_key': fake.password(),
'secret_key': fake.password(),
}
res = self.app.post_json(url, cred, auth=self.user.auth)
assert_equal(res.json, {})
def test_s3_authorize_user_invalid(self):
url = self.project.api_url_for('s3_authorize_user')
cred = {
'access_key': fake.password(),
}
res = self.app.post_json(url, cred, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
@mock.patch('website.addons.s3.views.config.add_s3_auth')
def test_s3_authorize_user_malformed(self, mock_add):
mock_add.return_value = False
url = self.project.api_url_for('s3_authorize_user')
cred = {
'access_key': fake.password(),
'secret_key': fake.password(),
}
res = self.app.post_json(url, cred, auth=self.user.auth, expect_errors=True)
assert_equal(res.json['message'], 'Incorrect credentials')
assert_equal(res.status_code, 400)
def test_s3_node_import_auth_authorized(self):
url = self.project.api_url_for('s3_node_import_auth')
self.node_settings.deauthorize(auth=None, save=True)
res = self.app.post(url, auth=self.user.auth)
assert_equal(res.json['node_has_auth'], True)
assert_equal(res.json['user_is_owner'], True)
def test_s3_node_import_auth_unauthorized(self):
url = self.project.api_url_for('s3_node_import_auth')
self.node_settings.deauthorize(auth=None, save=True)
unauthorized = AuthUserFactory()
res = self.app.post(url, auth=unauthorized.auth, expect_errors=True)
assert_equal(res.status_code, 403)
class TestCreateBucket(OsfTestCase):
def setUp(self):
super(TestCreateBucket, self).setUp()
self.user = AuthUserFactory()
self.consolidated_auth = Auth(user=self.user)
self.auth = ('test', self.user.api_keys[0]._primary_key)
self.project = ProjectFactory(creator=self.user)
self.project.add_addon('s3', auth=self.consolidated_auth)
self.project.creator.add_addon('s3')
self.user_settings = self.user.get_addon('s3')
self.user_settings.access_key = 'We-Will-Rock-You'
self.user_settings.secret_key = 'Idontknowanyqueensongs'
self.user_settings.save()
self.node_settings = self.project.get_addon('s3')
self.node_settings.bucket = 'Sheer-Heart-Attack'
self.node_settings.user_settings = self.project.creator.get_addon('s3')
self.node_settings.save()
def test_bad_names(self):
assert_false(validate_bucket_name('bogus naMe'))
assert_false(validate_bucket_name(''))
assert_false(validate_bucket_name('no'))
assert_false(validate_bucket_name('.cantstartwithp'))
assert_false(validate_bucket_name('or.endwith.'))
assert_false(validate_bucket_name('..nodoubles'))
assert_false(validate_bucket_name('no_unders_in'))
def test_names(self):
assert_true(validate_bucket_name('imagoodname'))
assert_true(validate_bucket_name('still.passing'))
assert_true(validate_bucket_name('can-have-dashes'))
assert_true(validate_bucket_name('kinda.name.spaced'))
@mock.patch('website.addons.s3.views.crud.create_bucket')
@mock.patch('website.addons.s3.utils.get_bucket_drop_down')
def test_create_bucket_pass(self, mock_make, mock_dropdown):
mock_make.return_value = True
mock_dropdown.return_value = ['mybucket']
url = self.project.api_url_for('create_new_bucket')
ret = self.app.post_json(url, {'bucket_name': 'doesntevenmatter'}, auth=self.user.auth, expect_errors=True)
assert_equals(ret.status_int, http.OK)
@mock.patch('website.addons.s3.views.crud.create_bucket')
def test_create_bucket_fail(self, mock_make):
error = S3ResponseError(418, 'because Im a test')
error.message = 'This should work'
mock_make.side_effect = error
url = "/api/v1/project/{0}/s3/newbucket/".format(self.project._id)
ret = self.app.post_json(url, {'bucket_name': 'doesntevenmatter'}, auth=self.user.auth, expect_errors=True)
assert_equals(ret.body, '{"message": "This should work", "title": "Problem connecting to S3"}')
| apache-2.0 | 5,513,185,094,296,701,000 | 39.301887 | 115 | 0.631086 | false |
bmander/dancecontraption | django/db/models/__init__.py | 7 | 1441 | from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured
from django.db import connection
from django.db.models.loading import get_apps, get_app, get_models, get_model, register_models
from django.db.models.query import Q
from django.db.models.expressions import F
from django.db.models.manager import Manager
from django.db.models.base import Model
from django.db.models.aggregates import *
from django.db.models.fields import *
from django.db.models.fields.subclassing import SubfieldBase
from django.db.models.fields.files import FileField, ImageField
from django.db.models.fields.related import ForeignKey, OneToOneField, ManyToManyField, ManyToOneRel, ManyToManyRel, OneToOneRel
from django.db.models.deletion import CASCADE, PROTECT, SET, SET_NULL, SET_DEFAULT, DO_NOTHING, ProtectedError
from django.db.models import signals
# Admin stages.
ADD, CHANGE, BOTH = 1, 2, 3
def permalink(func):
"""
Decorator that calls urlresolvers.reverse() to return a URL using
parameters returned by the decorated function "func".
"func" should be a function that returns a tuple in one of the
following formats:
(viewname, viewargs)
(viewname, viewargs, viewkwargs)
"""
from django.core.urlresolvers import reverse
def inner(*args, **kwargs):
bits = func(*args, **kwargs)
return reverse(bits[0], None, *bits[1:3])
return inner
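# Illustrative use of the permalink decorator above (the Poll model and the
# 'poll_detail' URL pattern are assumptions for the example, not part of this module):
#
#   class Poll(models.Model):
#       @permalink
#       def get_absolute_url(self):
#           return ('poll_detail', (), {'poll_id': self.id})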
| bsd-3-clause | 3,226,490,769,852,274,700 | 41.382353 | 128 | 0.762665 | false |
RyanBeatty/Steer-Clear-Backend | steerclear/login/views.py | 3 | 5706 | from flask import (
Blueprint,
render_template,
url_for,
redirect,
current_app,
session,
request
)
from flask.ext.login import (
login_user,
logout_user,
login_required,
current_user
)
from flask.ext.principal import (
Principal,
Identity,
AnonymousIdentity,
identity_changed,
identity_loaded,
Permission,
RoleNeed,
UserNeed
)
from flask_restful import abort
from sqlalchemy import exc
from steerclear import login_manager, app
from steerclear.utils import cas
from steerclear.utils.permissions import (
admin_permission,
student_permission,
AccessRideNeed
)
from forms import *
from models import *
# setup login blueprint
login_bp = Blueprint('login', __name__)
# setup flask-principal
principal = Principal()
principal.init_app(app)
"""
create_roles
------------
Function called before app processes first request.
Creates the admin and student Roles if they do
not already exist
"""
@app.before_first_request
def create_roles():
# create student Role
if Role.query.filter_by(name='student').first() is None:
role = Role(name='student', description='Student Role')
db.session.add(role)
db.session.commit()
# create admin Role
if Role.query.filter_by(name='admin').first() is None:
role = Role(name='admin', description='Admin Role')
db.session.add(role)
db.session.commit()
"""
user_loader
-----------
Returns a user given the (unicode) user_id.
This needs to be implemented for the flask-login extension to work
"""
@login_manager.user_loader
def user_loader(user_id):
return User.query.get(int(user_id))
"""
identity_loaded
---------------
Signal used by flask-principal. called when
loading the user Identity for the request.
"""
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
# Set the identity user object
identity.user = current_user
# Add the UserNeed to the identity
if hasattr(current_user, 'id'):
identity.provides.add(UserNeed(current_user.id))
# Assuming the User model has a list of roles, update the
# identity with the roles that the user provides
if hasattr(current_user, 'roles'):
for role in current_user.roles:
identity.provides.add(RoleNeed(role.name))
# Assuming the User model has a list of rides the user
# has requested, add the needs to the identity
if hasattr(current_user, 'rides'):
for ride in current_user.rides:
identity.provides.add(AccessRideNeed(unicode(ride.id)))
"""
login
-----
main endpoint for logging users in and out
GET - returns the login page
POST - logs the user in if the username and password are valid,
then redirects to the index page; otherwise returns the login template
:TODO: factor in password hashing + salt. add
more helpful error messages
"""
@login_bp.route('/login', methods=['GET', 'POST'])
def login():
# GET request. return login page
if request.method == 'GET':
return render_template('login.html', action=url_for('.login'))
# POST request. attempt to login
# must validate LoginForm and CAS server
form = LoginForm()
if form.validate_on_submit() and cas.validate_user(form.username.data, form.password.data):
# get User object if exists
user = User.query.filter_by(username=form.username.data).first()
if user:
# login user
login_user(user)
# Tell Flask-Principal the identity changed
identity_changed.send(current_app._get_current_object(),
identity=Identity(user.id))
return redirect(url_for('driver_portal.index'))
return render_template('login.html', action=url_for('.login')), 400
"""
logout
------
Logs out the user. User must already be logged in, else
return 401. Once user is logged out, redirect to login page
"""
@login_bp.route('/logout', methods=['GET'])
@login_required
def logout():
logout_user()
# Remove session keys set by Flask-Principal
for key in ('identity.name', 'identity.auth_type'):
session.pop(key, None)
# Tell Flask-Principal the user is anonymous
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
return redirect(url_for('.login'))
"""
register
--------
Main endpoint for registering new users in the system
POST - takes a username/password form and creates a new user.
If a user already exists with the same username, flash an error message
and return the register screen again. On success, redirect user to login page
"""
@login_bp.route('/register', methods=['POST'])
def register():
# POST request. attempt to validate RegisterForm and user with CAS server
form = RegisterForm()
if form.validate_on_submit() and cas.validate_user(form.username.data, form.password.data):
# Find StudentRole. SHOULD EXIST ON STARTUP. IF NOT, THEN SERVER ERROR
student_role = Role.query.filter_by(name='student').first()
if student_role is None:
abort(500)
# attempt to create a new User in the db
new_user = User(
username=form.username.data,
phone=form.phone.data,
roles=[student_role]
)
try:
db.session.add(new_user)
db.session.commit()
except exc.IntegrityError:
# user already exists
return render_template('login.html', action=url_for('.register')), 409
return redirect(url_for('.login'))
return render_template('login.html', action=url_for('.register')), 400
| mit | 7,406,559,695,176,259,000 | 29.031579 | 95 | 0.657729 | false |
atvKumar/open-tamil | tamil/txt2ipa/unicode2ipa.py | 4 | 1851 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
# #
# (C) 2014 Arulalan.T <[email protected]> #
# Date : 02.08.2014 #
# #
# This file is part of open-tamil/txt2ipa #
# #
# txt2ipa is free software: you can redistribute it and/or #
# modify it under the terms of the GNU General Public License as published by#
# the Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. This program is distributed in the hope that it #
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty#
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General#
# Public License for more details. You should have received a copy of the GNU#
# General Public License along with this program. If not, see #
# <http://www.gnu.org/licenses/>. #
# #
##############################################################################
from .transliteration import tam2lat
from .ipaconvert import ipa
from .ipaconvert import broad as broad_ipa
def txt2ipa(text, broad=True):
lat = tam2lat(text)
lat = " " + lat + " "
ipa_text = ipa(lat)
# make memory free
del lat
if broad: ipa_text = broad_ipa(ipa_text)
return ipa_text
# end of def txt2ipa(text, broad=True):
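# Illustrative usage (import path inferred from this file's location in open-tamil;
# the sample word is arbitrary):
#   from tamil.txt2ipa.unicode2ipa import txt2ipa
#   print(txt2ipa(u"வணக்கம்"))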
| mit | -3,898,577,033,305,444,400 | 46.461538 | 81 | 0.440303 | false |
rohitw1991/smarttailorfrappe | frappe/api.py | 20 | 3228 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import json
import frappe
import frappe.handler
import frappe.client
import frappe.widgets.reportview
from frappe.utils.response import build_response
from frappe import _
def handle():
"""
/api/method/{methodname} will call a whitelisted method
/api/resource/{doctype} will query a table
examples:
?fields=["name", "owner"]
?filters=[["Task", "name", "like", "%005"]]
?limit_start=0
?limit_page_length=20
/api/resource/{doctype}/{name} will point to a resource
GET will return doclist
POST will insert
PUT will update
DELETE will delete
/api/resource/{doctype}/{name}?run_method={method} will run a whitelisted controller method
"""
parts = frappe.request.path[1:].split("/",3)
call = doctype = name = None
if len(parts) > 1:
call = parts[1]
if len(parts) > 2:
doctype = parts[2]
if len(parts) > 3:
name = parts[3]
if call=="method":
frappe.local.form_dict.cmd = doctype
return frappe.handler.handle()
elif call=="resource":
if "run_method" in frappe.local.form_dict:
method = frappe.local.form_dict.pop("run_method")
doc = frappe.get_doc(doctype, name)
doc.is_whitelisted(method)
if frappe.local.request.method=="GET":
if not doc.has_permission("read"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
doc.run_method(method, **frappe.local.form_dict)
if frappe.local.request.method=="POST":
if not doc.has_permission("write"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
doc.run_method(method, **frappe.local.form_dict)
frappe.db.commit()
else:
if name:
if frappe.local.request.method=="GET":
doc = frappe.get_doc(doctype, name)
if not doc.has_permission("read"):
raise frappe.PermissionError
frappe.local.response.update({"data": doc})
if frappe.local.request.method=="PUT":
data = json.loads(frappe.local.form_dict.data)
doc = frappe.get_doc(doctype, name)
# Not checking permissions here because it's checked in doc.save
doc.update(data)
frappe.local.response.update({
"data": doc.save().as_dict()
})
frappe.db.commit()
if frappe.local.request.method=="DELETE":
# Not checking permissions here because it's checked in delete_doc
frappe.delete_doc(doctype, name)
frappe.local.response.http_status_code = 202
frappe.local.response.message = "ok"
frappe.db.commit()
elif doctype:
if frappe.local.request.method=="GET":
if frappe.local.form_dict.get('fields'):
frappe.local.form_dict['fields'] = json.loads(frappe.local.form_dict['fields'])
frappe.local.response.update({
"data": frappe.call(frappe.widgets.reportview.execute,
doctype, **frappe.local.form_dict)})
if frappe.local.request.method=="POST":
data = json.loads(frappe.local.form_dict.data)
data.update({
"doctype": doctype
})
frappe.local.response.update({
"data": frappe.get_doc(data).insert().as_dict()
})
frappe.db.commit()
else:
raise frappe.DoesNotExistError
else:
raise frappe.DoesNotExistError
return build_response("json")
| mit | 3,654,877,232,835,149,300 | 28.081081 | 92 | 0.67596 | false |
orashi/PaintsPytorch | train_ft.py | 1 | 13323 | import argparse
import os
import random
import scipy.stats as stats
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from torch.autograd import grad
from data.finetune import CreateDataLoader
from models.standard import *
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', required=True, help='path to colored dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batchSize', type=int, default=16, help='input batch size')
parser.add_argument('--imageSize', type=int, default=512, help='the height / width of the input image to network')
parser.add_argument('--cut', type=int, default=1, help='cut backup frequency')
parser.add_argument('--niter', type=int, default=700, help='number of epochs to train for')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--lrG', type=float, default=0.0001, help='learning rate, default=0.0001')
parser.add_argument('--lrD', type=float, default=0.0001, help='learning rate, default=0.0001')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--optim', action='store_true', help='load optimizer\'s checkpoint')
parser.add_argument('--outf', default='', help='folder to output images and model checkpoints')
parser.add_argument('--optf', default='', help='folder to optimizer checkpoints')
parser.add_argument('--Diters', type=int, default=1, help='number of D iters per each G iter')
parser.add_argument('--manualSeed', type=int, default=2345, help='random seed to use. Default=1234')
parser.add_argument('--geni', type=int, default=0, help='continue gen image num')
parser.add_argument('--epoi', type=int, default=0, help='continue epoch num')
parser.add_argument('--env', type=str, default=None, help='tensorboard env')
parser.add_argument('--advW', type=float, default=0.0001, help='adversarial weight, default=0.0001')
parser.add_argument('--gpW', type=float, default=10, help='gradient penalty weight')
parser.add_argument('--drift', type=float, default=0.001, help='wasserstein drift weight')
opt = parser.parse_args()
print(opt)
####### regular set up
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
gen_iterations = opt.geni
try:
os.makedirs(opt.outf)
except OSError:
pass
# random seed setup # !!!!!
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
####### regular set up end
writer = SummaryWriter(log_dir=opt.env, comment='this is great')
dataloader = CreateDataLoader(opt)
netG = torch.nn.DataParallel(NetG(ngf=opt.ngf))
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
netD = torch.nn.DataParallel(NetD(ndf=opt.ndf))
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
netF = torch.nn.DataParallel(NetF())
print(netF)
for param in netF.parameters():
param.requires_grad = False
netI = torch.nn.DataParallel(NetI())
print(netI)
criterion_MSE = nn.MSELoss()
one = torch.FloatTensor([1])
mone = one * -1
half_batch = opt.batchSize // 2
fixed_sketch = torch.FloatTensor()
fixed_hint = torch.FloatTensor()
fixed_sketch_feat = torch.FloatTensor()
if opt.cuda:
netD = netD.cuda()
netG = netG.cuda()
netF = netF.cuda()
netI = netI.cuda().eval()
fixed_sketch, fixed_hint, fixed_sketch_feat = fixed_sketch.cuda(), fixed_hint.cuda(), fixed_sketch_feat.cuda()
criterion_MSE = criterion_MSE.cuda()
one, mone = one.cuda(), mone.cuda()
# setup optimizer
optimizerG = optim.Adam(netG.parameters(), lr=opt.lrG, betas=(opt.beta1, 0.9))
optimizerD = optim.Adam(netD.parameters(), lr=opt.lrD, betas=(opt.beta1, 0.9))
if opt.optim:
optimizerG.load_state_dict(torch.load('%s/optimG_checkpoint.pth' % opt.optf))
optimizerD.load_state_dict(torch.load('%s/optimD_checkpoint.pth' % opt.optf))
for param_group in optimizerG.param_groups:
param_group['lr'] = opt.lrG
for param_group in optimizerD.param_groups:
param_group['lr'] = opt.lrD
def calc_gradient_penalty(netD, real_data, fake_data, sketch_feat):
alpha = torch.rand(opt.batchSize, 1, 1, 1)
alpha = alpha.cuda() if opt.cuda else alpha
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
if opt.cuda:
interpolates = interpolates.cuda()
interpolates = Variable(interpolates, requires_grad=True)
disc_interpolates = netD(interpolates, Variable(sketch_feat))
gradients = grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda() if opt.cuda else torch.ones(
disc_interpolates.size()),
create_graph=True, retain_graph=True, only_inputs=True)[0]
# TODO:test gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.gpW
return gradient_penalty
def mask_gen():
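# Descriptive note: the first half of the batch gets sparse random hint masks
# (the keep-threshold is drawn from a truncated normal centered at 1, so only a
# small fraction of cells become 1); the second half gets all-zero masks, i.e.
# no color hints at all.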
mask1 = torch.cat(
[torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(opt.batchSize // 2)],
0).cuda()
mask2 = torch.cat([torch.zeros(1, 1, maskS, maskS).float() for _ in range(opt.batchSize // 2)],
0).cuda()
mask = torch.cat([mask1, mask2], 0)
return mask
flag = 1
lower, upper = 0, 1
mu, sigma = 1, 0.005
maskS = opt.imageSize // 4
X = stats.truncnorm(
(lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
for p in netG.parameters():
p.requires_grad = False # to avoid computation
for epoch in range(opt.niter):
data_iter = iter(CreateDataLoader(opt))
i = 0
while i < len(dataloader) - 16 // opt.batchSize:
############################
# (1) Update D network
###########################
for p in netD.parameters(): # reset requires_grad
p.requires_grad = True # they are set to False below in netG update
for p in netG.parameters():
p.requires_grad = False # to avoid computation ft_params
# train the discriminator Diters times
Diters = opt.Diters
j = 0
while j < Diters and i < len(dataloader): #- 16 // opt.batchSize:
j += 1
netD.zero_grad()
data = data_iter.next()
real_cim, real_vim, real_sim = data
i += 1
###############################
if opt.cuda:
real_cim, real_vim, real_sim = real_cim.cuda(), real_vim.cuda(), real_sim.cuda()
mask = mask_gen()
hint = torch.cat((real_vim * mask, mask), 1)
# train with fake
with torch.no_grad():
feat_sim = netI(Variable(real_sim)).data
fake_cim = netG(Variable(real_sim),
Variable(hint),
Variable(feat_sim)
).data
errD_fake = netD(Variable(fake_cim), Variable(feat_sim))
errD_fake = errD_fake.mean(0).view(1)
errD_fake.backward(one, retain_graph=True) # backward on score on real
errD_real = netD(Variable(real_cim), Variable(feat_sim))
errD_real = errD_real.mean(0).view(1)
errD = errD_real - errD_fake
errD_realer = -1 * errD_real + errD_real.pow(2) * opt.drift
# additional penalty term to keep the scores from drifting too far from zero
errD_realer.backward(one, retain_graph=True) # backward on score on real
gradient_penalty = calc_gradient_penalty(netD, real_cim, fake_cim, feat_sim)
gradient_penalty.backward()
optimizerD.step()
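# Summary of the critic objective assembled by the backward() calls above
# (descriptive note only; see the TODO in calc_gradient_penalty about how the
# gradient norm is taken):
#   L_D = E[D(fake)] - E[D(real)] + drift * E[D(real)]^2
#         + gpW * E[(||grad_x_hat D(x_hat)|| - 1)^2]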
############################
# (2) Update G network
############################
if i < len(dataloader) - 16 // opt.batchSize:
if 0:#:flag: # fix samples
data = zip(*[data_iter.next() for _ in range(16 // opt.batchSize)])
real_cim, real_vim, real_sim = [torch.cat(dat, 0) for dat in data]
i += 1
if opt.cuda:
real_cim, real_vim, real_sim = real_cim.cuda(), real_vim.cuda(), real_sim.cuda()
mask1 = torch.cat(
[torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(8)],
0).cuda()
mask2 = torch.cat([torch.zeros(1, 1, maskS, maskS).float() for _ in range(8)],
0).cuda()
mask = torch.cat([mask1, mask2], 0)
hint = torch.cat((real_vim * mask, mask), 1)
with torch.no_grad():
feat_sim = netI(Variable(real_sim)).data
writer.add_image('target imgs', vutils.make_grid(real_cim.mul(0.5).add(0.5), nrow=4))
writer.add_image('sketch imgs', vutils.make_grid(real_sim.mul(0.5).add(0.5), nrow=4))
writer.add_image('hint', vutils.make_grid((real_vim * mask).mul(0.5).add(0.5), nrow=4))
vutils.save_image(real_cim.mul(0.5).add(0.5),
'%s/color_samples' % opt.outf + '.png')
vutils.save_image(real_sim.mul(0.5).add(0.5),
'%s/blur_samples' % opt.outf + '.png')
fixed_sketch.resize_as_(real_sim).copy_(real_sim)
fixed_hint.resize_as_(hint).copy_(hint)
fixed_sketch_feat.resize_as_(feat_sim).copy_(feat_sim)
flag -= 1
for p in netD.parameters():
p.requires_grad = False # to avoid computation
for p in netG.parameters():
p.requires_grad = True
netG.zero_grad()
data = data_iter.next()
real_cim, real_vim, real_sim = data
i += 1
if opt.cuda:
real_cim, real_vim, real_sim = real_cim.cuda(), real_vim.cuda(), real_sim.cuda()
mask = mask_gen()
hint = torch.cat((real_vim * mask, mask), 1)
with torch.no_grad():
feat_sim = netI(Variable(real_sim)).data
fake = netG(Variable(real_sim),
Variable(hint),
Variable(feat_sim))
errd = netD(fake, Variable(feat_sim))
errG = errd.mean() * opt.advW
errG.backward(mone, retain_graph=True)
feat1 = netF(fake)
with torch.no_grad():
feat2 = netF(Variable(real_cim))
contentLoss = criterion_MSE(feat1, feat2)
contentLoss.backward()
optimizerG.step()
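# Summary of the generator objective from the two backward() calls above:
#   L_G = -advW * E[D(G(sketch, hint, feat))] + MSE(VGG(fake), VGG(real))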
############################
# (3) Report & 100 Batch checkpoint
############################
writer.add_scalar('VGG MSE Loss', contentLoss.data[0], gen_iterations)
writer.add_scalar('wasserstein distance', errD.data[0], gen_iterations)
writer.add_scalar('errD_real', errD_real.data[0], gen_iterations)
writer.add_scalar('errD_fake', errD_fake.data[0], gen_iterations)
writer.add_scalar('Gnet loss toward real', errG.data[0], gen_iterations)
writer.add_scalar('gradient_penalty', gradient_penalty.data[0], gen_iterations)
print('[%d/%d][%d/%d][%d] errD: %f err_G: %f err_D_real: %f err_D_fake %f content loss %f'
% (epoch, opt.niter, i, len(dataloader), gen_iterations,
errD.data[0], errG.data[0], errD_real.data[0], errD_fake.data[0], contentLoss.data[0]))
if gen_iterations % 500 == 666:
with torch.no_grad():
fake = netG(Variable(fixed_sketch),
Variable(fixed_hint),
Variable(fixed_sketch_feat))
writer.add_image('colored imgs', vutils.make_grid(fake.data.mul(0.5).add(0.5), nrow=4),
gen_iterations)
gen_iterations += 1
# do checkpointing
if opt.cut == 0:
torch.save(netG.state_dict(), '%s/netG_epoch_only.pth' % opt.outf)
torch.save(netD.state_dict(), '%s/netD_epoch_only.pth' % opt.outf)
torch.save(optimizerG.state_dict(), '%s/optimG_checkpoint.pth' % opt.outf)
torch.save(optimizerD.state_dict(), '%s/optimD_checkpoint.pth' % opt.outf)
elif gen_iterations % 100 == 0:#(epoch + opt.epoi) % opt.cut == 0:
torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, gen_iterations))
torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, gen_iterations))
torch.save(optimizerG.state_dict(), '%s/optimG_checkpoint.pth' % opt.outf)
torch.save(optimizerD.state_dict(), '%s/optimD_checkpoint.pth' % opt.outf)
| mit | -3,202,663,539,232,369,700 | 40.504673 | 114 | 0.589507 | false |
eggmaster/tempest | tempest/api/compute/servers/test_servers_negative.py | 3 | 21167 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
import testtools
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
class ServersNegativeTestJSON(base.BaseV2ComputeTest):
credentials = ['primary', 'alt']
def setUp(self):
super(ServersNegativeTestJSON, self).setUp()
try:
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
except Exception:
self.__class__.server_id = self.rebuild_server(self.server_id)
def tearDown(self):
self.server_check_teardown()
super(ServersNegativeTestJSON, self).tearDown()
@classmethod
def setup_clients(cls):
super(ServersNegativeTestJSON, cls).setup_clients()
cls.client = cls.servers_client
cls.alt_client = cls.os_alt.servers_client
@classmethod
def resource_setup(cls):
super(ServersNegativeTestJSON, cls).resource_setup()
server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
@test.attr(type=['negative'])
@test.idempotent_id('dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf')
def test_server_name_blank(self):
# Create a server with name parameter empty
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
name='')
@test.attr(type=['negative'])
@test.idempotent_id('b8a7235e-5246-4a8f-a08e-b34877c6586f')
def test_personality_file_contents_not_encoded(self):
# Use an unencoded file when creating a server with personality
file_contents = 'This is a test file.'
person = [{'path': '/etc/testfile.txt',
'contents': file_contents}]
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
personality=person)
@test.attr(type=['negative'])
@test.idempotent_id('fcba1052-0a50-4cf3-b1ac-fae241edf02f')
def test_create_with_invalid_image(self):
# Create a server with an unknown image
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
image_id=-1)
@test.attr(type=['negative'])
@test.idempotent_id('18f5227f-d155-4429-807c-ccb103887537')
def test_create_with_invalid_flavor(self):
# Create a server with an unknown flavor
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
flavor=-1,)
@test.attr(type=['negative'])
@test.idempotent_id('7f70a4d1-608f-4794-9e56-cb182765972c')
def test_invalid_access_ip_v4_address(self):
# An access IPv4 address must match a valid address pattern
IPv4 = '1.1.1.1.1.1'
self.assertRaises(lib_exc.BadRequest,
self.create_test_server, accessIPv4=IPv4)
@test.attr(type=['negative'])
@test.idempotent_id('5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0')
def test_invalid_ip_v6_address(self):
# An access IPv6 address must match a valid address pattern
IPv6 = 'notvalid'
self.assertRaises(lib_exc.BadRequest,
self.create_test_server, accessIPv6=IPv6)
@test.idempotent_id('7ea45b3e-e770-46fa-bfcc-9daaf6d987c0')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative'])
def test_resize_nonexistent_server(self):
# Resize a non-existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.resize,
nonexistent_server, self.flavor_ref)
@test.idempotent_id('ced1a1d7-2ab6-45c9-b90f-b27d87b30efd')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative'])
def test_resize_server_with_non_existent_flavor(self):
# Resize a server with non-existent flavor
nonexistent_flavor = data_utils.rand_uuid()
self.assertRaises(lib_exc.BadRequest, self.client.resize,
self.server_id, flavor_ref=nonexistent_flavor)
@test.idempotent_id('45436a7d-a388-4a35-a9d8-3adc5d0d940b')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative'])
def test_resize_server_with_null_flavor(self):
# Resize a server with null flavor
self.assertRaises(lib_exc.BadRequest, self.client.resize,
self.server_id, flavor_ref="")
@test.attr(type=['negative'])
@test.idempotent_id('d4c023a0-9c55-4747-9dd5-413b820143c7')
def test_reboot_non_existent_server(self):
# Reboot a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.reboot,
nonexistent_server, 'SOFT')
@test.idempotent_id('d1417e7f-a509-41b5-a102-d5eed8613369')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative'])
def test_pause_paused_server(self):
# Pause a paused server.
self.client.pause_server(self.server_id)
self.client.wait_for_server_status(self.server_id, 'PAUSED')
self.assertRaises(lib_exc.Conflict,
self.client.pause_server,
self.server_id)
self.client.unpause_server(self.server_id)
@test.attr(type=['negative'])
@test.idempotent_id('98fa0458-1485-440f-873b-fe7f0d714930')
def test_rebuild_reboot_deleted_server(self):
# Rebuild and Reboot a deleted server
server = self.create_test_server()
self.client.delete_server(server['id'])
self.client.wait_for_server_termination(server['id'])
self.assertRaises(lib_exc.NotFound,
self.client.rebuild,
server['id'], self.image_ref_alt)
self.assertRaises(lib_exc.NotFound, self.client.reboot,
server['id'], 'SOFT')
@test.attr(type=['negative'])
@test.idempotent_id('d86141a7-906e-4731-b187-d64a2ea61422')
def test_rebuild_non_existent_server(self):
# Rebuild a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.rebuild,
nonexistent_server,
self.image_ref_alt)
@test.attr(type=['negative'])
@test.idempotent_id('fd57f159-68d6-4c2a-902b-03070828a87e')
def test_create_numeric_server_name(self):
server_name = 12345
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
name=server_name)
@test.attr(type=['negative'])
@test.idempotent_id('c3e0fb12-07fc-4d76-a22e-37409887afe8')
def test_create_server_name_length_exceeds_256(self):
# Create a server with name length exceeding 256 characters
server_name = 'a' * 256
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
name=server_name)
@test.attr(type=['negative'])
@test.idempotent_id('4e72dc2d-44c5-4336-9667-f7972e95c402')
def test_create_with_invalid_network_uuid(self):
# Pass invalid network uuid while creating a server
networks = [{'fixed_ip': '10.0.1.1', 'uuid': 'a-b-c-d-e-f-g-h-i-j'}]
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
networks=networks)
@test.attr(type=['negative'])
@test.idempotent_id('7a2efc39-530c-47de-b875-2dd01c8d39bd')
def test_create_with_non_existent_keypair(self):
# Pass a non-existent keypair while creating a server
key_name = data_utils.rand_name('key')
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
key_name=key_name)
@test.attr(type=['negative'])
@test.idempotent_id('7fc74810-0bd2-4cd7-8244-4f33a9db865a')
def test_create_server_metadata_exceeds_length_limit(self):
# Pass really long metadata while creating a server
metadata = {'a': 'b' * 260}
self.assertRaises((lib_exc.BadRequest, lib_exc.OverLimit),
self.create_test_server,
meta=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('aa8eed43-e2cb-4ebf-930b-da14f6a21d81')
def test_update_name_of_non_existent_server(self):
# Update name of a non-existent server
server_name = data_utils.rand_name('server')
new_name = data_utils.rand_name('server') + '_updated'
self.assertRaises(lib_exc.NotFound, self.client.update_server,
server_name, name=new_name)
@test.attr(type=['negative'])
@test.idempotent_id('38204696-17c6-44da-9590-40f87fb5a899')
def test_update_server_set_empty_name(self):
# Update name of the server to an empty string
server_name = data_utils.rand_name('server')
new_name = ''
self.assertRaises(lib_exc.BadRequest, self.client.update_server,
server_name, name=new_name)
@test.attr(type=['negative'])
@test.idempotent_id('543d84c1-dd2e-4c6d-8cb2-b9da0efaa384')
def test_update_server_of_another_tenant(self):
# Update name of a server that belongs to another tenant
new_name = self.server_id + '_new'
self.assertRaises(lib_exc.NotFound,
self.alt_client.update_server, self.server_id,
name=new_name)
@test.attr(type=['negative'])
@test.idempotent_id('5c8e244c-dada-4590-9944-749c455b431f')
def test_update_server_name_length_exceeds_256(self):
        # Update the server name to exceed the name length limit
new_name = 'a' * 256
self.assertRaises(lib_exc.BadRequest,
self.client.update_server,
self.server_id,
name=new_name)
@test.attr(type=['negative'])
@test.idempotent_id('1041b4e6-514b-4855-96a5-e974b60870a3')
def test_delete_non_existent_server(self):
# Delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.delete_server,
nonexistent_server)
@test.attr(type=['negative'])
@test.idempotent_id('5c75009d-3eea-423e-bea3-61b09fd25f9c')
def test_delete_a_server_of_another_tenant(self):
# Delete a server that belongs to another tenant
self.assertRaises(lib_exc.NotFound,
self.alt_client.delete_server,
self.server_id)
@test.attr(type=['negative'])
@test.idempotent_id('75f79124-277c-45e6-a373-a1d6803f4cc4')
def test_delete_server_pass_negative_id(self):
        # Pass a negative server ID to delete a server
self.assertRaises(lib_exc.NotFound, self.client.delete_server, -1)
@test.attr(type=['negative'])
@test.idempotent_id('f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5')
def test_delete_server_pass_id_exceeding_length_limit(self):
# Pass a server ID that exceeds length limit to delete server
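        # NOTE: sys.maxint exists only on Python 2; it is used here just to
        # build an ID value beyond the expected length limit.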
self.assertRaises(lib_exc.NotFound, self.client.delete_server,
sys.maxint + 1)
@test.attr(type=['negative'])
@test.idempotent_id('c5fa6041-80cd-483b-aa6d-4e45f19d093c')
def test_create_with_nonexistent_security_group(self):
# Create a server with a nonexistent security group
security_groups = [{'name': 'does_not_exist'}]
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
security_groups=security_groups)
@test.attr(type=['negative'])
@test.idempotent_id('3436b02f-1b1e-4f03-881e-c6a602327439')
def test_get_non_existent_server(self):
        # Get the details of a non-existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.get_server,
nonexistent_server)
@test.attr(type=['negative'])
@test.idempotent_id('a31460a9-49e1-42aa-82ee-06e0bb7c2d03')
def test_stop_non_existent_server(self):
# Stop a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.servers_client.stop,
nonexistent_server)
@test.idempotent_id('6a8dc0c6-6cd4-4c0a-9f32-413881828091')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative'])
def test_pause_non_existent_server(self):
# pause a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.pause_server,
nonexistent_server)
@test.idempotent_id('705b8e3a-e8a7-477c-a19b-6868fc24ac75')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative'])
def test_unpause_non_existent_server(self):
# unpause a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.unpause_server,
nonexistent_server)
@test.idempotent_id('c8e639a7-ece8-42dd-a2e0-49615917ba4f')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative'])
def test_unpause_server_invalid_state(self):
# unpause an active server.
self.assertRaises(lib_exc.Conflict,
self.client.unpause_server,
self.server_id)
@test.idempotent_id('d1f032d5-7b6e-48aa-b252-d5f16dd994ca')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative'])
def test_suspend_non_existent_server(self):
# suspend a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.suspend_server,
nonexistent_server)
@test.idempotent_id('7f323206-05a9-4bf8-996b-dd5b2036501b')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative'])
def test_suspend_server_invalid_state(self):
# suspend a suspended server.
self.client.suspend_server(self.server_id)
self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
self.assertRaises(lib_exc.Conflict,
self.client.suspend_server,
self.server_id)
self.client.resume_server(self.server_id)
@test.idempotent_id('221cd282-bddb-4837-a683-89c2487389b6')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative'])
def test_resume_non_existent_server(self):
# resume a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.resume_server,
nonexistent_server)
@test.idempotent_id('ccb6294d-c4c9-498f-8a43-554c098bfadb')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative'])
def test_resume_server_invalid_state(self):
# resume an active server.
self.assertRaises(lib_exc.Conflict,
self.client.resume_server,
self.server_id)
@test.attr(type=['negative'])
@test.idempotent_id('7dd919e7-413f-4198-bebb-35e2a01b13e9')
def test_get_console_output_of_non_existent_server(self):
# get the console output for a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.get_console_output,
nonexistent_server, 10)
@test.attr(type=['negative'])
@test.idempotent_id('6f47992b-5144-4250-9f8b-f00aa33950f3')
def test_force_delete_nonexistent_server_id(self):
# force-delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.force_delete_server,
nonexistent_server)
@test.attr(type=['negative'])
@test.idempotent_id('9c6d38cc-fcfb-437a-85b9-7b788af8bf01')
def test_restore_nonexistent_server_id(self):
        # try to restore a non-existent server from soft delete
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.restore_soft_deleted_server,
nonexistent_server)
@test.attr(type=['negative'])
@test.idempotent_id('7fcadfab-bd6a-4753-8db7-4a51e51aade9')
def test_restore_server_invalid_state(self):
# we can only restore-delete a server in 'soft-delete' state
self.assertRaises(lib_exc.Conflict,
self.client.restore_soft_deleted_server,
self.server_id)
@test.idempotent_id('abca56e2-a892-48ea-b5e5-e07e69774816')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative'])
def test_shelve_non_existent_server(self):
# shelve a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.shelve_server,
nonexistent_server)
@test.idempotent_id('443e4f9b-e6bf-4389-b601-3a710f15fddd')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative'])
def test_shelve_shelved_server(self):
# shelve a shelved server.
self.client.shelve_server(self.server_id)
offload_time = CONF.compute.shelved_offload_time
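        # A shelved_offload_time of zero or more means the compute service
        # offloads the shelved server to SHELVED_OFFLOADED; -1 keeps it in
        # SHELVED indefinitely.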
if offload_time >= 0:
self.client.wait_for_server_status(self.server_id,
'SHELVED_OFFLOADED',
extra_timeout=offload_time)
else:
self.client.wait_for_server_status(self.server_id,
'SHELVED')
server = self.client.get_server(self.server_id)
image_name = server['name'] + '-shelved'
params = {'name': image_name}
images = self.images_client.list_images(params)
self.assertEqual(1, len(images))
self.assertEqual(image_name, images[0]['name'])
self.assertRaises(lib_exc.Conflict,
self.client.shelve_server,
self.server_id)
self.client.unshelve_server(self.server_id)
@test.idempotent_id('23d23b37-afaf-40d7-aa5d-5726f82d8821')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative'])
def test_unshelve_non_existent_server(self):
# unshelve a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.unshelve_server,
nonexistent_server)
@test.idempotent_id('8f198ded-1cca-4228-9e65-c6b449c54880')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative'])
def test_unshelve_server_invalid_state(self):
# unshelve an active server.
self.assertRaises(lib_exc.Conflict,
self.client.unshelve_server,
self.server_id)
| apache-2.0 | 8,747,659,404,350,897,000 | 40.832016 | 78 | 0.617376 | false |
HewlettPackard/python-hpOneView | tests/unit/image_streamer/resources/test_deployment_groups.py | 2 | 2767 | # -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from unittest import TestCase
import mock
from hpOneView.connection import connection
from hpOneView.image_streamer.resources.deployment_groups import DeploymentGroups
from hpOneView.resources.resource import ResourceClient
class DeploymentGroupsTest(TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host)
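        # ResourceClient calls are mocked in the individual tests, so this
        # connection never reaches a real OneView appliance.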
self._client = DeploymentGroups(self.connection)
@mock.patch.object(ResourceClient, 'get_all')
def test_get_all_called_once(self, mock_get_all):
filter = 'name=TestName'
sort = 'name:ascending'
self._client.get_all(2, 500, filter, sort)
mock_get_all.assert_called_once_with(2, 500, filter=filter, sort=sort)
@mock.patch.object(ResourceClient, 'get_all')
def test_get_all_called_once_with_default_values(self, mock_get_all):
self._client.get_all()
mock_get_all.assert_called_once_with(0, -1, filter='', sort='')
@mock.patch.object(ResourceClient, 'get')
def test_get_called_once(self, mock_get):
self._client.get('57f2d803-9c11-4f9a-bc02-71804a0fcc3e')
mock_get.assert_called_once_with('57f2d803-9c11-4f9a-bc02-71804a0fcc3e')
@mock.patch.object(ResourceClient, 'get_by')
def test_get_by_called_once(self, mock_get_by):
self._client.get_by('name', 'OSS')
mock_get_by.assert_called_once_with('name', 'OSS')
@mock.patch.object(ResourceClient, 'get_by')
def test_get_by_name_called_once(self, mock_get_by):
self._client.get_by_name('OSS')
mock_get_by.assert_called_once_with('name', 'OSS')
| mit | -5,970,541,325,618,006,000 | 38.528571 | 81 | 0.716661 | false |
andmos/ansible | lib/ansible/modules/network/f5/bigip_iapp_template.py | 9 | 18993 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_iapp_template
short_description: Manages TCL iApp templates on a BIG-IP
description:
- Manages TCL iApp templates on a BIG-IP. This module will allow you to
deploy iApp templates to the BIG-IP and manage their lifecycle. The
conventional way to use this module is to import new iApps as needed
or by extracting the contents of the iApp archive that is provided at
downloads.f5.com and then importing all the iApps with this module.
This module can also update existing iApps provided that the source
of the iApp changed while the name stayed the same. Note however that
this module will not reconfigure any services that may have been
created using the C(bigip_iapp_service) module. iApps are normally
not updated in production. Instead, new versions are deployed and then
existing services are changed to consume that new template. As such,
the ability to update templates in-place requires the C(force) option
to be used.
version_added: 2.4
options:
force:
description:
- Specifies whether or not to force the uploading of an iApp. When
C(yes), will force update the iApp even if there are iApp services
using it. This will not update the running service though. Use
C(bigip_iapp_service) to do that. When C(no), will update the iApp
only if there are no iApp services using the template.
type: bool
name:
description:
- The name of the iApp template that you want to delete. This option
is only available when specifying a C(state) of C(absent) and is
provided as a way to delete templates that you may no longer have
the source of.
content:
description:
- Sets the contents of an iApp template directly to the specified
value. This is for simple values, but can be used with lookup
plugins for anything complex or with formatting. C(content) must
be provided when creating new templates.
state:
description:
- Whether the iApp template should exist or not.
default: present
choices:
- present
- absent
partition:
description:
- Device partition to manage resources on.
default: Common
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Add the iApp contained in template iapp.tmpl
bigip_iapp_template:
content: "{{ lookup('template', 'iapp.tmpl') }}"
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Update a template in place
bigip_iapp_template:
content: "{{ lookup('template', 'iapp-new.tmpl') }}"
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Update a template in place that has existing services created from it.
bigip_iapp_template:
content: "{{ lookup('template', 'iapp-new.tmpl') }}"
force: yes
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import re
import uuid
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.icontrol import upload_file
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.icontrol import upload_file
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Parameters(AnsibleF5Parameters):
api_attributes = []
returnables = []
@property
def name(self):
if self._values['name']:
return self._values['name']
if self._values['content']:
name = self._get_template_name()
return name
return None
@property
def content(self):
if self._values['content'] is None:
return None
result = self._squash_template_name_prefix()
result = self._replace_template_name(result)
return result
@property
def checksum(self):
return self._values['tmplChecksum']
def _squash_template_name_prefix(self):
"""Removes the template name prefix
        This method removes the partition prefix (such as /Common/) from the name
in the iApp so that comparisons can be done properly and entries
can be created properly when using REST.
:return string
"""
pattern = r'sys\s+application\s+template\s+/Common/'
replace = 'sys application template '
return re.sub(pattern, replace, self._values['content'])
def _replace_template_name(self, template):
"""Replaces template name at runtime
To allow us to do the switch-a-roo with temporary templates and
checksum comparisons, we need to take the template provided to us
and change its name to a temporary value so that BIG-IP will create
a clone for us.
:return string
"""
pattern = r'sys\s+application\s+template\s+[^ ]+'
if self._values['name']:
name = self._values['name']
else:
name = self._get_template_name()
replace = 'sys application template {0}'.format(fq_name(self.partition, name))
return re.sub(pattern, replace, template)
def _get_template_name(self):
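        # Parse the template name (and optional /partition/ prefix) out of the
        # 'sys application template ...' declaration in the provided content.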
pattern = r'sys\s+application\s+template\s+(?P<path>\/[^\{}"\'*?|#]+\/)?(?P<name>[^\{}"\'*?|#]+)'
matches = re.search(pattern, self._values['content'])
try:
result = matches.group('name').strip()
except IndexError:
result = None
if result:
return result
raise F5ModuleError(
"No template name was found in the template"
)
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def create(self):
if self.module.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the iApp template")
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the iApp template")
return True
def update(self):
self.have = self.read_current_from_device()
if not self.templates_differ():
return False
if not self.want.force and self.template_in_use():
return False
if self.module.check_mode:
return True
self._remove_iapp_checksum()
# The same process used for creating (load) can be used for updating
self.create_on_device()
self._generate_template_checksum_on_device()
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/sys/application/template/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def template_in_use(self):
uri = "https://{0}:{1}/mgmt/tm/sys/application/service/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
name = fq_name(self.want.partition, self.want.name)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
for item in response['items']:
if item['template'] == name:
return True
return False
def read_current_from_device(self):
self._generate_template_checksum_on_device()
uri = "https://{0}:{1}/mgmt/tm/sys/application/template/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def _remove_iapp_checksum(self):
"""Removes the iApp tmplChecksum
This is required for updating in place or else the load command will
fail with a "AppTemplate ... content does not match the checksum"
error.
:return:
"""
uri = "https://{0}:{1}/mgmt/tm/sys/application/template/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
params = dict(tmplChecksum=None)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def templates_differ(self):
# BIG-IP can generate checksums of iApps, but the iApp needs to be
# on the box to do this. Additionally, the checksum is MD5, but it
# is not an MD5 of the entire content of the template. Instead, it
# is a hash of some portion of the template that is unknown to me.
#
# The code below is responsible for uploading the provided template
# under a unique name and creating a checksum for it so that that
# checksum can be compared to the one of the existing template.
#
# Using this method we can compare the checksums of the existing
# iApp and the iApp that the user is providing to the module.
backup = self.want.name
# Override whatever name may have been provided so that we can
# temporarily create a new template to test checksums with
self.want.update({
'name': 'ansible-{0}'.format(str(uuid.uuid4()))
})
# Create and remove temporary template
temp = self._get_temporary_template()
# Set the template name back to what it was originally so that
# any future operations only happen on the real template.
self.want.update({
'name': backup
})
if temp.checksum != self.have.checksum:
return True
return False
def _get_temporary_template(self):
self.create_on_device()
temp = self.read_current_from_device()
self.remove_from_device()
return temp
def _generate_template_checksum_on_device(self):
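        # The checksum is (re)generated by shelling out to tmsh via the
        # iControl REST util/bash endpoint so that read_current_from_device()
        # sees an up-to-date tmplChecksum.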
command = 'tmsh generate sys application template {0} checksum'.format(
self.want.name
)
params = dict(
command="run",
utilCmdArgs='-c "{0}"'.format(command)
)
uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def upload_file_to_device(self, content, name):
url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
try:
upload_file(self.client, url, content, name)
except F5ModuleError:
raise F5ModuleError(
"Failed to upload the file."
)
def create_on_device(self):
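        # Files sent to /mgmt/shared/file-transfer/uploads are stored under
        # /var/config/rest/downloads on the BIG-IP, which is why tmsh loads
        # the template from that path.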
remote_path = "/var/config/rest/downloads/{0}".format(self.want.name)
load_command = 'tmsh load sys application template {0}'.format(remote_path)
template = StringIO(self.want.content)
self.upload_file_to_device(template, self.want.name)
uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
self.client.provider['server'],
self.client.provider['server_port']
)
params = dict(
command="run",
utilCmdArgs='-c "{0}"'.format(load_command)
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
if 'commandResult' in response:
if 'Syntax Error' in response['commandResult']:
raise F5ModuleError(response['commandResult'])
if 'ERROR' in response['commandResult']:
raise F5ModuleError(response['commandResult'])
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/application/template/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(),
state=dict(
default='present',
choices=['present', 'absent']
),
force=dict(
type='bool'
),
content=dict(),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,706,995,137,844,186,000 | 31.916811 | 105 | 0.612384 | false |
LUTAN/tensorflow | tensorflow/python/ops/image_ops_test.py | 10 | 100947 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import math
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class RGBToHSVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.test_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllClose(batch2, inp)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in [np.float32, np.float64]:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.test_session(use_gpu=True):
hsv = image_ops.rgb_to_hsv(rgb_np)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = rgb.eval()
self.assertAllClose(rgb_tf, rgb_np)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
def _RGBToGrayscale(self, images):
is_batch = True
if len(images.shape) == 3:
is_batch = False
images = np.expand_dims(images, axis=0)
out_shape = images.shape[0:3] + (1,)
out = np.zeros(shape=out_shape, dtype=np.uint8)
for batch in xrange(images.shape[0]):
for y in xrange(images.shape[1]):
for x in xrange(images.shape[2]):
red = images[batch, y, x, 0]
green = images[batch, y, x, 1]
blue = images[batch, y, x, 2]
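          # Reference luma conversion using the ITU-R BT.601 weights that
          # rgb_to_grayscale is expected to apply.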
gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
out[batch, y, x, 0] = int(gray)
if not is_batch:
out = np.squeeze(out, axis=0)
return out
def _TestRGBToGrayscale(self, x_np):
y_np = self._RGBToGrayscale(x_np)
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.rgb_to_grayscale(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testBasicRGBToGrayscale(self):
# 4-D input with batch dimension.
x_np = np.array(
[[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
self._TestRGBToGrayscale(x_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
self._TestRGBToGrayscale(x_np)
def testBasicGrayscaleToRGB(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
y_np = np.array(
[[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testShapeInference(self):
# Shape inference works and produces expected output where possible
rgb_shape = [7, None, 19, 3]
gray_shape = rgb_shape[:-1] + [1]
with self.test_session(use_gpu=True):
rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
gray = image_ops.rgb_to_grayscale(rgb_tf)
self.assertEqual(gray_shape, gray.get_shape().as_list())
with self.test_session(use_gpu=True):
gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
rgb = image_ops.grayscale_to_rgb(gray_tf)
self.assertEqual(rgb_shape, rgb.get_shape().as_list())
# Shape inference does not break for unknown shapes
with self.test_session(use_gpu=True):
rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
self.assertFalse(gray_unknown.get_shape())
with self.test_session(use_gpu=True):
gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
self.assertFalse(rgb_unknown.get_shape())
class AdjustGamma(test_util.TensorFlowTestCase):
def test_adjust_gamma_one(self):
"""Same image should be returned for gamma equal to one"""
with self.test_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=1)
y_tf = y.eval()
y_np = x_np
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_zero(self):
"""White image should be returned for gamma equal to zero"""
with self.test_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=0)
y_tf = y.eval()
dtype = x.dtype.as_numpy_dtype
y_np = np.array([dtypes.dtype_range[dtype][1]] * x_np.size)
y_np = y_np.reshape((8, 8))
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_less_one(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half"""
with self.test_session():
x_np = np.arange(0, 255, 4, np.uint8).reshape(8, 8)
y = image_ops.adjust_gamma(x_np, gamma=0.5)
y_tf = np.trunc(y.eval())
y_np = np.array(
[[0, 31, 45, 55, 63, 71, 78, 84],
[90, 95, 100, 105, 110, 115, 119, 123],
[127, 131, 135, 139, 142, 146, 149, 153],
[156, 159, 162, 165, 168, 171, 174, 177],
[180, 183, 186, 188, 191, 194, 196, 199],
[201, 204, 206, 209, 211, 214, 216, 218],
[221, 223, 225, 228, 230, 232, 234, 236],
[238, 241, 243, 245, 247, 249, 251, 253]],
dtype=np.float32)
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_greater_one(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to two"""
with self.test_session():
x_np = np.arange(0, 255, 4, np.uint8).reshape(8, 8)
y = image_ops.adjust_gamma(x_np, gamma=2)
y_tf = np.trunc(y.eval())
y_np = np.array(
[[0, 0, 0, 0, 1, 1, 2, 3],
[4, 5, 6, 7, 9, 10, 12, 14],
[16, 18, 20, 22, 25, 27, 30, 33],
[36, 39, 42, 45, 49, 52, 56, 60],
[64, 68, 72, 76, 81, 85, 90, 95],
[100, 105, 110, 116, 121, 127, 132, 138],
[144, 150, 156, 163, 169, 176, 182, 189],
[196, 203, 211, 218, 225, 233, 241, 249]],
dtype=np.float32)
self.assertAllClose(y_tf, y_np, 1e-6)
class AdjustHueTest(test_util.TensorFlowTestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
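      # Wrap the shifted hue back into [0, 1); adding 10.0 keeps the fmod
      # argument positive even for negative deltas.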
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_hue(x, delta_h)
y_tf = y.eval()
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError("input must have 3 channels"):
self._adjustHueTf(x_np, delta_h)
class AdjustHueBenchmark(test.Benchmark):
def _benchmarkAdjustHue(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(
image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_hue(inputs, delta)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = "%s" % (cpu_count) if cpu_count is not None else "_all"
print("benchmarkAdjustHue_299_299_3_cpu%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustHue_299_299_3_cpu%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustHueCpu1(self):
self._benchmarkAdjustHue("/cpu:0", 1)
def benchmarkAdjustHueCpuAll(self):
self._benchmarkAdjustHue("/cpu:0", None)
def benchmarkAdjustHueGpu(self):
self._benchmarkAdjustHue(test.gpu_device_name(), None)
class AdjustSaturationBenchmark(test.Benchmark):
def _benchmarkAdjustSaturation(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(
image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_saturation(inputs, delta)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for _ in xrange(warmup_rounds):
sess.run(run_op)
start = time.time()
for _ in xrange(benchmark_rounds):
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = "%s" % (cpu_count) if cpu_count is not None else "_all"
print("benchmarkAdjustSaturation_599_599_3_cpu%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustSaturation_599_599_3_cpu%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustSaturationCpu1(self):
self._benchmarkAdjustSaturation("/cpu:0", 1)
def benchmarkAdjustSaturationCpuAll(self):
self._benchmarkAdjustSaturation("/cpu:0", None)
def benchmarkAdjustSaturationGpu(self):
self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
class ResizeBilinearBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
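    # Chain the resize ops with control dependencies so they execute
    # sequentially and the benchmark measures num_ops separate runs.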
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bilinear(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_bilinear_%s_%s_%s" %
(image_size[0], image_size[1], num_channels)))
print("%s : %.2f ms/img" % (results["name"], 1000 * results["wall_time"]
/ (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class ResizeBicubicBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bicubic(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
min_iters=20,
name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" % (results["name"], 1000 * results["wall_time"]
/ (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
def benchmarkSimilar4Channel(self):
self._benchmarkResize((183, 229), 4)
def benchmarkScaleUp4Channel(self):
self._benchmarkResize((141, 186), 4)
def benchmarkScaleDown4Channel(self):
self._benchmarkResize((749, 603), 4)
class ResizeAreaBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal([batch_size, image_size[0],
image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess, benchmark_op,
name=("resize_area_%s_%s_%s" %
(image_size[0], image_size[1], num_channels)))
print("%s : %.2f ms/img" % (
results["name"],
1000*results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class AdjustSaturationTest(test_util.TensorFlowTestCase):
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def _adjust_saturation(self, image, saturation_factor):
image = ops.convert_to_tensor(image, name="image")
orig_dtype = image.dtype
flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
saturation_adjusted_image = gen_image_ops.adjust_saturation(
flt_image, saturation_factor)
return image_ops.convert_image_dtype(saturation_adjusted_image,
orig_dtype)
def testHalfSaturationFused(self):
x_shape = [2, 2, 3]
x_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_rgb_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_rgb_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_rgb_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturationFused(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
with self.test_session():
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
y_fused = self._adjust_saturation(x_np, scale).eval()
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class FlipTransposeRotateTest(test_util.TensorFlowTestCase):
def testIdempotentLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testRandomFlipLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf)
count_flipped = 0
count_unflipped = 0
for _ in range(50):
y_tf = y.eval()
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertGreaterEqual(count_flipped, 1)
self.assertGreaterEqual(count_unflipped, 1)
def testIdempotentUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testRandomFlipUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf)
count_flipped = 0
count_unflipped = 0
for _ in range(50):
y_tf = y.eval()
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertGreaterEqual(count_flipped, 1)
self.assertGreaterEqual(count_unflipped, 1)
def testIdempotentTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(image_ops.transpose_image(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testPartialShapes(self):
p_unknown_rank = array_ops.placeholder(dtypes.uint8)
p_unknown_dims = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None])
p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose_image, image_ops.rot90
]:
transformed_unknown_rank = op(p_unknown_rank)
self.assertEqual(3, transformed_unknown_rank.get_shape().ndims)
transformed_unknown_dims = op(p_unknown_dims)
self.assertEqual(3, transformed_unknown_dims.get_shape().ndims)
transformed_unknown_width = op(p_unknown_width)
self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
with self.assertRaisesRegexp(ValueError, "must be three-dimensional"):
op(p_wrong_rank)
with self.assertRaisesRegexp(ValueError, "must be > 0"):
op(p_zero_dim)
def testRot90GroupOrder(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.test_session(use_gpu=True):
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, rotated.eval())
def testRot90NumpyEquivalence(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.test_session(use_gpu=True):
k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
y_tf = image_ops.rot90(image, k_placeholder)
for k in xrange(4):
y_np = np.rot90(image, k=k)
self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
class RandomFlipTest(test_util.TensorFlowTestCase):
def testRandomLeftRight(self):
x_np = np.array([0, 1], dtype=np.uint8).reshape([1, 2, 1])
num_iterations = 500
hist = [0, 0]
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf)
for _ in xrange(num_iterations):
y_np = y.eval().flatten()[0]
hist[y_np] += 1
# Ensure that each entry is observed within 4 standard deviations.
four_stddev = 4.0 * np.sqrt(num_iterations / 2.0)
self.assertAllClose(hist, [num_iterations / 2.0] * 2, atol=four_stddev)
def testRandomUpDown(self):
x_np = np.array([0, 1], dtype=np.uint8).reshape([2, 1, 1])
num_iterations = 500
hist = [0, 0]
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf)
for _ in xrange(num_iterations):
y_np = y.eval().flatten()[0]
hist[y_np] += 1
# Ensure that each entry is observed within 4 standard deviations.
four_stddev = 4.0 * np.sqrt(num_iterations / 2.0)
self.assertAllClose(hist, [num_iterations / 2.0] * 2, atol=four_stddev)
class AdjustContrastTest(test_util.TensorFlowTestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = y.eval()
self.assertAllClose(y_tf, y_np, 1e-6)
def testDoubleContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testDoubleContrastFloat(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
y_np = np.array(y_data, dtype=np.float).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testHalfContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=0.5)
def testBatchDoubleContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
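    # Contrast is adjusted about the per-image, per-channel spatial mean:
    # y = mean + contrast_factor * (x - mean).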
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
def _adjustContrastTf(self, x_np, contrast_factor):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = y.eval()
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
def _testBrightness(self, x_np, y_np, delta):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_brightness(x, delta)
y_tf = y.eval()
self.assertAllClose(y_tf, y_np, 1e-6)
def testPositiveDeltaUint8(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testNegativeDelta(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=-10. / 255.)
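# Illustrative sketch (not part of the original tests): adjust_brightness adds
# `delta` on a [0, 1] scale and, for integer images, the result saturates at
# the dtype's limits. A rough NumPy analogue of the uint8 cases above
# (hypothetical helper, an approximation rather than the exact TF kernel):
def _adjust_brightness_uint8_np(x_np, delta):
  """Approximate uint8 brightness adjustment by `delta` (in [0, 1] units)."""
  y = x_np.astype(np.float32) / 255. + delta
  return np.clip(np.round(y * 255.), 0, 255).astype(np.uint8)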
class PerImageWhiteningTest(test_util.TensorFlowTestCase):
def _NumpyPerImageWhitening(self, x):
num_pixels = np.prod(x.shape)
x2 = np.square(x).astype(np.float32)
mn = np.mean(x)
vr = np.mean(x2) - (mn * mn)
stddev = max(math.sqrt(vr), 1.0 / math.sqrt(num_pixels))
y = x.astype(np.float32)
y -= mn
y /= stddev
return y
def testBasic(self):
x_shape = [13, 9, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=np.int32).reshape(x_shape)
y_np = self._NumpyPerImageWhitening(x_np)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.per_image_standardization(x)
y_tf = y.eval()
self.assertAllClose(y_tf, y_np, atol=1e-4)
def testUniformImage(self):
im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
im = constant_op.constant(im_np)
whiten = image_ops.per_image_standardization(im)
with self.test_session(use_gpu=True):
whiten_np = whiten.eval()
self.assertFalse(np.any(np.isnan(whiten_np)))
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._CropToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, 0, 0, x, x_shape)
def testCrop(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y_shape = [2, 3, 1]
y = [4, 5, 6, 7, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y_shape = [3, 2, 1]
y = [2, 3, 5, 6, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [2, 3, 1]
y = [1, 2, 3, 4, 5, 6]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [3, 2, 1]
y = [1, 2, 4, 5, 7, 8]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5],):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"'image' must be at least three-dimensional.")
for x_shape in ([1, 3, 5, 1, 1],):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"'image' must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # Multiple assertions could fail, but the evaluation order is arbitrary.
      # Match against the generic pattern.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"assertion failed:",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# (offset_height, offset_width, target_height, target_width), err_msg
test_config = (([-1, 0, 3, 3], "offset_height must be >= 0"),
([0, -1, 3, 3], "offset_width must be >= 0"),
([0, 0, 0, 3], "target_height must be > 0"),
([0, 0, 3, 0], "target_width must be > 0"),
([2, 0, 3, 3], "height must be >= target + offset"),
([0, 2, 3, 3], "width must be >= target + offset"))
for params, err_msg in test_config:
self._assertRaises(x, x_shape, *params, err_msg=err_msg)
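# Illustrative sketch (not part of the original tests): for a 3-D HWC image,
# crop_to_bounding_box is equivalent to a plain NumPy slice. This hypothetical
# helper mirrors the expected values asserted in CropToBoundingBoxTest above.
def _crop_to_bounding_box_np(x_np, offset_height, offset_width, target_height,
                             target_width):
  """NumPy analogue of image_ops.crop_to_bounding_box for HWC images."""
  return x_np[offset_height:offset_height + target_height,
              offset_width:offset_width + target_width, :]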
class CentralCropTest(test_util.TensorFlowTestCase):
def _assertShapeInference(self, pre_shape, fraction, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.central_crop(image, fraction)
if post_shape is None:
self.assertEqual(y.get_shape().dims, None)
else:
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 1.0)
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
self.assertEqual(y.op.name, x.op.name)
def testCropping(self):
x_shape = [4, 8, 1]
x_np = np.array([[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testShapeInference(self):
# Test no-op fraction=1.0
self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
self._assertShapeInference([None, None, None], 1.0, [None, None, None])
self._assertShapeInference(None, 1.0, None)
# TODO(toddw): Currently central_crop() doesn't infer the result shape even
# when it's possible. If we change it to do so, we can test as follows:
#
# self._assertShapeInference([50, 60, 3], 0.5, [25, 30, 3])
# self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
# self._assertShapeInference([50, None, 3], 0.5, [25, None, 3])
# self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
# self._assertShapeInference([50, 60, None], 0.5, [25, 30, None])
# self._assertShapeInference([None, None, None], 0.5, [None, None, None])
# self._assertShapeInference(None, 0.5, None)
def testError(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.0)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 1.01)
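# Illustrative sketch (not part of the original tests): central_crop keeps the
# central `fraction` of each spatial dimension. A rough NumPy analogue for HWC
# images (hypothetical, and only one of several possible rounding choices):
def _central_crop_np(x_np, fraction):
  """Approximate NumPy analogue of image_ops.central_crop."""
  h, w = x_np.shape[0], x_np.shape[1]
  off_h = int(h * (1 - fraction) / 2.0)
  off_w = int(w * (1 - fraction) / 2.0)
  return x_np[off_h:h - off_h, off_w:w - off_w, :]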
class PadToBoundingBoxTest(test_util.TensorFlowTestCase):
def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.pad_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._PadToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
offset_height, offset_width = [0, 0]
self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
def testPadding(self):
x = [1, 2, 3,
4, 5, 6,
7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y = [0, 0, 0,
1, 2, 3,
4, 5, 6,
7, 8, 9]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y = [0, 1, 2, 3,
0, 4, 5, 6,
0, 7, 8, 9]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3,
4, 5, 6,
7, 8, 9,
0, 0, 0]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 0,
4, 5, 6, 0,
7, 8, 9, 0]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5],):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"'image' must be at least three-dimensional")
for x_shape in ([1, 3, 5, 1, 1],):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"'image' must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 2, 2),
([2, 0, 2], 2, 2),
([2, 2, 0], 2, 2))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # The original error message does not contain backslashes. However, they
      # are added by either the assert op or the runtime. If this behaviour
      # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = ((-1, 0, 4, 4, "offset_height must be >= 0"),
(0, -1, 4, 4, "offset_width must be >= 0"),
(2, 0, 4, 4, "height must be <= target - offset"),
(0, 2, 4, 4, "width must be <= target - offset"))
for config_item in test_config:
self._assertRaises(x, x_shape, *config_item)
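# Illustrative sketch (not part of the original tests): pad_to_bounding_box is
# zero padding placed so that the original image starts at (offset_height,
# offset_width) inside the target canvas. A hypothetical NumPy analogue:
def _pad_to_bounding_box_np(x_np, offset_height, offset_width, target_height,
                            target_width):
  """NumPy analogue of image_ops.pad_to_bounding_box for HWC images."""
  pad_bottom = target_height - offset_height - x_np.shape[0]
  pad_right = target_width - offset_width - x_np.shape[1]
  return np.pad(x_np, ((offset_height, pad_bottom),
                       (offset_width, pad_right), (0, 0)),
                mode="constant")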
class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
def _testSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered, aspect_ratio_range,
area_range):
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (np.array(
bounding_box, dtype=np.float32).reshape([1, 1, 4]))
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
num_iter = 1000
with self.test_session(use_gpu=True):
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(
bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = y.eval()
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# Ensure that each entry is observed within 3 standard deviations.
# num_bins = 10
# aspect_ratio_hist, _ = np.histogram(aspect_ratios,
# bins=num_bins,
# range=aspect_ratio_range)
# mean = np.mean(aspect_ratio_hist)
# stddev = np.sqrt(mean)
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# TODO(irving): Since the rejection probability is not independent of the
# aspect ratio, the aspect_ratio random value is not exactly uniformly
# distributed in [min_aspect_ratio, max_aspect_ratio). This test should be
# fixed to reflect the true statistical property, then tightened to enforce
# a stricter bound. Or, ideally, the sample_distorted_bounding_box Op
# be fixed to not use rejection sampling and generate correctly uniform
# aspect ratios.
# self.assertAllClose(aspect_ratio_hist,
# [mean] * num_bins, atol=3.6 * stddev)
# The resulting crop will not be uniformly distributed in area. In practice,
# we find that the area skews towards the small sizes. Instead, we perform
# a weaker test to ensure that the area ratios are merely within the
# specified bounds.
self.assertLessEqual(max(area_ratios), area_range[1])
self.assertGreaterEqual(min(area_ratios), area_range[0])
    # For reference, here is what the distribution of area ratios looks like.
area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)
print("area_ratio_hist ", area_ratio_hist)
# Ensure that fraction_object_covered is satisfied.
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)
def testWholeImageBoundingBox(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
self._testSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testWithBoundingBox(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
# Create an object with 1's in a region with area A and require that
# the total pixel values >= 0.1 * A.
min_object_covered = 0.1
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
self._testSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_object_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testSampleDistortedBoundingBoxShape(self):
with self.test_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[0.0, 0.0, 1.0, 1.0],
shape=[4],
dtype=dtypes.float32,)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
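# Illustrative sketch (not part of the original tests): in a typical input
# pipeline the begin/size pair returned by sample_distorted_bounding_box feeds
# directly into a slice of the image. Hypothetical helper; `image` is assumed
# to be an HWC tensor and `boxes` a [1, N, 4] tensor of relative coordinates.
def _random_distorted_crop(image, boxes):
  """Crop `image` to a randomly sampled box overlapping `boxes` (sketch)."""
  begin, size, _ = image_ops.sample_distorted_bounding_box(
      image_size=array_ops.shape(image),
      bounding_boxes=boxes,
      min_object_covered=0.1,
      aspect_ratio_range=(0.75, 1.33),
      area_range=(0.05, 1.0))
  return array_ops.slice(image, begin, size)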
class ResizeImagesTest(test_util.TensorFlowTestCase):
OPTIONS = [image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC,
image_ops.ResizeMethod.AREA]
TYPES = [np.uint8, np.int8, np.int16, np.int32, np.int64,
np.float32, np.float64]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, opt, nptype):
if opt == image_ops.ResizeMethod.NEAREST_NEIGHBOR \
and nptype in [np.float32, np.float64]:
return True
else:
return False
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [127, 127, 64, 64,
127, 127, 64, 64,
64, 64, 127, 127,
64, 64, 127, 127,
50, 50, 100, 100,
50, 50, 100, 100]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for opt in self.OPTIONS:
if test.is_gpu_available() and self.shouldRunOnGPU(opt, nptype):
with self.test_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.test_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, [target_height, target_width],
self.OPTIONS[0])
yshape = array_ops.shape(y)
newshape = yshape.eval()
self.assertAllEqual(single_shape, newshape)
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [127, 127, 64, 64,
127, 127, 64, 64,
64, 64, 127, 127,
64, 64, 127, 127,
50, 50, 100, 100,
50, 50, 100, 100]
new_size = array_ops.placeholder(dtypes.int32, shape=(2))
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for opt in self.OPTIONS:
with self.test_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, new_size, opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
    with self.test_session(use_gpu=True) as sess:
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, new_size, self.OPTIONS[0])
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(single_shape, newshape)
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, 4.0],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [None, 4],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, None],
image_ops.ResizeMethod.BILINEAR)
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [127, 127, 64, 64,
127, 127, 64, 64,
64, 64, 127, 127,
64, 64, 127, 127,
50, 50, 100, 100,
50, 50, 100, 100]
# Test size where width is specified as a tensor which is a sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for opt in self.OPTIONS:
with self.test_session() as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [height, width], opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [127, 127, 64, 64,
127, 127, 64, 64,
64, 64, 127, 127,
64, 64, 127, 127,
50, 50, 100, 100,
50, 50, 100, 100]
expected_data = [127, 64,
64, 127,
50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for opt in self.OPTIONS:
if test.is_gpu_available() and self.shouldRunOnGPU(opt, nptype):
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
opt)
expected = np.array(expected_data).reshape(target_shape)
resized = y.eval()
self.assertAllClose(resized, expected, atol=1e-5)
def testResizeUp(self):
img_shape = [1, 3, 2, 1]
data = [64, 32,
32, 64,
50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
64.0, 48.0, 32.0, 32.0,
48.0, 48.0, 48.0, 48.0,
32.0, 48.0, 64.0, 64.0,
41.0, 61.5, 82.0, 82.0,
50.0, 75.0, 100.0, 100.0,
50.0, 75.0, 100.0, 100.0]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0,
64.0, 64.0, 32.0, 32.0,
32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0,
50.0, 50.0, 100.0, 100.0,
50.0, 50.0, 100.0, 100.0]
expected_data[image_ops.ResizeMethod.AREA] = [
64.0, 64.0, 32.0, 32.0,
64.0, 64.0, 32.0, 32.0,
32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0,
50.0, 50.0, 100.0, 100.0,
50.0, 50.0, 100.0, 100.0]
for nptype in self.TYPES:
for opt in [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.AREA]:
if test.is_gpu_available() and self.shouldRunOnGPU(opt, nptype):
with self.test_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], opt)
resized = y.eval()
expected = np.array(expected_data[opt]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpBicubic(self):
img_shape = [1, 6, 6, 1]
data = [128, 128, 64, 64, 128, 128, 64, 64,
64, 64, 128, 128, 64, 64, 128, 128,
50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 8
target_width = 8
expected_data = [128, 135, 96, 55, 64, 114, 134, 128,
78, 81, 68, 52, 57, 118, 144, 136,
55, 49, 79, 109, 103, 89, 83, 84,
74, 70, 95, 122, 115, 69, 49, 55,
100, 105, 75, 43, 50, 89, 105, 100,
57, 54, 74, 96, 91, 65, 55, 58,
70, 69, 75, 81, 80, 72, 69, 70,
105, 112, 75, 36, 45, 92, 111, 105]
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethod.BICUBIC)
resized = y.eval()
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [128, 64, 32, 16, 8, 4,
4, 8, 16, 32, 64, 128,
128, 64, 32, 16, 8, 4,
5, 10, 15, 20, 25, 30,
30, 25, 20, 15, 10, 5,
5, 10, 15, 20, 25, 30]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [73, 33, 23, 39,
73, 33, 23, 39,
14, 16, 19, 21,
14, 16, 19, 21]
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = y.eval()
self.assertAllClose(resized, expected, atol=1)
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
gpu_val = out_op.eval()
with self.test_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
cpu_val = out_op.eval()
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.BILINEAR,
align_corners=align_corners)
value[use_gpu] = out_op.eval()
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
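# Illustrative sketch (not part of the original tests):
# resize_image_with_crop_or_pad centrally crops dimensions that are too large
# and centrally zero-pads dimensions that are too small. A rough NumPy
# analogue for HWC images (hypothetical helper, for exposition only):
def _crop_or_pad_np(x_np, target_height, target_width):
  """Center-crop, then center-pad `x_np` to the target size."""
  h, w = x_np.shape[0], x_np.shape[1]
  # Crop if too large; the extra pixel of an odd crop comes off the far side.
  off_h = max((h - target_height) // 2, 0)
  off_w = max((w - target_width) // 2, 0)
  y = x_np[off_h:off_h + target_height, off_w:off_w + target_width, :]
  # Pad if too small; the extra pixel of an odd pad also goes on the far side.
  pad_h = target_height - y.shape[0]
  pad_w = target_width - y.shape[1]
  return np.pad(y, ((pad_h // 2, pad_h - pad_h // 2),
                    (pad_w // 2, pad_w - pad_w // 2), (0, 0)),
                mode="constant")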
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.resize_image_with_crop_or_pad(x_tensor, target_height,
target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_crop_or_pad(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
# Pad even along col.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0,
0, 5, 6, 7, 8, 0]
y_shape = [2, 6, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along col.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0,
0, 5, 6, 7, 8, 0, 0]
y_shape = [2, 7, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad even along row.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0,
1, 2, 3, 4,
5, 6, 7, 8,
0, 0, 0, 0]
y_shape = [4, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along row.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0,
1, 2, 3, 4,
5, 6, 7, 8,
0, 0, 0, 0,
0, 0, 0, 0]
y_shape = [5, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCrop(self):
# Crop even along col.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [2, 3,
6, 7]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along col.
x = [1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12]
x_shape = [2, 6, 1]
y = [2, 3, 4,
8, 9, 10]
y_shape = [2, 3, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop even along row.
x = [1, 2,
3, 4,
5, 6,
7, 8]
x_shape = [4, 2, 1]
y = [3, 4,
5, 6]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along row.
x = [1, 2,
3, 4,
5, 6,
7, 8,
9, 10,
11, 12,
13, 14,
15, 16]
x_shape = [8, 2, 1]
y = [3, 4,
5, 6,
7, 8,
9, 10,
11, 12]
y_shape = [5, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCropAndPad(self):
# Pad along row but crop along col.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0,
2, 3,
6, 7,
0, 0]
y_shape = [4, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop along row but pad along col.
x = [1, 2,
3, 4,
5, 6,
7, 8]
x_shape = [4, 2, 1]
y = [0, 3, 4, 0,
0, 5, 6, 0]
y_shape = [2, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
target_height, target_width = [4, 4]
for x_shape in ([3, 5],):
self._assertRaises(x, x_shape, target_height, target_width,
"'image' must have either 3 or 4 dimensions.")
for x_shape in ([1, 3, 5, 1, 1],):
self._assertRaises(x, x_shape, target_height, target_width,
"'image' must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
target_height, target_width = [1, 1]
x = []
for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # The original error message does not contain backslashes. However, they
      # are added by either the assert op or the runtime. If this behaviour
      # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"all dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# target_height <= 0
target_height, target_width = [0, 5]
self._assertRaises(x, x_shape, target_height, target_width,
"target_height must be > 0")
# target_width <= 0
target_height, target_width = [5, 0]
self._assertRaises(x, x_shape, target_height, target_width,
"target_width must be > 0")
def _SimpleColorRamp():
"""Build a simple color ramp RGB image."""
w, h = 256, 200
i = np.arange(h)[:, None]
j = np.arange(w)
image = np.empty((h, w, 3), dtype=np.uint8)
image[:, :, 0] = i
image[:, :, 1] = j
image[:, :, 2] = (i + j) >> 1
return image
class JpegTest(test_util.TensorFlowTestCase):
# TODO(irving): Add self.assertAverageLess or similar to test_util
def averageError(self, image0, image1):
self.assertEqual(image0.shape, image1.shape)
image0 = image0.astype(int) # Avoid overflow
return np.abs(image0 - image1).sum() / np.prod(image0.shape)
def testExisting(self):
# Read a real jpeg and verify shape
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.test_session(use_gpu=True) as sess:
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
jpeg0, image0, image1 = sess.run([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertLess(self.averageError(image0, image1), 1.4)
def testCmyk(self):
# Confirm that CMYK reads in as RGB
base = "tensorflow/core/lib/jpeg/testdata"
rgb_path = os.path.join(base, "jpeg_merge_test1.jpg")
cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
shape = 256, 128, 3
for channels in 3, 0:
with self.test_session(use_gpu=True) as sess:
rgb = image_ops.decode_jpeg(
io_ops.read_file(rgb_path), channels=channels)
cmyk = image_ops.decode_jpeg(
io_ops.read_file(cmyk_path), channels=channels)
rgb, cmyk = sess.run([rgb, cmyk])
self.assertEqual(rgb.shape, shape)
self.assertEqual(cmyk.shape, shape)
error = self.averageError(rgb, cmyk)
self.assertLess(error, 4)
def testSynthetic(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE")
jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input
self.assertLess(self.averageError(image0, image1), 0.6)
# We should be very close to a fixpoint
self.assertLess(self.averageError(image1, image2), 0.02)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testSyntheticFasterAlgorithm(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST")
jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input, but
# note this is worse than the slower algorithm because it is
# less accurate.
self.assertLess(self.averageError(image0, image1), 0.95)
# Repeated compression / decompression will have a higher error
# with a lossier algorithm.
self.assertLess(self.averageError(image1, image2), 1.05)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testDefaultDCTMethodIsIntegerFast(self):
with self.test_session(use_gpu=True) as sess:
# Compare decoding with both dct_option=INTEGER_FAST and
# default. They should be the same.
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(jpeg0)
image1, image2 = sess.run([image1, image2])
# The images should be the same.
self.assertAllClose(image1, image2)
def testShape(self):
with self.test_session(use_gpu=True) as sess:
jpeg = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class PngTest(test_util.TensorFlowTestCase):
def testExisting(self):
# Read some real PNGs, converting to different channel numbers
prefix = "tensorflow/core/lib/png/testdata/"
inputs = (1, "lena_gray.png"), (4, "lena_rgba.png")
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
with self.test_session(use_gpu=True) as sess:
png0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_png(png0, channels=channels)
png0, image0 = sess.run([png0, image0])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
if channels == channels_in:
image1 = image_ops.decode_png(image_ops.encode_png(image0))
self.assertAllEqual(image0, image1.eval())
def testSynthetic(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp())
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = sess.run([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 400)
self.assertLessEqual(len(png0), 750)
def testSyntheticUint16(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp(), dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = sess.run([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 800)
self.assertLessEqual(len(png0), 1500)
def testSyntheticTwoChannel(self):
with self.test_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = _SimpleColorRamp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = sess.run([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testSyntheticTwoChannelUint16(self):
with self.test_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = _SimpleColorRamp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = sess.run([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testShape(self):
with self.test_session(use_gpu=True):
png = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_png(png, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class GifTest(test_util.TensorFlowTestCase):
def testValid(self):
# Read some real GIFs
prefix = "tensorflow/core/lib/gif/testdata/"
filename = "scan.gif"
WIDTH = 20
HEIGHT = 40
STRIDE = 5
shape = (12, HEIGHT, WIDTH, 3)
with self.test_session(use_gpu=True) as sess:
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
gif0, image0 = sess.run([gif0, image0])
self.assertEqual(image0.shape, shape)
for frame_idx, frame in enumerate(image0):
gt = np.zeros(shape[1:], dtype=np.uint8)
start = frame_idx * STRIDE
end = (frame_idx + 1) * STRIDE
print(frame_idx)
if end <= WIDTH:
gt[:, start:end, :] = 255
else:
start -= WIDTH
end -= WIDTH
gt[start:end, :, :] = 255
self.assertAllClose(frame, gt)
def testInValid(self):
# Read some real GIFs
prefix = "tensorflow/core/lib/gif/testdata/"
filename = "optimized.gif"
with self.test_session(use_gpu=True) as sess:
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
with self.assertRaises(errors.InvalidArgumentError):
gif0, image0 = sess.run([gif0, image0])
def testShape(self):
with self.test_session(use_gpu=True) as sess:
gif = constant_op.constant("nonsense")
image = image_ops.decode_gif(gif)
self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])
class ConvertImageTest(test_util.TensorFlowTestCase):
def _convert(self, original, original_dtype, output_dtype, expected):
x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
with self.test_session(use_gpu=True):
image = constant_op.constant(x_np)
y = image_ops.convert_image_dtype(image, output_dtype)
self.assertTrue(y.dtype == output_dtype)
self.assertAllClose(y.eval(), y_np, atol=1e-5)
def testNoConvert(self):
# Make sure converting to the same data type creates only an identity op
with self.test_session(use_gpu=True):
image = constant_op.constant([1], dtype=dtypes.uint8)
image_ops.convert_image_dtype(image, dtypes.uint8)
y = image_ops.convert_image_dtype(image, dtypes.uint8)
self.assertEquals(y.op.type, "Identity")
self.assertEquals(y.op.inputs[0], image)
def testConvertBetweenInteger(self):
    # Make sure converting between integer types scales appropriately
with self.test_session(use_gpu=True):
self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
def testConvertBetweenFloat(self):
    # Make sure converting between float types does nothing interesting
with self.test_session(use_gpu=True):
self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
[-1.0, 0, 1.0, 200000])
self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
[-1.0, 0, 1.0, 200000])
def testConvertBetweenIntegerAndFloat(self):
# Make sure converting from and to a float type scales appropriately
with self.test_session(use_gpu=True):
self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
[0, 1.0 / 255.0, 1])
self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
[0, 1, 255])
def testConvertBetweenInt16AndInt8(self):
with self.test_session(use_gpu=True):
# uint8, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8,
[0, 255])
self._convert([0, 255], dtypes.uint8, dtypes.uint16,
[0, 255 * 256])
# int8, uint16
self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8,
[0, 127])
self._convert([0, 127], dtypes.int8, dtypes.uint16,
[0, 127 * 2 * 256])
# int16, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16,
[0, 255 * 128])
self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16,
[0, 255 * 256])
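# Illustrative sketch (not part of the original tests): convert_image_dtype
# treats integer images as spanning [0, dtype.max] and float images as
# spanning [0, 1], so conversions rescale rather than merely cast. For the
# integer-to-integer cases above, the scale factor is the ratio of the
# (max + 1) values, as in this hypothetical NumPy helper:
def _convert_integer_image_np(x_np, in_max, out_max):
  """Rescale integer pixel values from [0, in_max] to [0, out_max]."""
  if out_max > in_max:
    return x_np.astype(np.int64) * ((out_max + 1) // (in_max + 1))
  return x_np.astype(np.int64) // ((in_max + 1) // (out_max + 1))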
class TotalVariationTest(test_util.TensorFlowTestCase):
"""Tests the function total_variation() in image_ops.
We test a few small handmade examples, as well as
some larger examples using an equivalent numpy
implementation of the total_variation() function.
We do NOT test for overflows and invalid / edge-case arguments.
"""
def _test(self, x_np, y_np):
"""Test that the TensorFlow implementation of
total_variation(x_np) calculates the values in y_np.
Note that these may be float-numbers so we only test
for approximate equality within some narrow error-bound.
"""
# Create a TensorFlow session.
with self.test_session(use_gpu=True):
# Add a constant to the TensorFlow graph that holds the input.
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# Add ops for calculating the total variation using TensorFlow.
y = image_ops.total_variation(images=x_tf)
# Run the TensorFlow session to calculate the result.
y_tf = y.eval()
# Assert that the results are as expected within
# some small error-bound in case they are float-values.
self.assertAllClose(y_tf, y_np)
def _total_variation_np(self, x_np):
"""Calculate the total variation of x_np using numpy.
This implements the same function as TensorFlow but
using numpy instead.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
dim = len(x_np.shape)
if dim == 3:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]
# Sum for all axis.
sum_axis = None
elif dim == 4:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]
# Only sum for the last 3 axis.
sum_axis = (1, 2, 3)
else:
      # Should not occur with the 3-D / 4-D arrays used in these tests.
      raise ValueError("x_np must have 3 or 4 dimensions.")
tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \
np.sum(np.abs(dif2), axis=sum_axis)
return tot_var
def _test_tensorflow_vs_numpy(self, x_np):
"""Test the TensorFlow implementation against a numpy implementation.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
# Calculate the y-values using the numpy implementation.
y_np = self._total_variation_np(x_np)
self._test(x_np, y_np)
def _generateArray(self, shape):
"""Generate an array of the given shape for use in testing.
The numbers are calculated as the cumulative sum, which
causes the difference between neighboring numbers to vary."""
# Flattened length of the array.
flat_len = np.prod(shape)
a = np.array(range(flat_len), dtype=int)
a = np.cumsum(a)
a = a.reshape(shape)
return a
def testTotalVariationNumpy(self):
"""Test the TensorFlow implementation against a numpy implementation.
The two implementations are very similar so it is possible that both
have the same bug, which would not be detected by this test. It is
therefore necessary to test with manually crafted data as well."""
# Generate a test-array.
# This is an 'image' with 100x80 pixels and 3 color channels.
a = self._generateArray(shape=(100, 80, 3))
# Test the TensorFlow implementation vs. numpy implementation.
# We use a numpy implementation to check the results that are
# calculated using TensorFlow are correct.
self._test_tensorflow_vs_numpy(a)
self._test_tensorflow_vs_numpy(a + 1)
self._test_tensorflow_vs_numpy(-a)
self._test_tensorflow_vs_numpy(1.1 * a)
# Expand to a 4-dim array.
b = a[np.newaxis, :]
# Combine several variations of the image into a single 4-dim array.
multi = np.vstack((b, b + 1, -b, 1.1 * b))
# Test that the TensorFlow function can also handle 4-dim arrays.
self._test_tensorflow_vs_numpy(multi)
def testTotalVariationHandmade(self):
"""Test the total variation for a few handmade examples."""
# We create an image that is 2x2 pixels with 3 color channels.
# The image is very small so we can check the result by hand.
# Red color channel.
# The following are the sum of absolute differences between the pixels.
# sum row dif = (4-1) + (7-2) = 3 + 5 = 8
# sum col dif = (2-1) + (7-4) = 1 + 3 = 4
r = [[1, 2],
[4, 7]]
    # Green color channel.
# sum row dif = 18 + 29 = 47
# sum col dif = 7 + 18 = 25
g = [[11, 18],
[29, 47]]
    # Blue color channel.
# sum row dif = 120 + 193 = 313
# sum col dif = 47 + 120 = 167
b = [[73, 120],
[193, 313]]
# Combine the 3 color channels into a single 3-dim array.
# The shape is (2, 2, 3) corresponding to (height, width and color).
a = np.dstack((r, g, b))
# Total variation for this image.
# Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
tot_var = 564
# Calculate the total variation using TensorFlow and assert it is correct.
self._test(a, tot_var)
# If we add 1 to all pixel-values then the total variation is unchanged.
self._test(a + 1, tot_var)
# If we negate all pixel-values then the total variation is unchanged.
self._test(-a, tot_var)
# Scale the pixel-values by a float. This scales the total variation as well.
b = 1.1 * a
self._test(b, 1.1 * tot_var)
# Scale by another float.
c = 1.2 * a
self._test(c, 1.2 * tot_var)
# Combine these 3 images into a single array of shape (3, 2, 2, 3)
# where the first dimension is for the image-number.
multi = np.vstack((a[np.newaxis, :],
b[np.newaxis, :],
c[np.newaxis, :]))
# Check that TensorFlow correctly calculates the total variation
# for each image individually and returns the correct array.
self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))
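# Illustrative sketch (not part of the original tests): total_variation is
# typically used as a smoothness regularizer added to a training loss with a
# small weight. Hypothetical helper; `base_loss` and `images` are assumed to
# be tensors supplied by the caller.
def _total_variation_regularized_loss(base_loss, images, tv_weight=1e-4):
  """Add a weighted total-variation penalty to `base_loss` (sketch only)."""
  return base_loss + tv_weight * math_ops.reduce_sum(
      image_ops.total_variation(images))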
if __name__ == "__main__":
googletest.main()
| apache-2.0 | 6,134,911,788,405,626,000 | 35.561753 | 81 | 0.591578 | false |
deanmalmgren/textract | tests/test_exceptions.py | 1 | 2187 | import unittest
import os
import subprocess
import uuid
from . import base
class ExceptionTestCase(base.GenericUtilities, unittest.TestCase):
"""This class contains a bunch of tests to make sure that textract
fails in expected ways.
"""
def test_unsupported_extension_cli(self):
"""Make sure unsupported extension exits with non-zero status"""
filename = self.get_temp_filename(extension="extension")
command = "textract %(filename)s 2> /dev/null" % locals()
self.assertEqual(1, subprocess.call(command, shell=True))
os.remove(filename)
def test_unsupported_extension_python(self):
"""Make sure unsupported extension raises the correct error"""
filename = self.get_temp_filename(extension="extension")
import textract
from textract.exceptions import ExtensionNotSupported
with self.assertRaises(ExtensionNotSupported):
textract.process(filename)
os.remove(filename)
def test_missing_filename_cli(self):
"""Make sure missing files exits with non-zero status"""
filename = self.get_temp_filename()
os.remove(filename)
command = "textract %(filename)s 2> /dev/null" % locals()
self.assertEqual(1, subprocess.call(command, shell=True))
def test_missing_filename_python(self):
"""Make sure missing files raise the correct error"""
filename = self.get_temp_filename()
os.remove(filename)
import textract
from textract.exceptions import MissingFileError
with self.assertRaises(MissingFileError):
textract.process(filename)
def test_shell_parser_run(self):
"""get a useful error message when a dependency is missing"""
from textract.parsers import utils
from textract.parsers import exceptions
parser = utils.ShellParser()
try:
# There shouldn't be a command on the path matching a random uuid
parser.run([str(uuid.uuid4())])
except exceptions.ShellError as e:
self.assertTrue(e.is_not_installed())
else:
self.assertTrue(False, "Expected ShellError")
| mit | -4,080,433,351,844,123,600 | 37.368421 | 77 | 0.665752 | false |
django-inplaceedit/django-inplaceedit-bootstrap | testing/testing/settings.py | 1 | 8477 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 by Pablo Martín <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this programe. If not, see <http://www.gnu.org/licenses/>.
# Django settings for testing project.
from os import path
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
BASEDIR = path.dirname(path.abspath(__file__))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': path.join(BASEDIR, 'testing.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = path.join(BASEDIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/my_media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '98qi@6+%3nt__m_o6@o(n8%+!)yjxrl*fcs%l@2g=e-*4fu4h%'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
ALLOWED_HOSTS = [
'localhost',
]
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'testing.urls'
TEMPLATE_DIRS = (
path.join(BASEDIR, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'inplaceeditform_bootstrap',
'inplaceeditform',
'django.contrib.admin',
'testing.app',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# django-inplaceedit customization
#INPLACEEDIT_EDIT_EMPTY_VALUE = 'Double click to edit...'
INPLACEEDIT_AUTO_SAVE = True
INPLACEEDIT_EVENT = 'click'
#INPLACEEDIT_DISABLE_CLICK = False
#INPLACEEDIT_EDIT_MESSAGE_TRANSLATION = 'Write a translation...'
#INPLACEEDIT_SUCCESS_TEXT = 'Successfully saved...'
#INPLACEEDIT_UNSAVED_TEXT = 'You have unsaved changes!!!!'
#INPLACE_ENABLE_CLASS = 'enable'
#DEFAULT_INPLACE_EDIT_OPTIONS = {}
#DEFAULT_INPLACE_EDIT_OPTIONS_ONE_BY_ONE = False
#ADAPTOR_INPLACEEDIT_EDIT = 'inplace_edit.perms.AdminDjangoPermEditInline'
#ADAPTOR_INPLACEEDIT = {}
#INPLACE_GET_FIELD_URL = None
#INPLACE_SAVE_URL = None
#INPLACE_FIELD_TYPES = 'input, select, textarea'
#INPLACE_FOCUS_WHEN_EDITING = True
# django-inplaceedit-bootstrap customization
INPLACEEDIT_EDIT_TOOLTIP_TEXT = 'Click to edit' # By default 'Double click to edit'
# If inplaceeditform_extra_fields is installed
try:
import inplaceeditform_extra_fields
INSTALLED_APPS += ('inplaceeditform_extra_fields',)
ADAPTOR_INPLACEEDIT = {'image_thumb': 'inplaceeditform_extra_fields.fields.AdaptorImageThumbnailField',
'tiny': 'inplaceeditform_extra_fields.fields.AdaptorTinyMCEField',
'tiny_simple': 'inplaceeditform_extra_fields.fields.AdaptorSimpleTinyMCEField'}
try:
import sorl
INSTALLED_APPS += ('sorl.thumbnail',)
THUMBNAIL_DEBUG = DEBUG
except ImportError:
pass
except ImportError:
pass
# If bootstrap3_datetime is installed
try:
import bootstrap3_datetime
INSTALLED_APPS += ('bootstrap3_datetime',)
ADAPTOR_INPLACEEDIT = ADAPTOR_INPLACEEDIT or {}
ADAPTOR_INPLACEEDIT['date'] = 'inplaceeditform_bootstrap.fields.AdaptorDateBootStrapField'
ADAPTOR_INPLACEEDIT['datetime'] = 'inplaceeditform_bootstrap.fields.AdaptorDateTimeBootStrapField'
except ImportError:
pass
# Custom settings to the different django versions
import django
if django.VERSION[0] >= 1 and django.VERSION[1] >= 4:
TEMPLATE_CONTEXT_PROCESSORS += ('django.core.context_processors.tz',)
if django.VERSION[0] >= 1 and django.VERSION[1] >= 3:
INSTALLED_APPS += ('django.contrib.staticfiles',)
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = path.join(BASEDIR, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_CONTEXT_PROCESSORS += ('django.core.context_processors.static',)
if django.VERSION[0] >= 1 and django.VERSION[1] >= 2:
INSTALLED_APPS += ('django.contrib.messages',)
| lgpl-3.0 | 484,212,716,362,200,060 | 34.024793 | 123 | 0.694195 | false |
googleinterns/risr | risr-app/risr_proj/manage.py | 1 | 1298 | #!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Main function for the utility."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'risr_proj.settings')
try:
# pylint: disable=import-outside-toplevel
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| apache-2.0 | 7,190,333,930,604,592,000 | 33.157895 | 74 | 0.701849 | false |
ver228/tierpsy-tracker | tierpsy/analysis/contour_orient/correctVentralDorsal.py | 2 | 3779 | import json
import os
import numpy as np
import tables
import warnings
from tierpsy.analysis.ske_filt.getFilteredSkels import _h_calAreaSignedArray
from tierpsy.helper.params import read_ventral_side, single_db_ventral_side
VALID_CNT = ['clockwise', 'anticlockwise', 'unknown']
def _add_ventral_side(skeletons_file, ventral_side=''):
#I am giving priority to a contour stored in experiments_info, rather than one read by the json file.
#currently i am only using the experiments_info in the re-analysis of the old schafer database
try:
ventral_side_f = single_db_ventral_side(skeletons_file)
except (tables.exceptions.NoSuchNodeError, KeyError):
ventral_side_f = ''
if ventral_side_f in VALID_CNT:
if not ventral_side or (ventral_side == ventral_side_f):
ventral_side = ventral_side_f
else:
raise ValueError('The given contour orientation ({}) and the orientation stored in /experiments_info group ({}) differ. Change /experiments_info or the parameters file to solve this issue.'.format(ventral_side, ventral_side_f) )
#add ventral side if given
if ventral_side in VALID_CNT:
with tables.File(skeletons_file, 'r+') as fid:
fid.get_node('/trajectories_data').attrs['ventral_side'] = ventral_side
return ventral_side
def _switch_cnt(skeletons_file):
with tables.File(skeletons_file, 'r+') as fid:
# since here we are changing all the contours, let's just change
# the name of the datasets
side1 = fid.get_node('/contour_side1')
side2 = fid.get_node('/contour_side2')
side1.rename('contour_side1_bkp')
side2.rename('contour_side1')
side1.rename('contour_side2')
def isBadVentralOrient(skeletons_file, ventral_side=''):
print(ventral_side)
ventral_side = _add_ventral_side(skeletons_file, ventral_side)
if not ventral_side in VALID_CNT:
return True
elif ventral_side == 'unknown':
is_bad = False
elif ventral_side in ['clockwise', 'anticlockwise']:
with tables.File(skeletons_file, 'r') as fid:
has_skeletons = fid.get_node('/trajectories_data').col('has_skeleton')
# let's use the first valid skeleton, it seems like a waste to use all the other skeletons.
# I checked earlier to make sure the have the same orientation.
valid_ind = np.where(has_skeletons)[0]
if valid_ind.size == 0:
#no valid skeletons, nothing to do here.
is_bad = True
else:
cnt_side1 = fid.get_node('/contour_side1')[valid_ind[0], :, :]
cnt_side2 = fid.get_node('/contour_side2')[valid_ind[0], :, :]
A_sign = _h_calAreaSignedArray(cnt_side1, cnt_side2)
# if not (np.all(A_sign > 0) or np.all(A_sign < 0)):
# raise ValueError('There is a problem. All the contours should have the same orientation.')
if ventral_side == 'clockwise':
is_bad = A_sign[0] < 0
elif ventral_side == 'anticlockwise':
is_bad = A_sign[0] > 0
else:
raise ValueError
if is_bad:
_switch_cnt(skeletons_file)
is_bad = False
return is_bad
def ventral_orient_wrapper(func, skel_f, ventral_side, *args, **argkws):
if isBadVentralOrient(skel_f, ventral_side):
raise ValueError('Cannot continue the ventral side {} given is empty or incorrect'.format(ventral_side))
return func(*args, **argkws)
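# Added usage sketch (the wrapped function name is hypothetical):
#   ventral_orient_wrapper(my_feature_func, skeletons_file, 'clockwise',
#                          skeletons_file, features_file)
# only calls my_feature_func(skeletons_file, features_file) once the stored
# contour orientation has been checked (and switched if needed).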
def isGoodVentralOrient(skeletons_file, ventral_side=''):
return not isBadVentralOrient(skeletons_file, ventral_side=ventral_side)
| mit | -2,725,099,786,554,675,700 | 38.364583 | 240 | 0.626885 | false |
alephu5/Soundbyte | statistics/cusum.py | 1 | 1395 | #! ../environment/bin/python3.3
#This test looks at the maximum excursion from 0 of the random walk
#defined by the cumulative sum of adjusted -1, +1 digits in the sequence.
#The cumulative sum may be considered as a random walk and for a random
#sequence this should be near 0.
from scipy.stats import norm
from numpy import sqrt, floor, max, sum
from math import erf
import ipdb
def max_psum(bits, mode):
"""Computes the maximum partial sum of the sequence. If the mode
is 0 the direction is forward and if it is 1 it is reversed."""
    bits = bits.transform_bit(0, -1)
    if mode == 1:
bits.reverse()
return max([abs(sum(bits[:i + 1])) for i in range(len(bits))])
def cusum(bits, mode=0):
n = len(bits)
z = max_psum(bits, mode)
pvalue = 1
if z == 0:
return -1
start = int(floor((-n / z + 1) * 0.25)) + 1
stop = int(floor((n / z - 1) * 0.25)) + 2
sum1 = 0
for k in range(start, stop):
sum1 += norm.cdf(((4 * k + 1) * z) / sqrt(n))
sum1 -= norm.cdf(((4 * k - 1) * z) / sqrt(n))
start = int(floor((-n / z - 3) * 0.25)) + 1
sum2 = 0
for k in range(start, stop):
sum2 += norm.cdf(((4 * k + 3) * z) / sqrt(n))
sum2 -= norm.cdf(((4 * k + 1) * z) / sqrt(n))
return 1 - sum1 + sum2
def israndom(pvalue):
if pvalue < 0.01:
return False
else:
return True
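# Rough usage sketch (added comment only): `bits` is expected to be the
# project's bit-sequence type providing the transform_bit()/reverse()
# interface used above, so this is not a standalone runnable example.
#   p_fwd = cusum(bits)          # mode 0: forward random walk
#   p_rev = cusum(bits, mode=1)  # mode 1: reversed sequence
#   print(israndom(p_fwd) and israndom(p_rev))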
| gpl-3.0 | -8,565,042,846,497,327,000 | 30 | 73 | 0.576344 | false |
Elnatan/js-symbolic-executor | cvc3/java/run_tests.py | 5 | 31160 | #!/usr/bin/env python
import sys, os, re, math, time
#, signal
#import resource
# Alexander Fuchs
# update of run_test:
# - works also under windows
# - measures cpu time, not wall clock time, per test case
# - works also for smtlib directories
# BROKEN:
# doesn't work as intended, due to problems on windows.
# so time measurement is actually not as accurate as hoped for.
# documentation taken from run_tests.py:
# Run regression tests of a given level (default: 0, meaning
# minimum amount of tests). The higher the regression level, the more
# tests to run, and the harder they get.
# Each test may contain information about its regression level,
# expected outcome, expected runtime, whether it produces a proof,
# etc. in the format given below. This script will scan the first 100
# lines of each test and try to collect this information.
# If some info is not found, defaults are assumed. Default regression
# level is 0, expected runtime is unbounded, outcome is undefined
# (whatever it returns is OK), proof should be produced if outcome is
# Valid, and if it is produced, it'll be verified.
# Test info is given in the comments; here are examples
#
# %%% Regression level = 2
# %%% Result = Valid %% or Invalid, or Unknown
# %%% Runtime = 10 %% in seconds
# %%% Proof = yes %% or 'no', if it doesn't produce a proof
# %%% Language = presentation %% or 'internal'
# The number of '%' and following spaces can vary, case is not
# important. Any text after the value is ignored. Any comments that
# are not recognized are also ignored.
### Constants
# :TODO: @TOP@ with configure, WIN
# general setup
#TEST_PATH = "/home/alexander/d/CVC/REPOSITORY/cvc3_fix/cvc3/testcases"
#RUN_PATH = "/home/alexander/d/CVC/REPOSITORY/FRESH/cvc3/bin"
#os.environ["PATH"] = RUN_PATH + ":" + os.environ["PATH"]
# max. number of lines to read from the testcase file
# when looking for info comments
MAX_INFO_LINES = 100
# for printing, the width of the label column
WIDTH = 24
PRINT_SUMMARY = 1
# valid problem file extensions
FILE_EXTENSIONS = ["cvc", "cvc3", "svc", "smt", "lisp", "lsp"]
# command line options
OPT_VERBOSE = "v"
OPT_QUIET = "q"
OPT_LEVEL = "l"
OPT_TIMEOUT = "t"
OPT_MEMOUT = "m"
OPT_CHECK_TIMEOUT = "rt"
OPT_LANG = "lang"
OPT_VC = "vc"
# test case options
PRO_LEVEL = "l"
PRO_LANG = "lang"
PRO_TIMEOUT = "t"
PRO_RESULT = "result"
#alarm_pipe = None
#alarm_raised = False
#def alarmHandler(signum, frame):
# raise 'Timeout'
#alarm_pipe.close()
#alarm_raise = True
#signal.signal(signal.SIGALRM, alarmHandler)
# while 1:
# try:
# signal.alarm(5)
# t = sys.stdin.readline()
# signal.alarm(0)
# print t
# except 'Timeout':
# print "too slow"
### helper functions
#def find
def forall (pred, seq):
return reduce(lambda acc, x: acc and pred(x), seq, True)
def exists (pred, seq):
return reduce(lambda acc, x: acc or pred(x), seq, False)
def find (pred, seq):
for x in seq:
if pred(x):
return x
### time
def cut_time (time):
return math.floor ((time * 10) + 0.5) / 10
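# cut_time rounds to one decimal place, e.g. cut_time(1.26) == 1.3 and
# cut_time(0.04) == 0.0 (added clarifying comment).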
def get_children_time ():
#print(os.times())
(_, _, child_system_time, child_user_time, _) = os.times()
return child_system_time + child_user_time
def get_start_time ():
return get_children_time()
def get_used_time (start_time):
end_time = get_children_time()
return cut_time(end_time - start_time)
### configuration
# default values for options
optionsDefault = {
OPT_VERBOSE : True,
OPT_LEVEL : 0,
OPT_TIMEOUT : 0,
OPT_MEMOUT : 0,
OPT_CHECK_TIMEOUT : False,
OPT_LANG : "all",
OPT_VC : "cvc3",
}
# precedence order of options is:
# 1. defined as command line options
# 2. defined in problem file
# 3. defined as default in optionsDefault
# otherwise fail
class Config:
def __init__ (self, options, prover_options):
# configuration options for this script
self.options = options
# options to be passed to the prover
self.prover_options = prover_options
# option: name of option whose value is requested
# optionsProblem: options specified in the current problem
def get (self, option, optionsProblem = None):
if self.options.has_key(option):
return self.options[option]
elif optionsProblem != None and optionsProblem.has_key(option):
return optionsProblem[option]
elif optionsDefault.has_key(option):
return optionsDefault[option]
else:
      raise ValueError("unknown option: " + str(option))
def getProverOptions (self):
return self.prover_options
### evaluation of option settings per problem file
def match_line(key, line):
match = re.search("^(;|\s|%|#)*" + key + "\s*=\s*(?P<value>\S+)", line)
if match != None:
return match.group("value").lower()
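# For example (added illustration of the info-comment formats described in the
# header), match_line("Result", '%%% Result = Valid %% ...') returns "valid",
# and match_line("Result", '; Result = Invalid') returns "invalid".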
# Read the first 'maxInfoLines' of the problem specification
# and fetch information from the comments
def get_problem_opt (name):
#print ("get_problem_opt" + " " + name);
options = {}
prover_options = []
try:
problem_file = open (name)
lines = 0
# readline will just return "\n" after EOF
while (lines < MAX_INFO_LINES):
lines += 1
line = problem_file.readline()
match = match_line("Regression level", line)
if match != None:
try:
options[PRO_LEVEL] = int(match)
except ValueError:
sys.stderr.write("Regression level requires an integer argument, got : " + match + " in " + name + "\n")
continue
match = match_line("Result", line)
if match != None:
if match in ["valid", "invalid", "satisfiable", "unsatisfiable", "unknown"]:
options[PRO_RESULT] = match
else:
sys.stderr.write("Result has invalid argument: " + match + " in " + name + "\n")
continue
match = re.search("^\s*:status\s*(?P<value>\S+)", line)
if match != None:
match = match.group("value").lower()
if match == "unsat":
options[PRO_RESULT] = "unsat"
#options[PRO_RESULT] = "unsatisfiable"
elif match == "sat":
options[PRO_RESULT] = "sat"
#options[PRO_RESULT] = "satisfiable"
elif match == "unknown":
options[PRO_RESULT] = "unknown"
else:
sys.stderr.write("status has invalid argument: " + match + " in " + name + "\n")
match = match_line("Runtime", line)
if match != None:
try:
options[PRO_TIMEOUT] = int(match)
except ValueError:
sys.stderr.write("Runtime requires an integer argument, got : " + match + " in " + name + "\n")
continue
match = match_line("Language", line)
if match != None:
options[PRO_LANG] = match
continue
match = match_line("Program Options", line)
if match != None:
prover_options = match.split()
continue
problem_file.close ()
except IOError, (error_nr, error_string):
print ("Couldn't open " + name + " : " + error_string)
return (options, prover_options)
# If regression level is not set, make it 3. So, if a lower level
# is requested, only explicitly marked tests will be run.
if not options.has_key(PRO_LEVEL):
options[PRO_LEVEL] = 3
# If the input language is not defined, guess it by extension
if not options.has_key(PRO_LANG):
ext = find(lambda x: name.endswith(x), FILE_EXTENSIONS)
if ext == None:
sys.stderr.write("Couldn't determine language of " + name + "\n")
elif ext == "cvc" or ext == "cvc3" or ext == "svc":
options[PRO_LANG] = "presentation"
elif ext == "smt":
options[PRO_LANG] = "smtlib"
elif ext == "lisp" or ext == "lsp":
options[PRO_LANG] = "lisp"
else:
sys.stderr.write("unexpected extension " + ext + " in " + name + "\n")
return (options, prover_options)
### command line parameters
optionsHelp = {
"-h" : "Print this help and exit",
"-" + OPT_VERBOSE : "Be verbose (default, opposite of -q)",
"-" + OPT_QUIET : "Quiet mode (opposite of -v)",
"-" + OPT_LEVEL + " n" : "Set regression level (default 0, the easiest level)",
"-" + OPT_TIMEOUT + " secs" : "Run each executable for at most 'secs' seconds [0 = no limit]",
"-" + OPT_MEMOUT + " MB" : "Abort if memory limit is exceeded [0 = no limit]",
"+" + OPT_CHECK_TIMEOUT: "Check that each test finishes within the specified runtime",
"-" + OPT_CHECK_TIMEOUT: "Do not check whether test finishes within the specified runtime (default)",
"-" + OPT_LANG + " name" : "Use the named input language only (default=all)",
"-" + OPT_VC + " prog" : "Use prog to run tests (default=cvc3)",
}
usageString = \
'''run_tests.py [ options ] [ test1 test2 ... ] [ -- [ command line options ] ]
Run regression tests. Concrete test files or directories
with test files should be specified by name with a full path or
relative path to the current directory. If none are specified, all
subdirectories are searched for test files. Subdirectories
are searched recursively, but symbolic links to directories are
not followed.
Default running mode is overridden by test specs;
test specs are overridden by command line options.'''
### command line parameter evaluation
# conversion of an argument from string to int
def to_int(option, value):
try:
return int(value)
except ValueError:
sys.stderr.write("Option " + option + " requires an integer argument, got : " + value + " \n")
sys.exit(1)
# increment the position in sys.argv
def next_argument(i, option):
i += 1
  if i >= len(sys.argv):
sys.stderr.write("Option " + option + " requires an argument\n")
sys.stderr.write("Run run_tests -h for help\n")
sys.exit(1)
return i
# evaluate sys.argv
def eval_arguments ():
# results of argument evaluation:
# options for this script
options = {}
# list of testcases to run
testcases = []
# prover options
prover_options = []
i = 1
while i < len(sys.argv):
# first we expect options for the script,
# then after "--" options for the prover.
if sys.argv[i] == "--":
i += 1
prover_options = sys.argv[i:]
break
elif sys.argv[i] == "-h":
print(usageString)
for (option, help_string) in optionsHelp.iteritems():
print(option.ljust(12) + help_string)
sys.exit()
elif sys.argv[i] == "+" + OPT_CHECK_TIMEOUT:
options[OPT_CHECK_TIMEOUT] = True
elif sys.argv[i] == "-" + OPT_CHECK_TIMEOUT:
options[OPT_CHECK_TIMEOUT] = False
elif sys.argv[i] == "-" + OPT_VERBOSE:
options[OPT_VERBOSE] = True
elif sys.argv[i] == "-" + OPT_QUIET:
options[OPT_VERBOSE] = False
elif sys.argv[i] == "-" + OPT_LANG:
i = next_argument(i, sys.argv[i])
options[OPT_LANG] = sys.argv[i]
elif sys.argv[i] == "-" + OPT_LEVEL:
i = next_argument(i, sys.argv[i])
options[OPT_LEVEL] = to_int(OPT_LEVEL, sys.argv[i])
elif sys.argv[i] == "-" + OPT_TIMEOUT:
i = next_argument(i, sys.argv[i])
options[OPT_TIMEOUT] = to_int(OPT_TIMEOUT, sys.argv[i])
elif sys.argv[i] == "-" + OPT_MEMOUT:
i = next_argument(i, sys.argv[i])
options[OPT_MEMOUT] = to_int(OPT_MEMOUT, sys.argv[i])
elif sys.argv[i] == "-" + OPT_VC:
i = next_argument(i, sys.argv[i])
options[OPT_VC] = sys.argv[i]
# This must be a testcase name
else:
testcases.append(sys.argv[i])
i = i + 1
return (options, testcases, prover_options)
### get test cases
# 'enumeration'
RES_TESTS, RES_TIME, RES_CORRECT, RES_PROBLEMATIC, RES_INCORRECT, \
RES_FAILED, RES_TIMEOUT, RES_MEMOUT, RES_ARITH, RES_TOO_LONG, RES_MUCH_TOO_LONG, RES_TOO_FAST, \
RES_MUCH_TOO_FAST, RES_LANG, RES_STRANGE = range(15)
def create_results ():
results = {}
results[RES_TESTS] = 0
results[RES_TIME] = 0
results[RES_CORRECT] = 0
results[RES_PROBLEMATIC] = 0
results[RES_INCORRECT] = []
results[RES_FAILED] = []
results[RES_TIMEOUT] = []
results[RES_MEMOUT] = []
results[RES_ARITH] = []
results[RES_TOO_LONG] = []
results[RES_MUCH_TOO_LONG] = []
results[RES_TOO_FAST] = []
results[RES_MUCH_TOO_FAST] = []
results[RES_LANG] = []
results[RES_STRANGE] = []
return results
### run tests
# is file name a test case name?
def is_test_case(config, problem_options):
# a test case
if problem_options.has_key(PRO_LANG):
# either all or this particular language must be ok
return config.get(OPT_LANG) == "all" or config.get(OPT_LANG) == problem_options[PRO_LANG]
# unknown file type
else:
return 0
def run_test (config, name, results, check_lang = False):
(problem_options, problem_prover_options) = get_problem_opt(name)
#
if is_test_case(config, problem_options):
# Check regression level
if problem_options[PRO_LEVEL] > config.get(OPT_LEVEL):
# Regression level of this test is too high; skip it
return
# Print the testcase name
print("=" * WIDTH)
print(name + " : ")
# Print some testcase specific info
print_test_info(config, problem_options, problem_prover_options)
# setup prover arguments
arguments = []
arguments.append(config.get(OPT_VC))
# we don't check for proofs anyway, so we disable them
arguments.append("-proofs")
# set language
    if problem_options[PRO_LANG] != "presentation":
arguments.append("-lang")
arguments.append(problem_options[PRO_LANG])
# add general prover options
for arg in config.getProverOptions():
arguments.append(arg)
# add problem specific prover options
for arg in problem_prover_options:
arguments.append(arg)
if config.get(OPT_TIMEOUT) > 0:
arguments.append("-timeout");
arguments.append(repr(config.get(OPT_TIMEOUT)))
arguments.append(name)
# redirect error to stdout
arguments.append(" 2>&1")
command = " ".join(arguments)
#print("***")
print("Running " + command)
print
#print("***");
#reader, writer = os.pipe()
#start_time = get_start_time()
start_time = time.time()
pipe = os.popen(command);
#global alarm_pipe
#global alarm_raised
#if config.get(OPT_TIMEOUT) > 0:
#alarm_pipe = pipe
#alarm_raised = False
#signal.alarm(config.get(OPT_TIMEOUT))
#pid = os.fork()
# if pid == 0:
# try:
# # set_resource_limits
# #if config.get(OPT_TIMEOUT) > 0:
# # resource.setrlimit (resource.RLIMIT_CPU, (config.get(OPT_TIMEOUT), config.get(OPT_TIMEOUT)))
# #if config.get(OPT_MEMOUT) > 0:
# # MEMORY_LIMIT = config.get(OPT_MEMOUT) * 1024 * 1024
# # resource.setrlimit (resource.RLIMIT_AS, (MEMORY_LIMIT, MEMORY_LIMIT))
# # forward output to parent process
# os.close(reader)
# os.dup2(writer, sys.stdout.fileno ())
# os.dup2(writer, sys.stderr.fileno ())
# # run prover
# os.execvp(arguments[0], arguments)
# except OSError, (error_nr, error_string):
# sys.stderr.write("Error in executing '" + command + "': " + error_string + "\n")
# sys.exit(error_nr)
# else:
#os.wait()
#os.close(writer)
results[RES_TESTS] += 1
# run prover
#os.execvp(config.get(OPT_VC), arguments)
#pipe = os.popen(command)
#pipe = os.fdopen(reader, 'r')
# check output
result = None
resultError = None
resultTimeout = False
resultMemout = False
resultArith = False
#:TODO: select on pipe with timeout
#try:
if True:
for line in pipe:
print line,
if line.startswith("*** Out of memory") \
or line.startswith("Out of memory") \
or line.startswith("terminate called after throwing an instance of 'std::bad_alloc'"):
resultMemout = True
        # cvc timeout: cygwin/.net
if line.startswith("Interrupted by signal 14 (self-timeout).") or line.startswith("self-timeout"):
resultTimeout = True
if line.count("arithmetic overflow"):
resultArith = True
# get only first result
if result == None:
chomped = line.rstrip().lower()
if chomped in ["valid.", "invalid.", "satisfiable.", "unsatisfiable.", "unknown.", "unsat.", "sat."]:
result = chomped[:-1]
elif chomped in ["unknown", "unsat", "sat"]:
result = chomped
#except 'Timeout':
# resultTimeout = True
# pipe.close()
# exit_val = -1
#signal.alarm(0)
#if alarm_raised:
# alarm_pipe = None
# alarm_raised = False
# resultTimeout = True
#else:
#if not resultTimeout:
exit_val = pipe.close()
#(_, exit_val) = os.wait ()
#used_time = get_used_time(start_time)
end_time = time.time()
used_time = cut_time(end_time - start_time)
# check run time
print("Program Runtime: " + str(used_time) + " sec"),
if result != None:
results[RES_TIME] += used_time
if config.get(OPT_CHECK_TIMEOUT) and problem_options.has_key(PRO_TIMEOUT):
expected_time = problem_options[PRO_TIMEOUT]
if used_time > expected_time:
if used_time > 10 * expected_time:
results[RES_MUCH_TOO_LONG].append(name)
print(" MUCH")
print(" LONGER than expected: " + str(expected_time) + " sec")
results[RES_TOO_LONG].append(name)
results[RES_PROBLEMATIC] += 1
elif ((problem_options[OPT_TIMEOUT] >= 4 and expected_time <= 4
and used_time < expected_time - 2)
or
(used_time > 15 and used_time <= (17 * expected_time) / 20)):
if used_time <= expected_time / 2:
results[RES_MUCH_TOO_FAST].append(name)
print(" MUCH")
print(" FASTER than expected: " + str(expected_time) + " sec")
          results[RES_TOO_FAST].append(name)
results[RES_PROBLEMATIC] += 1
print
# check prover status
# resource out: memory
if resultMemout:
results[RES_MEMOUT].append(name)
resultError = RES_MEMOUT
print("*** Out of memory ")
# resource out: arithmetic precision
elif resultArith:
results[RES_ARITH].append(name)
resultError = RES_ARITH
print("*** arithmetic overflow ")
# resource out: time - at least on my linux version ... is this defined somewhere?
elif resultTimeout or (exit_val == 9 and config.get(OPT_TIMEOUT) > 0 and used_time >= config.get(OPT_TIMEOUT)):
results[RES_TIMEOUT].append(name)
resultError = RES_TIMEOUT
print("*** Timed out ")
elif exit_val != None:
if config.get(OPT_TIMEOUT) == 0 and config.get(OPT_MEMOUT) == 0:
results[RES_FAILED].append(name)
print("*** FAILED with exit code " + str(exit_val))
sys.stderr.write("Warning, unexpected termination with exit code " + str(exit_val) + "\n")
else:
results[RES_FAILED].append(name)
print("*** FAILED with exit code " + str(exit_val))
sys.stderr.write("Warning, unexpected termination with exit code " + str(exit_val) + "\n")
# check that computed result is the expected result
elif problem_options.has_key(PRO_RESULT):
if result == None:
results[RES_FAILED].append(name)
sys.stdout.write("FAILED (no result, expected " + problem_options[PRO_RESULT] + ")\n")
sys.stderr.write("FAILED (no result, expected " + problem_options[PRO_RESULT] + ")\n")
elif problem_options[PRO_RESULT] != result:
if result == "unknown":
results[RES_STRANGE].append(name)
results[RES_PROBLEMATIC] += 1
sys.stdout.write("Warning, expected " + problem_options[PRO_RESULT] + " but got unknown\n")
sys.stderr.write("Warning, expected " + problem_options[PRO_RESULT] + " but got unknown\n")
elif problem_options[PRO_RESULT] == "unknown":
results[RES_STRANGE].append(name)
results[RES_PROBLEMATIC] += 1
sys.stdout.write("Warning, expected unknown but got " + result + "\n")
sys.stderr.write("Warning, expected unknown but got " + result + "\n")
else:
results[RES_INCORRECT].append(name)
resultError = RES_INCORRECT
sys.stdout.write("FAILED (incorrect result, expected " + problem_options[PRO_RESULT] + " but got " + result + ")\n")
sys.stderr.write("FAILED (incorrect result, expected " + problem_options[PRO_RESULT] + " but got " + result + ")\n")
else:
results[RES_CORRECT] += 1
print("Result is correct")
# any result is fine, as we don't know the correct result
elif result != None:
results[RES_CORRECT] += 1
print("Result is correct")
# no result
else:
results[RES_STRANGE].append(name)
results[RES_PROBLEMATIC] += 1
print("No result")
if PRINT_SUMMARY:
short_name = os.path.basename(name)
if result != None:
printResult = result
elif resultError == RES_INCORRECT:
printResult = "unsound:"
elif resultError == RES_TIMEOUT:
printResult = "timeout"
elif resultError == RES_MEMOUT:
printResult = "memout"
elif resultError == RES_ARITH:
printResult = "arith_overflow"
else:
printResult = "???"
print("SUMMARY: " + name)
print((short_name + " ").ljust(40) + (printResult + " ").ljust(20) + str(used_time))
elif check_lang:
results[RES_LANG].append(name)
results[RES_PROBLEMATIC] += 1
else:
print("IGNORE " + name)
sys.stdout.flush()
# expects strings, potentially with number
def cmpStrNum(x, y):
if x == y:
return 0
# find first different character
xLength = len(x)
yLength = len(y)
index = 0
while (index < xLength and index < yLength):
if x[index] == y[index]:
index += 1
elif (not x[index].isdigit()) or (not y[index].isdigit()):
return cmp(x[index], y[index])
# compare as numbers
else:
# find start of number
start = index
while start >= 0 and x[start].isdigit():
start -= 1
start += 1
xEnd = index
while xEnd < xLength and x[xEnd].isdigit():
xEnd += 1
yEnd = index
while yEnd < yLength and y[yEnd].isdigit():
yEnd += 1
xNum = int(x[start:xEnd])
yNum = int(y[start:yEnd])
return cmp(xNum, yNum)
# one string is prefix of the other
if index >= xLength:
return -1
if index >= yLength:
return 1
else:
    raise ValueError("cmpStrNum: " + x + " " + y)
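# Added illustration of the "natural" ordering implemented above:
# cmpStrNum("test2.cvc", "test10.cvc") < 0, i.e. "test2" sorts before
# "test10", whereas plain string comparison would order "test10" first.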
# find all test cases in directory
def find_test_case((config, results), dir_name, files_in_dir):
files_in_dir.sort(cmpStrNum)
for file_name in files_in_dir:
file_path = os.path.join (dir_name, file_name)
if os.path.isfile (file_path) and exists(lambda x: file_name.endswith(x), FILE_EXTENSIONS):
run_test(config, file_path, results)
def run_tests(config, test_cases):
#test_cases.sort()
results = create_results()
for test_case in test_cases:
# if a file, try it
if os.path.isfile(test_case):
run_test(config, test_case, results, True)
# else traverse subdirectories
elif os.path.isdir(test_case):
os.path.walk(test_case, find_test_case, (config, results))
else:
sys.stderr.write("*** WARNING: cannot find testcase "
+ test_case + " : no such file or directory\n")
return results
### printing
def print_setup(config):
if config.get(OPT_VERBOSE):
print("*" * WIDTH)
# get prover to use (and remove eol)
prover = os.popen("which " + config.get(OPT_VC)).readline()[:-1]
print("Prover: ".ljust(WIDTH) + prover)
print("Regression level: ".ljust(WIDTH) + repr(config.get(OPT_LEVEL)))
print("Language: ".ljust(WIDTH) + config.get(OPT_LANG))
if(config.get(OPT_TIMEOUT) > 0):
print("Time limit per test: ".ljust(WIDTH) + str(config.get(OPT_TIMEOUT)) + " sec")
if(config.get(OPT_MEMOUT) > 0):
print("Memory limit per test: ".ljust(WIDTH) + str(config.get(OPT_MEMOUT)) + " MB")
#print("PATH = ", $ENV{'PATH'})
print("*" * WIDTH)
def print_test_info (config, problem_options, problem_prover_options):
if config.get(OPT_VERBOSE):
print("Language: " + problem_options[PRO_LANG])
if config.get(OPT_CHECK_TIMEOUT) and problem_options.has_key(PRO_TIMEOUT):
print("Expected runtime: " + problem_options[PRO_TIMEOUT] + " sec")
if problem_options.has_key(PRO_RESULT):
print("Expected result: " + problem_options[PRO_RESULT])
print("Program options: " + " ".join(config.getProverOptions() + problem_prover_options))
def print_end (config, results):
print
print("Statistics:")
print("Total tests run: " + repr(results[RES_TESTS]));
print("Total running time: " + repr(results[RES_TIME]) + " sec")
if config.get(OPT_VERBOSE) and results[RES_TESTS] > 0:
print
print("Detailed Statistics:")
print("Correct results: ".ljust(WIDTH) + repr(results[RES_CORRECT]));
print("Incorrect: ".ljust(WIDTH) + repr(len(results[RES_INCORRECT])));
print("Problematic cases: ".ljust(WIDTH) + repr(results[RES_PROBLEMATIC]))
print("Timed out: ".ljust(WIDTH) + repr(len(results[RES_TIMEOUT])));
print("Out of memory: ".ljust(WIDTH) + repr(len(results[RES_MEMOUT])));
print("Arithmetic overflow: ".ljust(WIDTH) + repr(len(results[RES_ARITH])));
print("Failed: ".ljust(WIDTH) + repr(len(results[RES_FAILED])));
test_cases = results[RES_FAILED]
if len(test_cases) > 0:
print("Failed tests: " + repr(len(test_cases)))
for test_case in test_cases:
print(" " + test_case)
test_cases = results[RES_INCORRECT]
if len(test_cases) > 0:
print("Tests with wrong results [" + repr(len(test_cases)) + "]:")
for test_case in test_cases:
print(" " + test_case)
test_cases = results[RES_STRANGE]
if len(test_cases) > 0:
print("Strange results [" + repr(len(test_cases)) + "]:")
for test_case in test_cases:
print(" " + test_case)
test_cases = results[RES_TIMEOUT]
if len(test_cases) > 0:
print("Tests timed out [" + repr(len(test_cases)) + "]:")
for test_case in test_cases:
print(" " + test_case)
test_cases = results[RES_MEMOUT]
if len(test_cases) > 0:
print("Tests out of memory [" + repr(len(test_cases)) + "]:")
for test_case in test_cases:
print(" " + test_case)
test_cases = results[RES_ARITH]
if len(test_cases) > 0:
print("Arithmetic overflow [" + repr(len(test_cases)) + "]:")
for test_case in test_cases:
print(" " + test_case)
test_cases = results[RES_TOO_FAST]
if len(test_cases) > 0:
print("Tests running faster than expected [" + repr(len(test_cases)) + "]:")
for test_case in test_cases:
print(" " + test_case)
test_cases = results[RES_MUCH_TOO_FAST]
if len(test_cases) > 0:
print("...including tests running at least twice as fast as expected [" + repr(len(test_cases)) + "]:")
for test_case in test_cases:
print(" " + test_case)
test_cases = results[RES_TOO_LONG]
if len(test_cases) > 0:
    print("Tests running longer than expected [" + repr(len(test_cases)) + "]:")
for test_case in test_cases:
print(" " + test_case)
test_cases = results[RES_MUCH_TOO_LONG]
if len(test_cases) > 0:
    print("...including tests running WAY too long [" + repr(len(test_cases)) + "]:")
for test_case in test_cases:
print(" " + test_case)
test_cases = results[RES_LANG]
if len(test_cases) > 0:
print("Tests with wrong input language [" + repr(len(test_cases)) + "]:")
for test_case in test_cases:
print(" " + test_case)
### main
def main ():
# evaluate command line
(options, test_cases, prover_options) = eval_arguments()
config = Config(options, prover_options)
# run the prover on all test cases
print_setup(config)
results = run_tests(config, test_cases)
print_end(config, results)
main ()
| apache-2.0 | 8,003,328,896,713,322,000 | 34.011236 | 136 | 0.554108 | false |
gioman/QGIS | python/plugins/processing/gui/RectangleMapTool.py | 2 | 3957 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RectangleMapTool.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import pyqtSignal
from qgis.PyQt.QtGui import QColor
from qgis.core import QgsPoint, QgsRectangle, QgsWkbTypes
from qgis.gui import QgsMapTool, QgsMapToolEmitPoint, QgsRubberBand
class RectangleMapTool(QgsMapToolEmitPoint):
rectangleCreated = pyqtSignal()
deactivated = pyqtSignal()
def __init__(self, canvas):
self.canvas = canvas
QgsMapToolEmitPoint.__init__(self, self.canvas)
self.rubberBand = QgsRubberBand(self.canvas, QgsWkbTypes.PolygonGeometry)
self.rubberBand.setColor(QColor(255, 0, 0, 100))
self.rubberBand.setWidth(2)
self.reset()
def reset(self):
self.startPoint = self.endPoint = None
self.isEmittingPoint = False
self.rubberBand.reset(QgsWkbTypes.PolygonGeometry)
def canvasPressEvent(self, e):
self.startPoint = self.toMapCoordinates(e.pos())
self.endPoint = self.startPoint
self.isEmittingPoint = True
self.showRect(self.startPoint, self.endPoint)
def canvasReleaseEvent(self, e):
self.isEmittingPoint = False
if self.rectangle() is not None:
self.rectangleCreated.emit()
def canvasMoveEvent(self, e):
if not self.isEmittingPoint:
return
self.endPoint = self.toMapCoordinates(e.pos())
self.showRect(self.startPoint, self.endPoint)
def showRect(self, startPoint, endPoint):
self.rubberBand.reset(QgsWkbTypes.PolygonGeometry)
if startPoint.x() == endPoint.x() or startPoint.y() == endPoint.y():
return
point1 = QgsPoint(startPoint.x(), startPoint.y())
point2 = QgsPoint(startPoint.x(), endPoint.y())
point3 = QgsPoint(endPoint.x(), endPoint.y())
point4 = QgsPoint(endPoint.x(), startPoint.y())
self.rubberBand.addPoint(point1, False)
self.rubberBand.addPoint(point2, False)
self.rubberBand.addPoint(point3, False)
# True to update canvas
self.rubberBand.addPoint(point4, True)
self.rubberBand.show()
def rectangle(self):
if self.startPoint is None or self.endPoint is None:
return None
elif self.startPoint.x() == self.endPoint.x() or \
self.startPoint.y() == self.endPoint.y():
return None
return QgsRectangle(self.startPoint, self.endPoint)
def setRectangle(self, rect):
if rect == self.rectangle():
return False
if rect is None:
self.reset()
else:
self.startPoint = QgsPoint(rect.xMaximum(), rect.yMaximum())
self.endPoint = QgsPoint(rect.xMinimum(), rect.yMinimum())
self.showRect(self.startPoint, self.endPoint)
return True
def deactivate(self):
QgsMapTool.deactivate(self)
self.deactivated.emit()
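# Typical wiring of this tool (added sketch; `canvas` is an existing
# QgsMapCanvas and the connected lambda is only illustrative):
#   tool = RectangleMapTool(canvas)
#   tool.rectangleCreated.connect(lambda: print(tool.rectangle().toString()))
#   canvas.setMapTool(tool)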
| gpl-2.0 | -8,270,287,371,011,833,000 | 34.017699 | 81 | 0.566844 | false |
baibaichen/eagle | eagle-external/hadoop_jmx_collector/lib/kafka-python/test/fixtures.py | 21 | 8637 | import logging
import os
import os.path
import shutil
import subprocess
import tempfile
from six.moves import urllib
import uuid
from six.moves.urllib.parse import urlparse # pylint: disable-msg=E0611
from test.service import ExternalService, SpawnedService
from test.testutil import get_open_port
class Fixture(object):
kafka_version = os.environ.get('KAFKA_VERSION', '0.8.0')
scala_version = os.environ.get("SCALA_VERSION", '2.8.0')
project_root = os.environ.get('PROJECT_ROOT', os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
kafka_root = os.environ.get("KAFKA_ROOT", os.path.join(project_root, 'servers', kafka_version, "kafka-bin"))
ivy_root = os.environ.get('IVY_ROOT', os.path.expanduser("~/.ivy2/cache"))
@classmethod
def download_official_distribution(cls,
kafka_version=None,
scala_version=None,
output_dir=None):
if not kafka_version:
kafka_version = cls.kafka_version
if not scala_version:
scala_version = cls.scala_version
if not output_dir:
output_dir = os.path.join(cls.project_root, 'servers', 'dist')
distfile = 'kafka_%s-%s' % (scala_version, kafka_version,)
url_base = 'https://archive.apache.org/dist/kafka/%s/' % (kafka_version,)
output_file = os.path.join(output_dir, distfile + '.tgz')
if os.path.isfile(output_file):
logging.info("Found file already on disk: %s", output_file)
return output_file
# New tarballs are .tgz, older ones are sometimes .tar.gz
try:
url = url_base + distfile + '.tgz'
logging.info("Attempting to download %s", url)
response = urllib.request.urlopen(url)
except urllib.error.HTTPError:
logging.exception("HTTP Error")
url = url_base + distfile + '.tar.gz'
logging.info("Attempting to download %s", url)
response = urllib.request.urlopen(url)
logging.info("Saving distribution file to %s", output_file)
        with open(output_file, 'wb') as output_file_fd:
output_file_fd.write(response.read())
return output_file
@classmethod
def test_resource(cls, filename):
return os.path.join(cls.project_root, "servers", cls.kafka_version, "resources", filename)
@classmethod
def kafka_run_class_args(cls, *args):
result = [os.path.join(cls.kafka_root, 'bin', 'kafka-run-class.sh')]
result.extend(args)
return result
@classmethod
def kafka_run_class_env(cls):
env = os.environ.copy()
env['KAFKA_LOG4J_OPTS'] = "-Dlog4j.configuration=file:%s" % cls.test_resource("log4j.properties")
return env
@classmethod
def render_template(cls, source_file, target_file, binding):
with open(source_file, "r") as handle:
template = handle.read()
with open(target_file, "w") as handle:
handle.write(template.format(**binding))
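    # render_template does plain str.format substitution: e.g. a template line
    # such as "clientPort={port}" becomes "clientPort=55123" when vars(self) of
    # a fixture with port 55123 is passed as the binding (values here are
    # illustrative; the real templates live under servers/<version>/resources).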
class ZookeeperFixture(Fixture):
@classmethod
def instance(cls):
if "ZOOKEEPER_URI" in os.environ:
parse = urlparse(os.environ["ZOOKEEPER_URI"])
(host, port) = (parse.hostname, parse.port)
fixture = ExternalService(host, port)
else:
(host, port) = ("127.0.0.1", get_open_port())
fixture = cls(host, port)
fixture.open()
return fixture
def __init__(self, host, port):
self.host = host
self.port = port
self.tmp_dir = None
self.child = None
def out(self, message):
logging.info("*** Zookeeper [%s:%d]: %s", self.host, self.port, message)
def open(self):
self.tmp_dir = tempfile.mkdtemp()
self.out("Running local instance...")
logging.info(" host = %s", self.host)
logging.info(" port = %s", self.port)
logging.info(" tmp_dir = %s", self.tmp_dir)
# Generate configs
template = self.test_resource("zookeeper.properties")
properties = os.path.join(self.tmp_dir, "zookeeper.properties")
self.render_template(template, properties, vars(self))
# Configure Zookeeper child process
args = self.kafka_run_class_args("org.apache.zookeeper.server.quorum.QuorumPeerMain", properties)
env = self.kafka_run_class_env()
self.child = SpawnedService(args, env)
# Party!
self.out("Starting...")
self.child.start()
self.child.wait_for(r"binding to port")
self.out("Done!")
def close(self):
self.out("Stopping...")
self.child.stop()
self.child = None
self.out("Done!")
shutil.rmtree(self.tmp_dir)
class KafkaFixture(Fixture):
@classmethod
def instance(cls, broker_id, zk_host, zk_port, zk_chroot=None, replicas=1, partitions=2):
if zk_chroot is None:
zk_chroot = "kafka-python_" + str(uuid.uuid4()).replace("-", "_")
if "KAFKA_URI" in os.environ:
parse = urlparse(os.environ["KAFKA_URI"])
(host, port) = (parse.hostname, parse.port)
fixture = ExternalService(host, port)
else:
(host, port) = ("127.0.0.1", get_open_port())
fixture = KafkaFixture(host, port, broker_id, zk_host, zk_port, zk_chroot, replicas, partitions)
fixture.open()
return fixture
def __init__(self, host, port, broker_id, zk_host, zk_port, zk_chroot, replicas=1, partitions=2):
self.host = host
self.port = port
self.broker_id = broker_id
self.zk_host = zk_host
self.zk_port = zk_port
self.zk_chroot = zk_chroot
self.replicas = replicas
self.partitions = partitions
self.tmp_dir = None
self.child = None
self.running = False
def out(self, message):
logging.info("*** Kafka [%s:%d]: %s", self.host, self.port, message)
def open(self):
if self.running:
self.out("Instance already running")
return
self.tmp_dir = tempfile.mkdtemp()
self.out("Running local instance...")
logging.info(" host = %s", self.host)
logging.info(" port = %s", self.port)
logging.info(" broker_id = %s", self.broker_id)
logging.info(" zk_host = %s", self.zk_host)
logging.info(" zk_port = %s", self.zk_port)
logging.info(" zk_chroot = %s", self.zk_chroot)
logging.info(" replicas = %s", self.replicas)
logging.info(" partitions = %s", self.partitions)
logging.info(" tmp_dir = %s", self.tmp_dir)
# Create directories
os.mkdir(os.path.join(self.tmp_dir, "logs"))
os.mkdir(os.path.join(self.tmp_dir, "data"))
# Generate configs
template = self.test_resource("kafka.properties")
properties = os.path.join(self.tmp_dir, "kafka.properties")
self.render_template(template, properties, vars(self))
# Configure Kafka child process
args = self.kafka_run_class_args("kafka.Kafka", properties)
env = self.kafka_run_class_env()
self.child = SpawnedService(args, env)
# Party!
self.out("Creating Zookeeper chroot node...")
args = self.kafka_run_class_args("org.apache.zookeeper.ZooKeeperMain",
"-server", "%s:%d" % (self.zk_host, self.zk_port),
"create",
"/%s" % self.zk_chroot,
"kafka-python")
env = self.kafka_run_class_env()
proc = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if proc.wait() != 0:
self.out("Failed to create Zookeeper chroot node")
self.out(proc.stdout.read())
self.out(proc.stderr.read())
raise RuntimeError("Failed to create Zookeeper chroot node")
self.out("Done!")
self.out("Starting...")
self.child.start()
self.child.wait_for(r"\[Kafka Server %d\], Started" % self.broker_id)
self.out("Done!")
self.running = True
def close(self):
if not self.running:
self.out("Instance already stopped")
return
self.out("Stopping...")
self.child.stop()
self.child = None
self.out("Done!")
shutil.rmtree(self.tmp_dir)
self.running = False
| apache-2.0 | -2,447,545,740,143,481,000 | 35.597458 | 113 | 0.575431 | false |
hang-qi/models | neural_gpu/data_utils.py | 17 | 16158 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural GPU -- data generation and batching utilities."""
import math
import os
import random
import sys
import time
import numpy as np
import tensorflow as tf
import program_utils
FLAGS = tf.app.flags.FLAGS
bins = [2 + bin_idx_i for bin_idx_i in xrange(256)]
all_tasks = ["sort", "kvsort", "id", "rev", "rev2", "incr", "add", "left",
"right", "left-shift", "right-shift", "bmul", "mul", "dup",
"badd", "qadd", "search", "progeval", "progsynth"]
log_filename = ""
vocab, rev_vocab = None, None
def pad(l):
for b in bins:
if b >= l: return b
return bins[-1]
def bin_for(l):
for i, b in enumerate(bins):
if b >= l: return i
return len(bins) - 1
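# Added note: with the default bins = [2, 3, ..., 257], pad(5) == 5 and
# bin_for(5) == 3, i.e. a sequence is padded up to the smallest bin that fits.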
train_set = {}
test_set = {}
for some_task in all_tasks:
train_set[some_task] = []
test_set[some_task] = []
for all_max_len in xrange(10000):
train_set[some_task].append([])
test_set[some_task].append([])
def read_tmp_file(name):
"""Read from a file with the given name in our log directory or above."""
dirname = os.path.dirname(log_filename)
fname = os.path.join(dirname, name + ".txt")
if not tf.gfile.Exists(fname):
print_out("== not found file: " + fname)
fname = os.path.join(dirname, "../" + name + ".txt")
if not tf.gfile.Exists(fname):
print_out("== not found file: " + fname)
fname = os.path.join(dirname, "../../" + name + ".txt")
if not tf.gfile.Exists(fname):
print_out("== not found file: " + fname)
return None
print_out("== found file: " + fname)
res = []
with tf.gfile.GFile(fname, mode="r") as f:
for line in f:
res.append(line.strip())
return res
def write_tmp_file(name, lines):
dirname = os.path.dirname(log_filename)
fname = os.path.join(dirname, name + ".txt")
with tf.gfile.GFile(fname, mode="w") as f:
for line in lines:
f.write(line + "\n")
def add(n1, n2, base=10):
"""Add two numbers represented as lower-endian digit lists."""
k = max(len(n1), len(n2)) + 1
d1 = n1 + [0 for _ in xrange(k - len(n1))]
d2 = n2 + [0 for _ in xrange(k - len(n2))]
res = []
carry = 0
for i in xrange(k):
if d1[i] + d2[i] + carry < base:
res.append(d1[i] + d2[i] + carry)
carry = 0
else:
res.append(d1[i] + d2[i] + carry - base)
carry = 1
while res and res[-1] == 0:
res = res[:-1]
if res: return res
return [0]
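# Added worked example for the lower-endian digit-list format used by add():
# 47 is [7, 4] and 85 is [5, 8], so add([7, 4], [5, 8]) == [2, 3, 1], i.e. 132.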
def init_data(task, length, nbr_cases, nclass):
"""Data initialization."""
def rand_pair(l, task):
"""Random data pair for a task. Total length should be <= l."""
k = (l-1)/2
base = 10
if task[0] == "b": base = 2
if task[0] == "q": base = 4
d1 = [np.random.randint(base) for _ in xrange(k)]
d2 = [np.random.randint(base) for _ in xrange(k)]
if task in ["add", "badd", "qadd"]:
res = add(d1, d2, base)
elif task in ["mul", "bmul"]:
d1n = sum([d * (base ** i) for i, d in enumerate(d1)])
d2n = sum([d * (base ** i) for i, d in enumerate(d2)])
if task == "bmul":
res = [int(x) for x in list(reversed(str(bin(d1n * d2n))))[:-2]]
else:
res = [int(x) for x in list(reversed(str(d1n * d2n)))]
else:
sys.exit()
sep = [12]
if task in ["add", "badd", "qadd"]: sep = [11]
inp = [d + 1 for d in d1] + sep + [d + 1 for d in d2]
return inp, [r + 1 for r in res]
def rand_dup_pair(l):
"""Random data pair for duplication task. Total length should be <= l."""
k = l/2
x = [np.random.randint(nclass - 1) + 1 for _ in xrange(k)]
inp = x + [0 for _ in xrange(l - k)]
res = x + x + [0 for _ in xrange(l - 2*k)]
return inp, res
def rand_rev2_pair(l):
"""Random data pair for reverse2 task. Total length should be <= l."""
inp = [(np.random.randint(nclass - 1) + 1,
np.random.randint(nclass - 1) + 1) for _ in xrange(l/2)]
res = [i for i in reversed(inp)]
return [x for p in inp for x in p], [x for p in res for x in p]
def rand_search_pair(l):
"""Random data pair for search task. Total length should be <= l."""
inp = [(np.random.randint(nclass - 1) + 1,
            np.random.randint(nclass - 1) + 1) for _ in xrange((l-1)/2)]
q = np.random.randint(nclass - 1) + 1
res = 0
for (k, v) in reversed(inp):
if k == q:
res = v
return [x for p in inp for x in p] + [q], [res]
def rand_kvsort_pair(l):
"""Random data pair for key-value sort. Total length should be <= l."""
keys = [(np.random.randint(nclass - 1) + 1, i) for i in xrange(l/2)]
vals = [np.random.randint(nclass - 1) + 1 for _ in xrange(l/2)]
kv = [(k, vals[i]) for (k, i) in keys]
sorted_kv = [(k, vals[i]) for (k, i) in sorted(keys)]
return [x for p in kv for x in p], [x for p in sorted_kv for x in p]
def prog_io_pair(prog, max_len, counter=0):
try:
ilen = np.random.randint(max_len - 3) + 1
bound = max(15 - (counter / 20), 1)
inp = [random.choice(range(-bound, bound)) for _ in range(ilen)]
inp_toks = [program_utils.prog_rev_vocab[t]
for t in program_utils.tokenize(str(inp)) if t != ","]
out = program_utils.evaluate(prog, {"a": inp})
out_toks = [program_utils.prog_rev_vocab[t]
for t in program_utils.tokenize(str(out)) if t != ","]
if counter > 400:
out_toks = []
if (out_toks and out_toks[0] == program_utils.prog_rev_vocab["["] and
len(out_toks) != len([o for o in out if o == ","]) + 3):
raise ValueError("generated list with too long ints")
if (out_toks and out_toks[0] != program_utils.prog_rev_vocab["["] and
len(out_toks) > 1):
raise ValueError("generated one int but tokenized it to many")
if len(out_toks) > max_len:
raise ValueError("output too long")
return (inp_toks, out_toks)
except ValueError:
return prog_io_pair(prog, max_len, counter+1)
def spec(inp):
"""Return the target given the input for some tasks."""
if task == "sort":
return sorted(inp)
elif task == "id":
return inp
elif task == "rev":
return [i for i in reversed(inp)]
elif task == "incr":
carry = 1
res = []
for i in xrange(len(inp)):
if inp[i] + carry < nclass:
res.append(inp[i] + carry)
carry = 0
else:
res.append(1)
carry = 1
return res
elif task == "left":
return [inp[0]]
elif task == "right":
return [inp[-1]]
elif task == "left-shift":
return [inp[l-1] for l in xrange(len(inp))]
elif task == "right-shift":
      return [inp[(l+1) % len(inp)] for l in xrange(len(inp))]  # cyclic, like left-shift
else:
print_out("Unknown spec for task " + str(task))
sys.exit()
l = length
cur_time = time.time()
total_time = 0.0
is_prog = task in ["progeval", "progsynth"]
if is_prog:
inputs_per_prog = 5
program_utils.make_vocab()
progs = read_tmp_file("programs_len%d" % (l / 10))
if not progs:
progs = program_utils.gen(l / 10, 1.2 * nbr_cases / inputs_per_prog)
write_tmp_file("programs_len%d" % (l / 10), progs)
prog_ios = read_tmp_file("programs_len%d_io" % (l / 10))
nbr_cases = min(nbr_cases, len(progs) * inputs_per_prog) / 1.2
if not prog_ios:
# Generate program io data.
prog_ios = []
for pidx, prog in enumerate(progs):
if pidx % 500 == 0:
print_out("== generating io pairs for program %d" % pidx)
if pidx * inputs_per_prog > nbr_cases * 1.2:
break
ptoks = [program_utils.prog_rev_vocab[t]
for t in program_utils.tokenize(prog)]
ptoks.append(program_utils.prog_rev_vocab["_EOS"])
plen = len(ptoks)
for _ in xrange(inputs_per_prog):
if task == "progeval":
inp, out = prog_io_pair(prog, plen)
prog_ios.append(str(inp) + "\t" + str(out) + "\t" + prog)
elif task == "progsynth":
plen = max(len(ptoks), 8)
for _ in xrange(3):
inp, out = prog_io_pair(prog, plen / 2)
prog_ios.append(str(inp) + "\t" + str(out) + "\t" + prog)
write_tmp_file("programs_len%d_io" % (l / 10), prog_ios)
prog_ios_dict = {}
for s in prog_ios:
i, o, p = s.split("\t")
i_clean = "".join([c for c in i if c.isdigit() or c == " "])
o_clean = "".join([c for c in o if c.isdigit() or c == " "])
inp = [int(x) for x in i_clean.split()]
out = [int(x) for x in o_clean.split()]
if inp and out:
if p in prog_ios_dict:
prog_ios_dict[p].append([inp, out])
else:
prog_ios_dict[p] = [[inp, out]]
# Use prog_ios_dict to create data.
progs = []
for prog in prog_ios_dict:
if len([c for c in prog if c == ";"]) <= (l / 10):
progs.append(prog)
nbr_cases = min(nbr_cases, len(progs) * inputs_per_prog) / 1.2
print_out("== %d training cases on %d progs" % (nbr_cases, len(progs)))
for pidx, prog in enumerate(progs):
if pidx * inputs_per_prog > nbr_cases * 1.2:
break
ptoks = [program_utils.prog_rev_vocab[t]
for t in program_utils.tokenize(prog)]
ptoks.append(program_utils.prog_rev_vocab["_EOS"])
plen = len(ptoks)
dset = train_set if pidx < nbr_cases / inputs_per_prog else test_set
for _ in xrange(inputs_per_prog):
if task == "progeval":
inp, out = prog_ios_dict[prog].pop()
dset[task][bin_for(plen)].append([[ptoks, inp, [], []], [out]])
elif task == "progsynth":
plen, ilist = max(len(ptoks), 8), [[]]
for _ in xrange(3):
inp, out = prog_ios_dict[prog].pop()
ilist.append(inp + out)
dset[task][bin_for(plen)].append([ilist, [ptoks]])
for case in xrange(0 if is_prog else nbr_cases):
total_time += time.time() - cur_time
cur_time = time.time()
if l > 10000 and case % 100 == 1:
print_out(" avg gen time %.4f s" % (total_time / float(case)))
if task in ["add", "badd", "qadd", "bmul", "mul"]:
i, t = rand_pair(l, task)
train_set[task][bin_for(len(i))].append([[[], i, [], []], [t]])
i, t = rand_pair(l, task)
test_set[task][bin_for(len(i))].append([[[], i, [], []], [t]])
elif task == "dup":
i, t = rand_dup_pair(l)
train_set[task][bin_for(len(i))].append([[i], [t]])
i, t = rand_dup_pair(l)
test_set[task][bin_for(len(i))].append([[i], [t]])
elif task == "rev2":
i, t = rand_rev2_pair(l)
train_set[task][bin_for(len(i))].append([[i], [t]])
i, t = rand_rev2_pair(l)
test_set[task][bin_for(len(i))].append([[i], [t]])
elif task == "search":
i, t = rand_search_pair(l)
train_set[task][bin_for(len(i))].append([[i], [t]])
i, t = rand_search_pair(l)
test_set[task][bin_for(len(i))].append([[i], [t]])
elif task == "kvsort":
i, t = rand_kvsort_pair(l)
train_set[task][bin_for(len(i))].append([[i], [t]])
i, t = rand_kvsort_pair(l)
test_set[task][bin_for(len(i))].append([[i], [t]])
elif task not in ["progeval", "progsynth"]:
inp = [np.random.randint(nclass - 1) + 1 for i in xrange(l)]
target = spec(inp)
train_set[task][bin_for(l)].append([[inp], [target]])
inp = [np.random.randint(nclass - 1) + 1 for i in xrange(l)]
target = spec(inp)
test_set[task][bin_for(l)].append([[inp], [target]])
def to_symbol(i):
"""Covert ids to text."""
if i == 0: return ""
if i == 11: return "+"
if i == 12: return "*"
return str(i-1)
def to_id(s):
"""Covert text to ids."""
if s == "+": return 11
if s == "*": return 12
return int(s) + 1
def get_batch(bin_id, batch_size, data_set, height, offset=None, preset=None):
"""Get a batch of data, training or testing."""
inputs, targets = [], []
pad_length = bins[bin_id]
for b in xrange(batch_size):
if preset is None:
elem = random.choice(data_set[bin_id])
if offset is not None and offset + b < len(data_set[bin_id]):
elem = data_set[bin_id][offset + b]
else:
elem = preset
inpt, targett, inpl, targetl = elem[0], elem[1], [], []
for inp in inpt:
inpl.append(inp + [0 for _ in xrange(pad_length - len(inp))])
if len(inpl) == 1:
for _ in xrange(height - 1):
inpl.append([0 for _ in xrange(pad_length)])
for target in targett:
targetl.append(target + [0 for _ in xrange(pad_length - len(target))])
inputs.append(inpl)
targets.append(targetl)
res_input = np.array(inputs, dtype=np.int32)
res_target = np.array(targets, dtype=np.int32)
assert list(res_input.shape) == [batch_size, height, pad_length]
assert list(res_target.shape) == [batch_size, 1, pad_length]
return res_input, res_target
def print_out(s, newline=True):
"""Print a message out and log it to file."""
if log_filename:
try:
with tf.gfile.GFile(log_filename, mode="a") as f:
f.write(s + ("\n" if newline else ""))
# pylint: disable=bare-except
except:
sys.stderr.write("Error appending to %s\n" % log_filename)
sys.stdout.write(s + ("\n" if newline else ""))
sys.stdout.flush()
def decode(output):
return [np.argmax(o, axis=1) for o in output]
def accuracy(inpt_t, output, target_t, batch_size, nprint,
beam_out=None, beam_scores=None):
"""Calculate output accuracy given target."""
assert nprint < batch_size + 1
inpt = []
for h in xrange(inpt_t.shape[1]):
inpt.extend([inpt_t[:, h, l] for l in xrange(inpt_t.shape[2])])
target = [target_t[:, 0, l] for l in xrange(target_t.shape[2])]
def tok(i):
if rev_vocab and i < len(rev_vocab):
return rev_vocab[i]
return str(i - 1)
def task_print(inp, output, target):
stop_bound = 0
print_len = 0
while print_len < len(target) and target[print_len] > stop_bound:
print_len += 1
print_out(" i: " + " ".join([tok(i) for i in inp if i > 0]))
print_out(" o: " +
" ".join([tok(output[l]) for l in xrange(print_len)]))
print_out(" t: " +
" ".join([tok(target[l]) for l in xrange(print_len)]))
decoded_target = target
decoded_output = decode(output)
# Use beam output if given and score is high enough.
if beam_out is not None:
for b in xrange(batch_size):
if beam_scores[b] >= 10.0:
for l in xrange(min(len(decoded_output), beam_out.shape[2])):
decoded_output[l][b] = int(beam_out[b, 0, l])
total = 0
errors = 0
seq = [0 for b in xrange(batch_size)]
for l in xrange(len(decoded_output)):
for b in xrange(batch_size):
if decoded_target[l][b] > 0:
total += 1
if decoded_output[l][b] != decoded_target[l][b]:
seq[b] = 1
errors += 1
e = 0 # Previous error index
for _ in xrange(min(nprint, sum(seq))):
while seq[e] == 0:
e += 1
task_print([inpt[l][e] for l in xrange(len(inpt))],
[decoded_output[l][e] for l in xrange(len(decoded_target))],
[decoded_target[l][e] for l in xrange(len(decoded_target))])
e += 1
for b in xrange(nprint - errors):
task_print([inpt[l][b] for l in xrange(len(inpt))],
[decoded_output[l][b] for l in xrange(len(decoded_target))],
[decoded_target[l][b] for l in xrange(len(decoded_target))])
return errors, total, sum(seq)
def safe_exp(x):
perp = 10000
x = float(x)
if x < 100: perp = math.exp(x)
if perp > 10000: return 10000
return perp
| apache-2.0 | 6,475,924,398,456,513,000 | 34.356674 | 80 | 0.563622 | false |
rosswhitfield/mantid | scripts/Muon/GUI/Common/difference_table_widget/difference_widget_presenter.py | 3 | 1902 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from Muon.GUI.Common.difference_table_widget.difference_table_widget_presenter import DifferenceTablePresenter
from Muon.GUI.Common.difference_table_widget.difference_table_widget_view import DifferenceTableView
from Muon.GUI.Common.difference_table_widget.difference_widget_view import DifferenceView
diff_columns = ['diff_name', 'to_analyse', 'group_1', 'group_2']
class DifferencePresenter(object):
def __init__(self, model):
self.model = model
self.group_view = DifferenceTableView()
self.group_widget = DifferenceTablePresenter(self.group_view, self.model, "group")
self.pair_view = DifferenceTableView()
self.pair_widget = DifferenceTablePresenter(self.pair_view, self.model, "pair")
self.view = DifferenceView(self.pair_view, self.group_view)
def update_view_from_model(self):
self.group_widget.update_view_from_model()
self.pair_widget.update_view_from_model()
def disable_editing(self):
self.group_widget.disable_editing()
self.pair_widget.disable_editing()
def enable_editing(self):
self.group_widget.enable_editing()
self.pair_widget.enable_editing()
def add_subscribers(self, observer_list):
for observer in observer_list:
self.group_widget.selected_diff_changed_notifier.add_subscriber(observer)
self.pair_widget.selected_diff_changed_notifier.add_subscriber(observer)
def on_data_changed(self, observer):
self.group_widget.on_data_changed(observer)
self.pair_widget.on_data_changed(observer)
| gpl-3.0 | -3,301,874,080,768,597,500 | 40.347826 | 110 | 0.719243 | false |
atztogo/phono3py | phono3py/other/isotope.py | 1 | 13495 | # Copyright (C) 2015 Atsushi Togo
# All rights reserved.
#
# This file is part of phono3py.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from phonopy.harmonic.dynamical_matrix import get_dynamical_matrix
from phonopy.structure.tetrahedron_method import TetrahedronMethod
from phonopy.phonon.tetrahedron_mesh import get_tetrahedra_frequencies
from phonopy.units import VaspToTHz
from phonopy.structure.atoms import isotope_data
from phono3py.phonon.solver import run_phonon_solver_c, run_phonon_solver_py
from phono3py.phonon3.triplets import get_bz_grid_address
from phono3py.phonon.func import gaussian
def get_mass_variances(primitive):
symbols = primitive.symbols
mass_variances = []
for s in symbols:
masses = np.array([x[1] for x in isotope_data[s]])
fractions = np.array([x[2] for x in isotope_data[s]])
m_ave = np.dot(masses, fractions)
g = np.dot(fractions, (1 - masses / m_ave) ** 2)
mass_variances.append(g)
return np.array(mass_variances, dtype='double')
class Isotope(object):
def __init__(self,
mesh,
primitive,
mass_variances=None, # length of list is num_atom.
band_indices=None,
sigma=None,
frequency_factor_to_THz=VaspToTHz,
symprec=1e-5,
cutoff_frequency=None,
lapack_zheev_uplo='L'):
self._mesh = np.array(mesh, dtype='intc')
if mass_variances is None:
self._mass_variances = get_mass_variances(primitive)
else:
self._mass_variances = np.array(mass_variances, dtype='double')
self._primitive = primitive
self._sigma = sigma
self._symprec = symprec
if cutoff_frequency is None:
self._cutoff_frequency = 0
else:
self._cutoff_frequency = cutoff_frequency
self._frequency_factor_to_THz = frequency_factor_to_THz
self._lapack_zheev_uplo = lapack_zheev_uplo
self._nac_q_direction = None
self._grid_address = None
self._bz_map = None
self._grid_points = None
self._frequencies = None
self._eigenvectors = None
self._phonon_done = None
self._dm = None
self._gamma = None
self._tetrahedron_method = None
num_band = len(self._primitive) * 3
if band_indices is None:
self._band_indices = np.arange(num_band, dtype='intc')
else:
self._band_indices = np.array(band_indices, dtype='intc')
def set_grid_point(self, grid_point):
self._grid_point = grid_point
self._grid_points = np.arange(np.prod(self._mesh), dtype='uintp')
if self._grid_address is None:
primitive_lattice = np.linalg.inv(self._primitive.cell)
self._grid_address, self._bz_map = get_bz_grid_address(
self._mesh, primitive_lattice, with_boundary=True)
if self._phonon_done is None:
self._allocate_phonon()
def run(self):
self._run_c()
@property
def sigma(self):
return self._sigma
@sigma.setter
def sigma(self, sigma):
if sigma is None:
self._sigma = None
else:
self._sigma = float(sigma)
def set_sigma(self, sigma):
warnings.warn("Use attribute, sigma.", DeprecationWarning)
self.sigma = sigma
@property
def dynamical_matrix(self):
return self._dm
@property
def band_indices(self):
return self._band_indices
@property
def gamma(self):
return self._gamma
def get_gamma(self):
warnings.warn("Use attribute, gamma.", DeprecationWarning)
return self.gamma
@property
def grid_address(self):
return self._grid_address
def get_grid_address(self):
warnings.warn("Use attribute, grid_address.", DeprecationWarning)
return self.grid_address
@property
def mass_variances(self):
return self._mass_variances
def get_mass_variances(self):
warnings.warn("Use attribute, mass_variances.", DeprecationWarning)
return self.mass_variances
def get_phonons(self):
return self._frequencies, self._eigenvectors, self._phonon_done
def set_phonons(self,
grid_address,
bz_map,
frequencies,
eigenvectors,
phonon_done,
dm=None):
self._grid_address = grid_address
self._bz_map = bz_map
self._frequencies = frequencies
self._eigenvectors = eigenvectors
self._phonon_done = phonon_done
if dm is not None:
self._dm = dm
def init_dynamical_matrix(self,
fc2,
supercell,
primitive,
nac_params=None,
frequency_scale_factor=None,
decimals=None):
self._primitive = primitive
self._dm = get_dynamical_matrix(
fc2,
supercell,
primitive,
nac_params=nac_params,
frequency_scale_factor=frequency_scale_factor,
decimals=decimals,
symprec=self._symprec)
def set_nac_q_direction(self, nac_q_direction=None):
if nac_q_direction is not None:
self._nac_q_direction = np.array(nac_q_direction, dtype='double')
def _run_c(self):
self._run_phonon_solver_c(self._grid_points)
import phono3py._phono3py as phono3c
gamma = np.zeros(len(self._band_indices), dtype='double')
if self._sigma is None:
self._set_integration_weights()
weights = np.ones(len(self._grid_points), dtype='intc')
phono3c.thm_isotope_strength(gamma,
self._grid_point,
self._grid_points,
weights,
self._mass_variances,
self._frequencies,
self._eigenvectors,
self._band_indices,
self._integration_weights,
self._cutoff_frequency)
else:
phono3c.isotope_strength(gamma,
self._grid_point,
self._mass_variances,
self._frequencies,
self._eigenvectors,
self._band_indices,
np.prod(self._mesh),
self._sigma,
self._cutoff_frequency)
self._gamma = gamma / np.prod(self._mesh)
def _set_integration_weights(self):
primitive_lattice = np.linalg.inv(self._primitive.cell)
thm = TetrahedronMethod(primitive_lattice, mesh=self._mesh)
num_grid_points = len(self._grid_points)
num_band = len(self._primitive) * 3
self._integration_weights = np.zeros(
(num_grid_points, len(self._band_indices), num_band),
dtype='double')
self._set_integration_weights_c(thm)
def _set_integration_weights_c(self, thm):
import phono3py._phono3py as phono3c
unique_vertices = thm.get_unique_tetrahedra_vertices()
neighboring_grid_points = np.zeros(
len(unique_vertices) * len(self._grid_points), dtype='uintp')
phono3c.neighboring_grid_points(
neighboring_grid_points,
self._grid_points,
unique_vertices,
self._mesh,
self._grid_address,
self._bz_map)
unique_grid_points = np.array(np.unique(neighboring_grid_points),
dtype='uintp')
self._run_phonon_solver_c(unique_grid_points)
freq_points = np.array(
self._frequencies[self._grid_point, self._band_indices],
dtype='double', order='C')
phono3c.integration_weights(
self._integration_weights,
freq_points,
thm.get_tetrahedra(),
self._mesh,
self._grid_points,
self._frequencies,
self._grid_address,
self._bz_map)
def _set_integration_weights_py(self, thm):
for i, gp in enumerate(self._grid_points):
tfreqs = get_tetrahedra_frequencies(
gp,
self._mesh,
[1, self._mesh[0], self._mesh[0] * self._mesh[1]],
self._grid_address,
thm.get_tetrahedra(),
self._grid_points,
self._frequencies)
for bi, frequencies in enumerate(tfreqs):
thm.set_tetrahedra_omegas(frequencies)
thm.run(self._frequencies[self._grid_point,
self._band_indices])
iw = thm.get_integration_weight()
self._integration_weights[i, :, bi] = iw
def _run_py(self):
for gp in self._grid_points:
self._run_phonon_solver_py(gp)
if self._sigma is None:
self._set_integration_weights()
t_inv = []
for bi in self._band_indices:
vec0 = self._eigenvectors[self._grid_point][:, bi].conj()
f0 = self._frequencies[self._grid_point][bi]
ti_sum = 0.0
for i, gp in enumerate(self._grid_points):
for j, (f, vec) in enumerate(
zip(self._frequencies[i], self._eigenvectors[i].T)):
if f < self._cutoff_frequency:
continue
ti_sum_band = np.sum(
np.abs((vec * vec0).reshape(-1, 3).sum(axis=1)) ** 2
* self._mass_variances)
if self._sigma is None:
ti_sum += ti_sum_band * self._integration_weights[
i, bi, j]
else:
ti_sum += ti_sum_band * gaussian(f0 - f, self._sigma)
t_inv.append(np.pi / 2 / np.prod(self._mesh) * f0 ** 2 * ti_sum)
self._gamma = np.array(t_inv, dtype='double') / 2
def _run_phonon_solver_c(self, grid_points):
run_phonon_solver_c(self._dm,
self._frequencies,
self._eigenvectors,
self._phonon_done,
grid_points,
self._grid_address,
self._mesh,
self._frequency_factor_to_THz,
self._nac_q_direction,
self._lapack_zheev_uplo)
def _run_phonon_solver_py(self, grid_point):
run_phonon_solver_py(grid_point,
self._phonon_done,
self._frequencies,
self._eigenvectors,
self._grid_address,
self._mesh,
self._dm,
self._frequency_factor_to_THz,
self._lapack_zheev_uplo)
def _allocate_phonon(self):
num_band = self._primitive.get_number_of_atoms() * 3
num_grid = len(self._grid_address)
self._phonon_done = np.zeros(num_grid, dtype='byte')
self._frequencies = np.zeros((num_grid, num_band), dtype='double')
itemsize = self._frequencies.itemsize
self._eigenvectors = np.zeros((num_grid, num_band, num_band),
dtype=("c%d" % (itemsize * 2)))
| bsd-3-clause | 516,168,749,323,169,340 | 37.557143 | 77 | 0.543905 | false |
s20121035/rk3288_android5.1_repo | cts/apps/CameraITS/tests/scene1/test_yuv_plus_raw.py | 2 | 2377 | # Copyright 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import its.image
import its.caps
import its.device
import its.objects
import its.target
import os.path
import math
def main():
"""Test capturing a single frame as both RAW and YUV outputs.
"""
NAME = os.path.basename(__file__).split(".")[0]
THRESHOLD_MAX_RMS_DIFF = 0.035
with its.device.ItsSession() as cam:
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.compute_target_exposure(props) and
its.caps.raw16(props) and
its.caps.per_frame_control(props))
# Use a manual request with a linear tonemap so that the YUV and RAW
# should look the same (once converted by the its.image module).
e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
req = its.objects.manual_capture_request(s, e, True)
cap_raw, cap_yuv = cam.do_capture(req, cam.CAP_RAW_YUV)
img = its.image.convert_capture_to_rgb_image(cap_yuv)
its.image.write_image(img, "%s_yuv.jpg" % (NAME), True)
tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
rgb0 = its.image.compute_image_means(tile)
# Raw shots are 1/2 x 1/2 smaller after conversion to RGB, so scale the
# tile appropriately.
img = its.image.convert_capture_to_rgb_image(cap_raw, props=props)
its.image.write_image(img, "%s_raw.jpg" % (NAME), True)
tile = its.image.get_image_patch(img, 0.475, 0.475, 0.05, 0.05)
rgb1 = its.image.compute_image_means(tile)
rms_diff = math.sqrt(
sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
print "RMS difference:", rms_diff
assert(rms_diff < THRESHOLD_MAX_RMS_DIFF)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,577,174,279,939,786,000 | 37.33871 | 79 | 0.648296 | false |
foxdog-studios/pyddp | ddp/pubsub/ponger.py | 1 | 1178 | # -*- coding: utf-8 -*-
# Copyright 2014 Foxdog Studios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ddp.messages.pong_message import PongMessage
from .subscriber import Subscriber
from .topics import MessageReceivedPing, MessageSendPong
__all__ = ['Ponger']
class Ponger(Subscriber):
def __init__(self, board):
super(Ponger, self).__init__(board,
{MessageReceivedPing: self._on_ping})
self._board = board
def _on_ping(self, topic, message):
self._board.publish(MessageSendPong, PongMessage(id=message.id))
| apache-2.0 | -2,490,839,697,122,580,000 | 31.722222 | 74 | 0.720713 | false |
anistark/mozillians | mozillians/phonebook/tests/test_profile_edit.py | 2 | 10910 | from django.core.urlresolvers import reverse
from mock import patch
from nose.tools import eq_, ok_
from requests import ConnectionError
from mozillians.common.tests import TestCase
from mozillians.geo.models import Country
from mozillians.geo.tests import CountryFactory, RegionFactory, CityFactory
from mozillians.phonebook.forms import LocationForm
from mozillians.phonebook.tests import _get_privacy_fields
from mozillians.users.managers import MOZILLIANS
from mozillians.users.models import UserProfile
from mozillians.users.tests import UserFactory
class ProfileEditTests(TestCase):
def test_profile_edit_vouched_links_to_groups_page(self):
"""A vouched user editing their profile is shown a link to the groups page.
"""
user = UserFactory.create()
url = reverse('phonebook:profile_edit', prefix='/en-US/')
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
groups_url = reverse('groups:index_groups', prefix='/en-US/')
ok_(groups_url in response.content)
def test_profile_edit_unvouched_doesnt_link_to_groups_page(self):
"""An unvouched user editing their profile is not shown a link to the groups page.
"""
user = UserFactory.create(vouched=False)
url = reverse('phonebook:profile_edit', prefix='/en-US/')
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
groups_url = reverse('groups:index_groups', prefix='/en-US/')
ok_(groups_url not in response.content)
def test_section_does_not_exist(self):
"""When not section exists in request.POST, 404 is raised."""
user = UserFactory.create(vouched=False)
url = reverse('phonebook:profile_edit', prefix='/en-US/')
data = {
'full_name': user.userprofile.full_name,
'email': user.email,
'username': user.username,
'lat': 40.005814,
'lng': -3.42071,
'externalaccount_set-MAX_NUM_FORMS': '1000',
'externalaccount_set-INITIAL_FORMS': '0',
'externalaccount_set-TOTAL_FORMS': '0',
'language_set-0-id': '',
'language_set-0-userprofile': '',
'language_set-0-code': 'en',
'language_set-1-id': '',
'language_set-1-userprofile': '',
'language_set-1-code': 'fr',
'language_set-MAX_NUM_FORMS': '1000',
'language_set-INITIAL_FORMS': '0',
'language_set-TOTAL_FORMS': '2',
}
data.update(_get_privacy_fields(MOZILLIANS))
with self.login(user) as client:
response = client.post(url, data=data, follow=True)
eq_(response.status_code, 404)
def test_wrong_section(self):
"""When a wrong section is given in request.POST, 404 is raised."""
user = UserFactory.create(vouched=False)
url = reverse('phonebook:profile_edit', prefix='/en-US/')
data = {
'full_name': user.userprofile.full_name,
'email': user.email,
'username': user.username,
'lat': 40.005814,
'lng': -3.42071,
'externalaccount_set-MAX_NUM_FORMS': '1000',
'externalaccount_set-INITIAL_FORMS': '0',
'externalaccount_set-TOTAL_FORMS': '0',
'language_set-0-id': '',
'language_set-0-userprofile': '',
'language_set-0-code': 'en',
'language_set-1-id': '',
'language_set-1-userprofile': '',
'language_set-1-code': 'fr',
'language_set-MAX_NUM_FORMS': '1000',
'language_set-INITIAL_FORMS': '0',
'language_set-TOTAL_FORMS': '2',
'foo_section': '',
}
data.update(_get_privacy_fields(MOZILLIANS))
with self.login(user) as client:
response = client.post(url, data=data, follow=True)
eq_(response.status_code, 404)
def test_languages_get_saved(self):
user = UserFactory.create(email='[email protected]')
data = {
'full_name': user.userprofile.full_name,
'email': user.email,
'username': user.username,
'lat': 40.005814,
'lng': -3.42071,
'externalaccount_set-MAX_NUM_FORMS': '1000',
'externalaccount_set-INITIAL_FORMS': '0',
'externalaccount_set-TOTAL_FORMS': '0',
'language_set-0-id': '',
'language_set-0-userprofile': '',
'language_set-0-code': 'en',
'language_set-1-id': '',
'language_set-1-userprofile': '',
'language_set-1-code': 'fr',
'language_set-MAX_NUM_FORMS': '1000',
'language_set-INITIAL_FORMS': '0',
'language_set-TOTAL_FORMS': '2',
'languages_section': ''
}
data.update(_get_privacy_fields(MOZILLIANS))
url = reverse('phonebook:profile_edit', prefix='/en-US/')
with self.login(user) as client:
response = client.post(url, data=data, follow=True)
eq_(response.status_code, 200)
profile = UserProfile.objects.get(pk=user.userprofile.pk)
eq_(set(profile.language_set.values_list('code', flat=True)), set(['en', 'fr']))
def test_location_data_required(self):
user = UserFactory.create(email='[email protected]')
data = {
'full_name': user.userprofile.full_name,
'email': user.email,
'username': user.username,
'externalaccount_set-MAX_NUM_FORMS': '1000',
'externalaccount_set-INITIAL_FORMS': '0',
'externalaccount_set-TOTAL_FORMS': '0',
'language_set-MAX_NUM_FORMS': '1000',
'language_set-INITIAL_FORMS': '0',
'language_set-TOTAL_FORMS': '0',
'location_section': ''
}
data.update(_get_privacy_fields(MOZILLIANS))
form = LocationForm(data=data)
eq_(form.is_valid(), False)
ok_(form.errors.get('lat'))
ok_(form.errors.get('lng'))
@patch('mozillians.phonebook.views.messages.info')
def test_succesful_registration(self, info_mock):
user = UserFactory.create(first_name='', last_name='')
url = reverse('phonebook:profile_edit', prefix='/en-US/')
data = {
'full_name': 'foo bar',
'email': '[email protected]',
'username': 'foobar',
'lat': 40.005814,
'lng': -3.42071,
'optin': True,
'registration_section': ''
}
data.update(_get_privacy_fields(MOZILLIANS))
with self.login(user) as client:
response = client.post(url, data, follow=True)
eq_(response.status_code, 200)
ok_(info_mock.called)
class LocationEditTests(TestCase):
def setUp(self):
self.user = UserFactory.create(email='[email protected]',
userprofile={'geo_country': None,
'geo_region': None,
'geo_city': None})
self.data = {
'full_name': self.user.userprofile.full_name,
'email': self.user.email,
'username': self.user.username,
'lat': 40.005814,
'lng': -3.42071,
'externalaccount_set-MAX_NUM_FORMS': '1000',
'externalaccount_set-INITIAL_FORMS': '0',
'externalaccount_set-TOTAL_FORMS': '0',
'language_set-MAX_NUM_FORMS': '1000',
'language_set-INITIAL_FORMS': '0',
'language_set-TOTAL_FORMS': '0',
'location_section': ''
}
self.country = CountryFactory.create(mapbox_id='country1', name='Petoria')
self.region = RegionFactory.create(country=self.country, mapbox_id='reg1', name='Ontario')
self.city = CityFactory.create(region=self.region, mapbox_id='city1', name='Toronto')
@patch('mozillians.geo.lookup.reverse_geocode')
def test_location_city_region_optout(self, mock_reverse_geocode):
mock_reverse_geocode.return_value = (self.country, self.region, self.city)
self.data.update(_get_privacy_fields(MOZILLIANS))
form = LocationForm(data=self.data)
eq_(form.is_valid(), True)
eq_(form.instance.geo_country, self.country)
eq_(form.instance.geo_region, None)
eq_(form.instance.geo_city, None)
@patch('mozillians.geo.lookup.reverse_geocode')
def test_location_api_called_when_latlng_changed(self, mock_reverse_geocode):
mock_reverse_geocode.return_value = (self.country, self.region, self.city)
self.data['lat'] = 40
self.data['lng'] = 20
self.data.update(_get_privacy_fields(MOZILLIANS))
initial = {
'lat': self.user.userprofile.lat,
'lng': self.user.userprofile.lng
}
form = LocationForm(data=self.data, initial=initial)
ok_(form.is_valid())
ok_(mock_reverse_geocode.called)
@patch('mozillians.geo.lookup.reverse_geocode')
def test_location_api_not_called_when_latlang_unchanged(self, mock_reverse_geocode):
mock_reverse_geocode.return_value = (self.country, self.region, self.city)
self.data['lng'] = self.user.userprofile.lng
self.data['lat'] = self.user.userprofile.lat
self.data.update(_get_privacy_fields(MOZILLIANS))
initial = {
'lat': self.user.userprofile.lat,
'lng': self.user.userprofile.lng
}
form = LocationForm(data=self.data, initial=initial)
ok_(form.is_valid())
ok_(not mock_reverse_geocode.called)
@patch('mozillians.geo.lookup.reverse_geocode')
def test_location_region_required_if_city(self, mock_reverse_geocode):
mock_reverse_geocode.return_value = (self.country, self.region, self.city)
self.data.update({'savecity': True})
self.data.update(_get_privacy_fields(MOZILLIANS))
form = LocationForm(data=self.data)
ok_(not form.is_valid())
ok_('saveregion' in form.errors)
@patch('mozillians.geo.lookup.requests')
def test_location_profile_save_connectionerror(self, mock_requests):
mock_requests.get.return_value.raise_for_status.side_effect = ConnectionError
error_country = Country.objects.create(name='Error', mapbox_id='geo_error')
self.data.update(_get_privacy_fields(MOZILLIANS))
url = reverse('phonebook:profile_edit', prefix='/en-US/')
with self.login(self.user) as client:
response = client.post(url, data=self.data, follow=True)
userprofile = UserProfile.objects.get(user=self.user)
eq_(response.status_code, 200)
eq_(userprofile.geo_country, error_country)
| bsd-3-clause | 2,644,026,904,069,464,600 | 41.286822 | 98 | 0.589918 | false |
VenturaDelMonte/socialnetworks | lessons/lab/exp2.py | 1 | 2096 | #!/usr/bin/python
import sys
import math
from lesson1 import randomGraph, diameter, averageClustering
from lesson2 import *
eps = 1e-12
rep = int(sys.argv[1]) #number of repetitions
nodes = int(sys.argv[2]) #number of nodes
sep = float(sys.argv[3]) #experiments for p = sep, 2sep, 3sep, ..., 1
k_threshold=int(sys.argv[4]) #the maximum number of random links
# experiments with random graphs
print("experiments w/ random graphs")
p = sep
while p < 1:
diam = 0
aclust = 0
not_connected = 0
for i in range(rep): # for each p we make rep simulations
graph = randomGraph(nodes,p)
edges=countEdges(graph)
tmp_diam=diameter(graph)
if tmp_diam > 0: #if diameter is -1, then the graph is not connected
diam += tmp_diam
else:
not_connected += 1
aclust += averageClustering(graph)
print(p, edges, not_connected, diam/(rep-not_connected), aclust/rep)
p += sep
print("experiments w/ Watts-Strogatz graphs (on grid)")
# experiments with Watts-Strogatz graphs (on grid)
line=int(math.sqrt(nodes))
for r in range(1,line):
for k in range(1,k_threshold+1):
diam = 0
aclust = 0
not_connected = 0
for i in range(rep):
graph = WSGridGraph(nodes,r,k)
edges=countEdges(graph)
tmp_diam=diameter(graph)
if tmp_diam > 0:
diam += tmp_diam
else:
not_connected += 1
aclust += averageClustering(graph)
print(r, k, edges, not_connected, diam/(rep-not_connected+eps), aclust/rep)
print("experiments w/ Watts-Strogatz graphs (on 2D grid)")
# experiments with Watts-Strogatz graphs (on 2D plane)
for r in range(1,line):
for k in range(1,k_threshold+1):
diam = 0
aclust = 0
not_connected = 0
for i in range(rep):
graph = WS2DGraph(nodes,r,k)
edges=countEdges(graph)
tmp_diam=diameter(graph)
if tmp_diam > 0:
diam += tmp_diam
else:
not_connected += 1
aclust += averageClustering(graph)
print(r, k, edges, not_connected, diam/(rep-not_connected+eps), aclust/rep) | mit | 2,672,493,682,791,091,000 | 28.405797 | 79 | 0.634542 | false |
UO-CIS-322/proj6-mongo | memos/flask_main.py | 2 | 3498 | """
Flask web app connects to Mongo database.
Keep a simple list of dated memoranda.
Representation conventions for dates:
- We use Arrow objects when we want to manipulate dates, but for all
storage in database, in session or g objects, or anything else that
needs a text representation, we use ISO date strings. These sort in the
order as arrow date objects, and they are easy to convert to and from
arrow date objects. (For display on screen, we use the 'humanize' filter
below.) A time zone offset will
- User input/output is in local (to the server) time.
"""
import flask
from flask import g
from flask import render_template
from flask import request
from flask import url_for
import json
import logging
import sys
# Date handling
import arrow
from dateutil import tz # For interpreting local times
# Mongo database
from pymongo import MongoClient
import config
CONFIG = config.configuration()
MONGO_CLIENT_URL = "mongodb://{}:{}@{}:{}/{}".format(
CONFIG.DB_USER,
CONFIG.DB_USER_PW,
CONFIG.DB_HOST,
CONFIG.DB_PORT,
CONFIG.DB)
print("Using URL '{}'".format(MONGO_CLIENT_URL))
###
# Globals
###
app = flask.Flask(__name__)
app.secret_key = CONFIG.SECRET_KEY
####
# Database connection per server process
###
try:
dbclient = MongoClient(MONGO_CLIENT_URL)
db = getattr(dbclient, CONFIG.DB)
collection = db.dated
except:
print("Failure opening database. Is Mongo running? Correct password?")
sys.exit(1)
###
# Pages
###
@app.route("/")
@app.route("/index")
def index():
app.logger.debug("Main page entry")
g.memos = get_memos()
for memo in g.memos:
app.logger.debug("Memo: " + str(memo))
return flask.render_template('index.html')
# We don't have an interface for creating memos yet
# @app.route("/create")
# def create():
# app.logger.debug("Create")
# return flask.render_template('create.html')
@app.errorhandler(404)
def page_not_found(error):
app.logger.debug("Page not found")
return flask.render_template('page_not_found.html',
badurl=request.base_url,
linkback=url_for("index")), 404
#################
#
# Functions used within the templates
#
#################
@app.template_filter( 'humanize' )
def humanize_arrow_date( date ):
"""
Date is internal UTC ISO format string.
Output should be "today", "yesterday", "in 5 days", etc.
Arrow will try to humanize down to the minute, so we
need to catch 'today' as a special case.
"""
try:
then = arrow.get(date).to('local')
now = arrow.utcnow().to('local')
if then.date() == now.date():
human = "Today"
else:
human = then.humanize(now)
if human == "in a day":
human = "Tomorrow"
except:
human = date
return human
#############
#
# Functions available to the page code above
#
##############
def get_memos():
"""
Returns all memos in the database, in a form that
can be inserted directly in the 'session' object.
"""
records = [ ]
for record in collection.find( { "type": "dated_memo" } ):
record['date'] = arrow.get(record['date']).isoformat()
del record['_id']
records.append(record)
return records
if __name__ == "__main__":
app.debug=CONFIG.DEBUG
app.logger.setLevel(logging.DEBUG)
app.run(port=CONFIG.PORT,host="0.0.0.0")
| artistic-2.0 | -6,580,291,621,084,850,000 | 22.32 | 78 | 0.624643 | false |
gannetson/sportschooldeopenlucht | apps/love/fields.py | 1 | 1426 | from django.contrib.contenttypes.generic import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.utils.functional import lazy
class LoveDeclarationRelation(GenericRelation):
"""
A :class:`~django.contrib.contenttypes.generic.GenericRelation` which can be applied to a parent model that
can be loved by a :class:`apps.love.models.LoveMarker` model. For example:
.. code-block:: python
class Comment(models.Model):
received_loves = LoveDeclarationRelation()
"""
def __init__(self, **kwargs):
from .models import LoveDeclaration
defaults = {
'limit_choices_to': Q(
parent_type=lazy(lambda: ContentType.objects.get_for_model(LoveDeclaration), ContentType)()
)
}
defaults.update(kwargs)
super(LoveDeclarationRelation, self).__init__(
to=LoveDeclaration,
object_id_field='object_id',
content_type_field='content_type',
**defaults
)
try:
from south.modelsinspector import add_ignored_fields
except ImportError:
pass
else:
# South 0.7.x ignores GenericRelation fields but doesn't ignore subclasses.
# Adding them to the ignore list.
_name_re = "^" + __name__.replace(".", "\.")
add_ignored_fields((
_name_re + "\.LoveDeclarationRelation",
))
| bsd-3-clause | 8,221,373,981,416,151,000 | 31.409091 | 111 | 0.650771 | false |
thorwhalen/ut | pplot/date_ticks.py | 1 | 3936 | import numpy as np
from datetime import datetime as dt
unit_str_to_unit_in_seconds = {
'day': 3600 * 24,
'hour': 3600,
'h': 3600,
'minute': 60,
'mn': 60,
'second': 1,
's': 1,
'millisecond': 0.001,
'ms': 0.001,
'microsecond': 1e-6,
'us': 1e-6,
}
unit_in_seconds = np.array([
60 * 60 * 24 * 365, # year
60 * 60 * 24 * 30, # month
60 * 60 * 24 * 7, # week
60 * 60 * 24, # day
60 * 60, # hour
60, # minute
1, # second
1e-3, # millisecond
1e-6, # microsecond
1e-9 # nanosecond
])
strftime_format_for_unit = {
60 * 60 * 24 * 30: '%y-%m-%d', # month
60 * 60 * 24 * 7: '%b %d', # week
60 * 60 * 24: '%b %d', # day
60 * 60: '%d-%H:%M', # hour
60: '%H:%M:%S', # minute
1: '%M:%S.%f', # second
1e-3: "%S''%f", # millisecond
1e-6: "%S''%f", # microsecond
}
def utc_datetime_from_val_and_unit(val, unit):
if isinstance(unit, str):
unit = unit_str_to_unit_in_seconds[unit]
return dt.utcfromtimestamp(val * unit)
def largest_unit_that_changes_at_every_tick(ticks, ticks_unit):
"""
Returns the largest time unit for which each time tick changes.
:param ticks: The list of ticks
:param ticks_unit: The unit of the elements of ticks, expressed in seconds. For example, if the list
contains hours, unit=3600, if minutes, unit=60, if seconds unit=1,
if milliseconds unit=0.001.
Note: You can also use a string to express the unit, as long as it's recognized by the
unit_str_to_unit_in_seconds dict. Keys recognized:
['day', 'hour', 'h', 'minute', 'mn', 'second', 's', 'millisecond', 'ms', 'microsecond', 'us']
:return:
"""
ticks = np.array(ticks)
if isinstance(ticks_unit, str):
ticks_unit = unit_str_to_unit_in_seconds[ticks_unit]
min_tick_diff = min(np.diff(ticks))
min_tick_diff *= ticks_unit # convert to seconds
for u in unit_in_seconds:
if u < min_tick_diff:
return u
def strftime_format_for_ticks(ticks, ticks_unit):
unit = largest_unit_that_changes_at_every_tick(ticks, ticks_unit)
return strftime_format_for_unit[unit]
def strftime_with_precision(tick, format, sub_secs_precision=2):
"""
Returns a formatted string for a datetime (tick).
:param tick: The datetime for this tick
:param format: The formatting string
:param sub_secs_precision: Number of digits to used for sub-seconds.
If None, will choose it "smartly/dynamically"
:return: Formatted string
"""
t = tick.strftime(format)
is_us = '%f' in format
if is_us:
if sub_secs_precision is None:
while t[-1] == '0':
t = t[:-1]
while not t[-1].isdigit():
t = t[:-1]
return t
else:
if sub_secs_precision < 0:
sub_secs_precision = 0
elif sub_secs_precision > 6:
sub_secs_precision = 6
DFLT_PRECISION = 6
digits_to_skip = DFLT_PRECISION - sub_secs_precision
if digits_to_skip == 0:
return t
else:
t = t[:-digits_to_skip]
while not t[-1].isdigit():
t = t[:-1]
return t
else:
return t
def str_ticks(ticks, ticks_unit, sub_secs_precision=2):
t_format = strftime_format_for_ticks(ticks, ticks_unit)
return [strftime_with_precision(utc_datetime_from_val_and_unit(x, ticks_unit), t_format, sub_secs_precision) for x
in ticks]
def use_time_ticks(ax=None, ticks_unit=0.001):
from matplotlib.pylab import gca
if ax is None:
ax = gca()
_xticks = ax.get_xticks()
ax.set_xticks(_xticks)
ax.set_xticklabels(str_ticks(ticks=_xticks, ticks_unit=ticks_unit))
ax.margins(x=0)
def unit_aligned_ticks(ticks, ticks_unit):
pass
| mit | 152,808,976,147,584,480 | 29.045802 | 118 | 0.565549 | false |
sencha/chromium-spacewalk | tools/valgrind/asan/asan_symbolize.py | 17 | 1357 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from third_party import asan_symbolize
import os
import re
import sys
def fix_filename(file_name):
for path_to_cut in sys.argv[1:]:
file_name = re.sub(".*" + path_to_cut, "", file_name)
file_name = re.sub(".*asan_[a-z_]*.cc:[0-9]*", "_asan_rtl_", file_name)
file_name = re.sub(".*crtstuff.c:0", "???:0", file_name)
return file_name
class LineBuffered(object):
"""Disable buffering on a file object."""
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
if '\n' in data:
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
def disable_buffering():
"""Makes this process and child processes stdout unbuffered."""
if not os.environ.get('PYTHONUNBUFFERED'):
# Since sys.stdout is a C++ object, it's impossible to do
# sys.stdout.write = lambda...
sys.stdout = LineBuffered(sys.stdout)
os.environ['PYTHONUNBUFFERED'] = 'x'
def main():
disable_buffering()
asan_symbolize.demangle = True
loop = asan_symbolize.SymbolizationLoop(binary_name_filter=fix_filename)
loop.process_stdin()
if __name__ == '__main__':
main()
| bsd-3-clause | -3,872,403,566,249,856,500 | 26.14 | 74 | 0.665438 | false |
sarthak-srivastava/chat | chat_app/chat/settings.py | 1 | 1164 | from django.conf import settings
NOTIFY_USERS_ON_ENTER_OR_LEAVE_ROOMS = getattr(settings, 'NOTIFY_USERS_ON_ENTER_OR_LEAVE_ROOMS', True)
MSG_TYPE_MESSAGE = 0 # For standard messages
MSG_TYPE_WARNING = 1 # For yellow messages
MSG_TYPE_ALERT = 2 # For red & dangerous alerts
MSG_TYPE_MUTED = 3 # For just OK information that doesn't bother users
MSG_TYPE_ENTER = 4 # For just OK information that doesn't bother users
MSG_TYPE_LEAVE = 5 # For just OK information that doesn't bother users
MESSAGE_TYPES_CHOICES = getattr(settings, 'MESSAGE_TYPES_CHOICES', (
(MSG_TYPE_MESSAGE, 'MESSAGE'),
(MSG_TYPE_WARNING, 'WARNING'),
(MSG_TYPE_ALERT, 'ALERT'),
(MSG_TYPE_MUTED, 'MUTED'),
(MSG_TYPE_ENTER, 'ENTER'),
(MSG_TYPE_LEAVE, 'LEAVE'))
)
MESSAGE_TYPES_LIST = getattr(settings, 'MESSAGE_TYPES_LIST',
[MSG_TYPE_MESSAGE,
MSG_TYPE_WARNING,
MSG_TYPE_ALERT,
MSG_TYPE_MUTED,
MSG_TYPE_ENTER,
MSG_TYPE_LEAVE]
)
| mit | 6,726,885,924,615,262,000 | 40.571429 | 102 | 0.562715 | false |
TanakritBenz/leetcode-adventure | friend_circles.py | 1 | 1532 | def buildGraph(friends):
"""
Helper function to build a graph representation from the 'friends' matrix
For Example: if input 'friends' = ['YYNN', 'YYYN', 'NYYN', 'NNNY']
Output will be 'vertices' = set([0, 1, 2, 3])
'edges' = {0: set([0, 1]),
1: set([0, 1, 2]),
2: set([1, 2]),
3: set([3])}
"""
vertices = set()
edges = {}
for i, row in enumerate(friends):
vertices.add(i)
edges[i] = set()
for j in range(len(row)):
if row[j] == 'Y':
edges[i].add(j)
return (vertices, edges)
def friendCircles(friends):
"""
For each node we do iterative DFS to find all the students in a friend circle.
For each circle we've found, we will do an increment to 'circles_cnt' by 1.
After every student node has been visited (set 'vertices' becomes empty), we're done.
"""
if not friends:
return 0
graph = buildGraph(friends)
vertices, edges = graph[0], graph[1]
circles_cnt = 0
while vertices:
stack, visited = [], set()
stack.append(vertices.pop())
while stack:
curr_node = stack.pop()
if curr_node not in visited:
visited.add(curr_node)
vertices.discard(curr_node)
for friend in edges[curr_node]:
stack.append(friend)
circles_cnt += 1
return circles_cnt
| gpl-2.0 | -5,092,389,222,722,884,000 | 33.044444 | 89 | 0.511097 | false |
Byron/bcore | src/python/bapp/schema.py | 1 | 1166 | #-*-coding:utf-8-*-
"""
@package bapp.schema
@brief All environment schemas used in the pipeline core
@author Sebastian Thiel
@copyright [GNU Lesser General Public License](https://www.gnu.org/licenses/lgpl.html)
"""
from __future__ import unicode_literals
__all__ = []
from butility import Path
from bkvstore import (RootKey,
KeyValueStoreSchema)
platform_schema = KeyValueStoreSchema(RootKey, dict(
platform={'id': 'unknown-platform'},
host={'name': str,
'fqname': str},
user={
# login name
'login': str,
# home path
'home': Path,
}
))
project_schema = KeyValueStoreSchema('project', {'id': str,
'paths': dict(
source=Path,
executables=Path,
configuration=Path,
output=Path
)
}
)
| lgpl-3.0 | 1,726,759,208,677,361,000 | 30.513514 | 86 | 0.422813 | false |
stannynuytkens/youtube-dl | youtube_dl/extractor/motherless.py | 4 | 7867 | from __future__ import unicode_literals
import datetime
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
ExtractorError,
InAdvancePagedList,
orderedSet,
str_to_int,
unified_strdate,
)
class MotherlessIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)'
_TESTS = [{
'url': 'http://motherless.com/AC3FFE1',
'md5': '310f62e325a9fafe64f68c0bccb6e75f',
'info_dict': {
'id': 'AC3FFE1',
'ext': 'mp4',
'title': 'Fucked in the ass while playing PS3',
'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
'upload_date': '20100913',
'uploader_id': 'famouslyfuckedup',
'thumbnail': r're:http://.*\.jpg',
'age_limit': 18,
}
}, {
'url': 'http://motherless.com/532291B',
'md5': 'bc59a6b47d1f958e61fbd38a4d31b131',
'info_dict': {
'id': '532291B',
'ext': 'mp4',
'title': 'Amazing girl playing the omegle game, PERFECT!',
'categories': ['Amateur', 'webcam', 'omegle', 'pink', 'young', 'masturbate', 'teen',
'game', 'hairy'],
'upload_date': '20140622',
'uploader_id': 'Sulivana7x',
'thumbnail': r're:http://.*\.jpg',
'age_limit': 18,
},
'skip': '404',
}, {
'url': 'http://motherless.com/g/cosplay/633979F',
'md5': '0b2a43f447a49c3e649c93ad1fafa4a0',
'info_dict': {
'id': '633979F',
'ext': 'mp4',
'title': 'Turtlette',
'categories': ['superheroine heroine superher'],
'upload_date': '20140827',
'uploader_id': 'shade0230',
'thumbnail': r're:http://.*\.jpg',
'age_limit': 18,
}
}, {
# no keywords
'url': 'http://motherless.com/8B4BBC1',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if any(p in webpage for p in (
'<title>404 - MOTHERLESS.COM<',
">The page you're looking for cannot be found.<")):
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
if '>The content you are trying to view is for friends only.' in webpage:
raise ExtractorError('Video %s is for friends only' % video_id, expected=True)
title = self._html_search_regex(
r'id="view-upload-title">\s+([^<]+)<', webpage, 'title')
video_url = (self._html_search_regex(
(r'setup\(\{\s*["\']file["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
r'fileurl\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'),
webpage, 'video URL', default=None, group='url') or
'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
age_limit = self._rta_search(webpage)
view_count = str_to_int(self._html_search_regex(
r'<strong>Views</strong>\s+([^<]+)<',
webpage, 'view count', fatal=False))
like_count = str_to_int(self._html_search_regex(
r'<strong>Favorited</strong>\s+([^<]+)<',
webpage, 'like count', fatal=False))
upload_date = self._html_search_regex(
r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload date')
if 'Ago' in upload_date:
days = int(re.search(r'([0-9]+)', upload_date).group(1))
upload_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d')
else:
upload_date = unified_strdate(upload_date)
comment_count = webpage.count('class="media-comment-contents"')
uploader_id = self._html_search_regex(
r'"thumb-member-username">\s+<a href="/m/([^"]+)"',
webpage, 'uploader_id')
categories = self._html_search_meta('keywords', webpage, default=None)
if categories:
categories = [cat.strip() for cat in categories.split(',')]
return {
'id': video_id,
'title': title,
'upload_date': upload_date,
'uploader_id': uploader_id,
'thumbnail': self._og_search_thumbnail(webpage),
'categories': categories,
'view_count': view_count,
'like_count': like_count,
'comment_count': comment_count,
'age_limit': age_limit,
'url': video_url,
}
class MotherlessGroupIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?motherless\.com/gv?/(?P<id>[a-z0-9_]+)'
_TESTS = [{
'url': 'http://motherless.com/g/movie_scenes',
'info_dict': {
'id': 'movie_scenes',
'title': 'Movie Scenes',
'description': 'Hot and sexy scenes from "regular" movies... '
'Beautiful actresses fully nude... A looot of '
'skin! :)Enjoy!',
},
'playlist_mincount': 662,
}, {
'url': 'http://motherless.com/gv/sex_must_be_funny',
'info_dict': {
'id': 'sex_must_be_funny',
'title': 'Sex must be funny',
'description': 'Sex can be funny. Wide smiles,laugh, games, fun of '
'any kind!'
},
'playlist_mincount': 9,
}]
@classmethod
def suitable(cls, url):
return (False if MotherlessIE.suitable(url)
else super(MotherlessGroupIE, cls).suitable(url))
def _extract_entries(self, webpage, base):
entries = []
for mobj in re.finditer(
r'href="(?P<href>/[^"]+)"[^>]*>(?:\s*<img[^>]+alt="[^-]+-\s(?P<title>[^"]+)")?',
webpage):
video_url = compat_urlparse.urljoin(base, mobj.group('href'))
if not MotherlessIE.suitable(video_url):
continue
video_id = MotherlessIE._match_id(video_url)
title = mobj.group('title')
entries.append(self.url_result(
video_url, ie=MotherlessIE.ie_key(), video_id=video_id,
video_title=title))
# Alternative fallback
if not entries:
entries = [
self.url_result(
compat_urlparse.urljoin(base, '/' + video_id),
ie=MotherlessIE.ie_key(), video_id=video_id)
for video_id in orderedSet(re.findall(
r'data-codename=["\']([A-Z0-9]+)', webpage))]
return entries
def _real_extract(self, url):
group_id = self._match_id(url)
page_url = compat_urlparse.urljoin(url, '/gv/%s' % group_id)
webpage = self._download_webpage(page_url, group_id)
title = self._search_regex(
r'<title>([\w\s]+\w)\s+-', webpage, 'title', fatal=False)
description = self._html_search_meta(
'description', webpage, fatal=False)
page_count = self._int(self._search_regex(
r'(\d+)</(?:a|span)><(?:a|span)[^>]+>\s*NEXT',
webpage, 'page_count'), 'page_count')
PAGE_SIZE = 80
def _get_page(idx):
webpage = self._download_webpage(
page_url, group_id, query={'page': idx + 1},
note='Downloading page %d/%d' % (idx + 1, page_count)
)
for entry in self._extract_entries(webpage, url):
yield entry
playlist = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
return {
'_type': 'playlist',
'id': group_id,
'title': title,
'description': description,
'entries': playlist
}
| unlicense | 8,019,355,362,960,142,000 | 37.37561 | 102 | 0.509724 | false |
Ignoble61/service.smartish.widgets | serviceFunctions.py | 1 | 54962 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is based on service.skin.widgets
# Thanks to the original authors
import os
import sys
import xbmc
import xbmcgui
import xbmcplugin
import xbmcaddon
import xbmcvfs
import random
import urllib
import thread
import socket
import cPickle as pickle
from datetime import datetime
from traceback import print_exc
from time import gmtime, strftime
import _strptime  # imported up front so time.strptime is safe to call from worker threads
if sys.version_info < (2, 7):
import simplejson
else:
import json as simplejson
__addon__ = xbmcaddon.Addon()
__addonversion__ = __addon__.getAddonInfo('version')
__addonid__ = __addon__.getAddonInfo('id')
__addonname__ = __addon__.getAddonInfo('name')
__localize__ = __addon__.getLocalizedString
__cwd__ = __addon__.getAddonInfo('path').decode("utf-8")
__resource__ = xbmc.translatePath( os.path.join( __cwd__, 'resources', 'lib' ) ).decode("utf-8")
sys.path.append(__resource__)
import library, sql, tmdb
def log(txt):
message = '%s: %s' % (__addonname__, txt.encode('ascii', 'ignore'))
xbmc.log(msg=message, level=xbmc.LOGDEBUG)
class Main:
def __init__(self):
log('script (service) version %s started' % __addonversion__)
self.running = True
try:
#json_query = simplejson.loads( xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "JSONRPC.Introspect", "id": 1}') )
#print(simplejson.dumps(json_query, sort_keys=True, indent=4 * ' '))
self._init_vars()
self.QUIT = False
# Before we load any threads, we need to use strftime, to ensure it is imported (potential threading issue)
strftime( "%Y%m%d%H%M%S",gmtime() )
# Start threads
thread.start_new_thread( self._player_daemon, () )
thread.start_new_thread( self._socket_daemon, () )
# If we're a client, tell the server we're live
if __addon__.getSetting( "role" ) == "Client":
self.pingServer( True )
self._daemon()
# Clear xbmcgui items
self.movieWidget = []
self.episodeWidget = []
self.albumWidget = []
self.pvrWidget = []
self.nextupWidget = []
except:
log( "script (service) fatal error" )
print_exc()
self.running = False
log( "(-> localhost) 'quit'" )
clientsocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
clientsocket.connect( ( "localhost", int( __addon__.getSetting( "port" ) ) ) )
clientsocket.send( "QUIT||||EOD" )
clientsocket.send( "QUIT||||EOD" )
clientsocket.close()
log('script (service) version %s stopped' % __addonversion__)
self.WINDOW = xbmcgui.Window(10000)
def _init_vars(self):
self.WINDOW = xbmcgui.Window(10000)
self.playingLiveTV = False
self.movieWidget = []
self.episodeWidget = []
self.albumWidget = []
self.pvrWidget = []
self.nextupWidget = []
self.movieLastUpdated = 0
self.episodeLastUpdated = 0
self.albumLastUpdated = 0
self.pvrLastUpdated = 0
self.lastMovieHabits = None
self.lastEpisodeHabits = None
self.lastAlbumHabits = None
self.lastPVRHabits = None
self.movieWeighting = None
self.episodeWeighting = None
self.albumWeighting = None
self.pvrWeighting = None
# Create empty client list
self.clients = []
# Create a socket
self.serversocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
def _player_daemon( self ):
# This is the daemon which will add information about played media to the database
# Create a connection to the database
self.connectionWrite = sql.connect()
# Create a player and monitor object
self.Player = Widgets_Player( action = self.mediaStarted, ended = self.mediaEnded )
self.Monitor = Widgets_Monitor( action = self.libraryUpdated )
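        # Widgets_Player calls back into mediaStarted / mediaEnded as playback starts and
        # finishes; Widgets_Monitor calls libraryUpdated when the library changes.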
# Loop
while not xbmc.abortRequested and self.running == True:
xbmc.sleep( 1000 )
del self.Player
del self.Monitor
def _socket_daemon( self ):
# This is the daemon which will send back any requested widget (server) or update local skin strings (client)
log( "Widget listener started on port %s" %( __addon__.getSetting( "port" ) ) )
# Get the port, and convert it to an int
port = int( __addon__.getSetting( "port" ) )
# Bind the server
self.serversocket.bind( ( "", port ) )
self.serversocket.listen( 5 )
        # Set all last updated strings to -1, so that the skin refreshes all
        # widgets now that we're listening (probably not needed, except for debug purposes)
self.WINDOW.setProperty( "smartish.movies", "-1" )
self.WINDOW.setProperty( "smartish.episodes", "-1" )
self.WINDOW.setProperty( "smartish.pvr", "-1" )
self.WINDOW.setProperty( "smartish.albums", "-1" )
# Loop
while not xbmc.abortRequested and self.running == True:
try:
connection, address = self.serversocket.accept()
except socket.timeout:
continue
except:
print_exc()
continue
thread.start_new_thread( self._socket_thread, (connection, address ) )
log( "Widget listener stopped" )
def _socket_thread( self, connection, address ):
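        # Wire format: fields separated by "||||" and terminated by "EOD", for example
        # (illustrative) "movies||||<pluginhandle>||||EOD" from a widget plugin, or
        # "widget||||movies||||<pickled payload>||||<pickled payload>||||EOD" from the server.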
totalData = []
while True:
data = connection.recv( 1028 )
if not data:
break
totalData.append( data )
if "|EOD" in data:
break
data = "".join( totalData ).split( "||||" )
if len( totalData ) > 0:
if data[ 0 ] != "ping":
log( "(%s -> *) '%s'" %( str( address[ 0 ] ), data[ 0 ] ) )
if data[ 0 ] == "QUIT":
connection.send( "QUITTING||||EOD" )
else:
# Messages from widget:
# Display Widget
returnlimit = int( __addon__.getSetting( "returnLimit" ) )
if data[ 0 ] == "movies":
xbmcplugin.setContent( int( data[ 1 ] ), "movies" )
xbmcplugin.addDirectoryItems( int( data[1] ),self.movieWidget[:returnlimit] )
xbmcplugin.endOfDirectory( handle=int( data[1] ) )
if data[ 0 ] == "episodes":
xbmcplugin.setContent( int( data[ 1 ] ), "episodes" )
xbmcplugin.addDirectoryItems( int( data[1] ),self.episodeWidget[:returnlimit] )
xbmcplugin.endOfDirectory( handle=int( data[1] ) )
if data[ 0 ] == "albums":
xbmcplugin.setContent( int( data[ 1 ] ), "albums" )
xbmcplugin.addDirectoryItems( int( data[1] ),self.albumWidget[:returnlimit] )
xbmcplugin.endOfDirectory( handle=int( data[1] ) )
if data[ 0 ] == "pvr":
xbmcplugin.addDirectoryItems( int( data[1] ),self.pvrWidget[:returnlimit] )
xbmcplugin.endOfDirectory( handle=int( data[1] ) )
# Play media
if data[ 0 ] == "playpvr":
xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method": "Player.Open", "params": { "item": {"channelid": ' + data[ 1 ] + '} } }' )
xbmcplugin.setResolvedUrl( handle=int( data[ 2 ] ), succeeded=False, listitem=xbmcgui.ListItem() )
if data[ 0 ] == "playrec":
xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method": "Player.Open", "params": { "item": {"recordingid": ' + data[ 1 ] + '} } }' )
xbmcplugin.setResolvedUrl( handle=int( data[ 2 ] ), succeeded=False, listitem=xbmcgui.ListItem() )
if data[ 0 ] == "playalb":
xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method": "Player.Open", "params": { "item": { "albumid": ' + data[ 1 ] + ' } } }' )
xbmcplugin.setResolvedUrl( handle=int( data[ 2 ] ), succeeded=False, listitem=xbmcgui.ListItem() )
# Messages from server:
# Build widgets with data from server
if data[ 0 ] == "widget":
log( "(%s > *) Received data to build %s widget" %( str( address[ 0 ] ), data[ 1 ] ) )
thread.start_new_thread( self.gotWidgetFromServer, ( data[ 1 ], pickle.loads( data[ 2 ] ), pickle.loads( data[ 3 ] ) ) )
# Messages from client:
# Save last played information
if data[ 0 ] == "lastplayed":
library.nowPlaying[ str( address[ 0 ] ) ] = ( data[ 1 ], data[ 2 ] )
library.lastplayedType = data[ 1 ]
library.lastplayedID = data[ 2 ]
if data[ 1 ] == "movie":
self.movieLastUpdated = 0
self.lastMovieHabits = None
if data[ 1 ] == "episode":
self.episodeLastUpdated = 0
self.lastEpisodeHabits = None
library.tvshowInformation.pop( int( data[ 3 ] ), None )
library.tvshowNextUnwatched.pop( int( data[ 3 ] ), None )
library.tvshowNewest.pop( int( data[ 3 ] ), None )
if data[ 1 ] == "recorded":
self.pvrLastUpdated = 0
if data[ 1 ] == "album":
self.albumLastUpdated = 0
self.lastAlbumHabits = None
# Clear last played information
if data[ 0 ] == "playbackended":
library.lastplayedType = None
library.lastplayedID = None
					library.nowPlaying.pop( str( address[ 0 ] ), None )
# Update the database with program information
if data[ 0 ] == "mediainfo":
thread.start_new_thread( self.addClientDataToDatabase, ( pickle.loads( data[ 1 ] ), pickle.loads( data[ 2 ] ), pickle.loads( data[ 3 ] ) ) )
# Update underlying widget data (e.g. after library update)
if data[ 0 ] == "updatewidget":
if data[ 1 ] == "movie":
self.lastMovieHabits = None
self.movieLastUpdated = 0
if data[ 1 ] == "episode":
self.lastEpisodeHabits = None
self.episodeLastUpdated = 0
if data[ 1 ] == "album":
self.lastAlbumHabits = None
self.albumLastUpdated = 0
# Client pinging us
if data[ 0 ] == "ping":
# If client isn't registered, add it
if not str( address[ 0 ] ) in self.clients:
log( "New client has registered at address %s" %( str( address[ 0 ] ) ) )
self.clients.append( str( address[ 0 ] ) )
# Send widgets
thread.start_new_thread( self._send_widgets, ( str( address[ 0 ] ), None ) )
# Client started up
if data[ 0 ] == "clientstart":
# If client isn't registered, add it
if not str( address[ 0 ] ) in self.clients:
log( "New client has registered at address %s" %( str( address[ 0 ] ) ) )
self.clients.append( str( address[ 0 ] ) )
# Send widgets
thread.start_new_thread( self._send_widgets, ( str( address[ 0 ] ), None ) )
if data[ 0 ] != "ping":
log( "(* -> %s) 'OK' '%s'" %( str( address[ 0 ] ), data[ 0 ] ) )
connection.send( "OK||||%s||||EOD" %( data[ 0 ] ) )
else:
log( "(* -> %s) 'NODATA' '%s'" %( str( address[ 0 ] ), data[ 0 ] ) )
connection.send( "NODATA||||EOD" )
def _send_widgets( self, address, unused ):
# Send any widgets to client
if self.movieWeighting is not None:
self.sendWidgetToClient( "movie", self.movieWeighting, self.movieItems, address )
if self.episodeWeighting is not None:
self.sendWidgetToClient( "episode", self.episodeWeighting, self.episodeItems, address )
if self.pvrWeighting is not None:
self.sendWidgetToClient( "pvr", self.pvrWeighting, self.pvrItems, address )
if self.albumWeighting is not None:
self.sendWidgetToClient( "album", self.albumWeighting, self.albumItems, address )
def _remove_client( self, address ):
# Remove client from list
self.clients.remove( address )
library.nowPlaying.pop( address, None )
def _daemon( self ):
# This is a daemon which will update the widget with latest suggestions
self.connectionRead = sql.connect( True )
count = 0
while not xbmc.abortRequested:
if __addon__.getSetting( "role" ) == "Client":
count += 1
if count >= 60:
# Tell the server we're still alive
self.pingServer()
count = 0
if __addon__.getSetting( "role" ) == "Server" and ( len( self.clients ) != 0 or xbmc.getCondVisibility( "Skin.HasSetting(enable.smartish.widgets)" ) ):
count += 1
if count >= 60 or self.lastMovieHabits is None or self.lastEpisodeHabits is None or self.lastAlbumHabits is None or self.lastPVRHabits is None:
nextWidget = self._getNextWidget()
if nextWidget is not None:
# If live tv is playing, call the mediaStarted function in case channel has changed
if self.playingLiveTV:
self.mediaStarted( self.connectionRead )
# Get the users habits out of the database
habits, freshness = sql.getFromDatabase( self.connectionRead, nextWidget )
if nextWidget == "movie" and habits == self.lastMovieHabits:
self.movieLastUpdated = strftime( "%Y%m%d%H%M%S",gmtime() )
count = 0
continue
if nextWidget == "episode" and habits == self.lastEpisodeHabits:
self.episodeLastUpdated = strftime( "%Y%m%d%H%M%S",gmtime() )
count = 0
continue
if nextWidget == "album" and habits == self.lastAlbumHabits:
self.albumLastUpdated = strftime( "%Y%m%d%H%M%S",gmtime() )
count = 0
continue
# Pause briefly, and again check that abortRequested hasn't been called
xbmc.sleep( 100 )
if xbmc.abortRequested:
return
log( "Updating %s widget" %( nextWidget ) )
# Get all the media items that match the users habits
weighted, items = library.getMedia( nextWidget, habits, freshness )
# Pause briefly, and again check that abortRequested hasn't been called
xbmc.sleep( 100 )
if xbmc.abortRequested:
return
# Generate the widgets
if weighted is not None:
listitems = library.buildWidget( nextWidget, weighted, items )
else:
listitems = []
# Save the widget
if nextWidget == "movie":
self.movieWidget = listitems
self.movieLastUpdated = strftime( "%Y%m%d%H%M%S",gmtime() )
self.lastMovieHabits = habits
self.movieWeighting = weighted
self.movieItems = items
self.WINDOW.setProperty( "smartish.movies", self.movieLastUpdated )
log( "Movie widget updated" )
elif nextWidget == "episode":
self.episodeWidget = listitems
self.episodeLastUpdated = strftime( "%Y%m%d%H%M%S",gmtime() )
self.lastEpisodeHabits = habits
self.episodeWeighting = weighted
self.episodeItems = items
self.WINDOW.setProperty( "smartish.episodes", self.episodeLastUpdated )
log( "Episode widget updated" )
elif nextWidget == "pvr":
self.pvrWidget = listitems
self.pvrWeighting = weighted
self.pvrItems = items
self.lastPVRHabits = habits
self.pvrLastUpdated = strftime( "%Y%m%d%H%M%S",gmtime() )
self.WINDOW.setProperty( "smartish.pvr", self.pvrLastUpdated )
log( "PVR widget updated" )
elif nextWidget == "album":
self.albumWidget = listitems
self.albumLastUpdated = strftime( "%Y%m%d%H%M%S",gmtime() )
self.lastAlbumHabits = habits
self.albumWeighting = weighted
self.albumItems = items
self.WINDOW.setProperty( "smartish.albums", self.albumLastUpdated )
log( "Album widget updated" )
# Send widget data to clients so they can build their own widgets
thread.start_new_thread( self.sendWidgetToClient, ( nextWidget, weighted, library.shrinkJson( nextWidget, weighted, items ) ) )
# Reset counter and update widget type
count = 0
xbmc.sleep( 1000 )
def gotWidgetFromServer( self, type, data1, data2 ):
listitems = library.buildWidget( type, data1, data2 )
if type == "movie":
log( "Movie widget updated" )
self.movieWidget = listitems
self.movieLastUpdated = strftime( "%Y%m%d%H%M%S",gmtime() )
self.WINDOW.setProperty( "smartish.movies", self.movieLastUpdated )
elif type == "episode":
log( "Episode widget updated" )
self.episodeWidget = listitems
self.episodeLastUpdated = strftime( "%Y%m%d%H%M%S",gmtime() )
self.WINDOW.setProperty( "smartish.episodes", self.episodeLastUpdated )
elif type == "pvr":
log( "PVR widget updated" )
self.pvrWidget = listitems
self.pvrLastUpdated = strftime( "%Y%m%d%H%M%S",gmtime() )
self.WINDOW.setProperty( "smartish.pvr", self.pvrLastUpdated )
elif type == "album":
log( "Album widget updated" )
self.albumWidget = listitems
self.albumLastUpdated = strftime( "%Y%m%d%H%M%S",gmtime() )
self.WINDOW.setProperty( "smartish.albums", self.albumLastUpdated )
else:
log( "Unknown widget type %s" %( type ) )
def sendWidgetToClient( self, widget, weighted, items, client = None ):
pickledWeighted = pickle.dumps( weighted, protocol = pickle.HIGHEST_PROTOCOL )
pickledItems = pickle.dumps( items, protocol = pickle.HIGHEST_PROTOCOL )
# If we're the client, nothing to do here
if __addon__.getSetting( "role" ) == "Client":
return
# Get list of clients
if client is not None:
clients = [ client ]
else :
clients = self.clients
if len( clients ) == 0:
# No clients set up, nothing to do
return
port = int( __addon__.getSetting( "port" ) )
for client in clients:
if client is not None and client in self.clients:
try:
clientsocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
clientsocket.connect( ( client, port ) )
log( "(* -> %s) 'widget' (%s)" %( client, widget ) )
clientsocket.send( "widget||||%s||||%s||||%s||||EOD" %( widget, pickledWeighted, pickledItems ) )
message = clientsocket.recv( 128 ).split( "||||" )
log( "(%s -> *) '%s' '%s'" %( client, message[ 0 ], message[ 1 ] ) )
clientsocket.close()
except socket.error, msg:
log( repr( msg ) )
log( "Removing client %s" %( client ) )
self.clients.remove( client )
except:
print_exc()
log( "Removing client %s" %( client ) )
# Remove client from list
self.clients.remove( client )
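	# Sketch of the message built above (payload values are hypothetical):
	# the two pickle.dumps() blobs are embedded as ordinary fields, so a
	# widget update travels as
	#   "widget||||movie||||<pickled weighting>||||<pickled items>||||EOD"
	# and the receiver recovers the objects with pickle.loads() in
	# _socket_thread / gotWidgetFromServer.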
def _getNextWidget( self ):
		# This function finds the widget which was least recently updated (smallest timestamp, with 0 forcing an immediate refresh), so it is refreshed next
update = { self.pvrLastUpdated: "pvr", self.albumLastUpdated: "album", self.episodeLastUpdated: "episode", self.movieLastUpdated: "movie" }
for key in sorted( update.keys() ):
return update[ key ]
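	# Example of the selection above (values are invented): the dictionary
	# maps each widget's last-updated stamp to its name, so sorting the keys
	# and returning the first entry picks the widget that was refreshed
	# longest ago.
	#   update = { 0: "movie", 20140101120000: "pvr" }
	#   sorted( update.keys() )[ 0 ]  # -> 0, so "movie" is refreshed next
	# Note that widgets sharing an identical stamp collapse to a single key.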
def mediaEnded( self ):
# Media has finished playing, clear our saved values of what was playing
library.lastplayedID = None
library.lastplayedType = None
library.nowPlaying.pop( "localhost", None )
self.nextupWidget = []
self.playingLiveTV = False
# If we're a client, tell the server we've finished playing
if __addon__.getSetting( "role" ) == "Client":
host = __addon__.getSetting( "serverip" )
port = int( __addon__.getSetting( "port" ) )
try:
clientsocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
clientsocket.connect( ( host, port ) )
log( "(* -> %s) 'playbackended'" %( host ) )
clientsocket.send( "playbackended||||EOD" )
message = clientsocket.recv( 128 ).split( "||||" )
log( "(%s -> *) '%s' '%s'" %( host, message[ 0 ], message[ 1 ] ) )
clientsocket.close()
except:
print_exc()
log( "Unable to establish connection to server at address %s" %( host ) )
def mediaStarted( self, connection = None ):
# Get the active player
json_query = xbmc.executeJSONRPC( '{"jsonrpc": "2.0", "id": 1, "method": "Player.GetActivePlayers"}' )
json_query = unicode(json_query, 'utf-8', errors='ignore')
json_query = simplejson.loads(json_query)
if json_query.has_key('result') and json_query[ "result" ]:
playerid = json_query[ "result" ][ 0 ][ "playerid" ]
# Get details of the playing media
json_query = xbmc.executeJSONRPC( '{"jsonrpc": "2.0", "id": 1, "method": "Player.GetItem", "params": {"playerid": ' + str( playerid ) + ', "properties": [ "title", "artist", "albumartist", "genre", "year", "rating", "album", "track", "duration", "comment", "lyrics", "playcount", "fanart", "director", "trailer", "tagline", "plot", "plotoutline", "originaltitle", "lastplayed", "writer", "studio", "mpaa", "cast", "country", "imdbnumber", "premiered", "productioncode", "runtime", "set", "showlink", "streamdetails", "top250", "votes", "firstaired", "season", "episode", "showtitle", "file", "resume", "artistid", "albumid", "tvshowid", "setid", "watchedepisodes", "disc", "tag", "art", "genreid", "displayartist", "albumartistid", "description", "theme", "mood", "style", "albumlabel", "sorttitle", "episodeguide", "uniqueid", "dateadded", "channel", "channeltype", "hidden", "locked", "channelnumber", "starttime", "endtime" ] } }' )
json_query = unicode(json_query, 'utf-8', errors='ignore')
json_query = simplejson.loads(json_query)
if json_query.has_key( 'result' ):
type = json_query[ "result" ][ "item" ][ "type" ]
if type == "episode":
self.episode( json_query[ "result" ][ "item" ] )
elif type == "movie":
self.movie( json_query[ "result" ][ "item" ] )
elif type == "song":
self.song( json_query[ "result" ][ "item" ] )
elif type == "channel":
# Get details of the current show
live_query = xbmc.executeJSONRPC( '{ "jsonrpc": "2.0", "id": 1, "method": "PVR.GetBroadcasts", "params": {"channelid": %d, "properties": [ "title", "plot", "plotoutline", "starttime", "endtime", "runtime", "progress", "progresspercentage", "genre", "episodename", "episodenum", "episodepart", "firstaired", "hastimer", "isactive", "parentalrating", "wasactive", "thumbnail" ], "limits": {"end": 1} } }' %( json_query[ "result" ][ "item" ][ "id" ] ) )
#live_query = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "id": 1, "method": "PVR.GetChannelDetails", "params": {"channelid": %d, "properties": [ "broadcastnow" ]}}' %( json_query[ "result" ][ "item" ][ "id" ] ) )
live_query = unicode(live_query, 'utf-8', errors='ignore')
live_query = simplejson.loads(live_query)
# Check the details we need are actually included:
if live_query.has_key( "result" ) and live_query[ "result" ].has_key( "broadcasts" ):
if self.playingLiveTV:
# Only update if the current show has changed
if not self.lastLiveTVChannel == str( json_query[ "result" ][ "item" ][ "id" ] ) + "|" + live_query[ "result" ][ "broadcasts" ][ 0 ][ "starttime" ]:
self.livetv( json_query[ "result" ][ "item" ], live_query[ "result" ][ "broadcasts" ][ 0 ], connection )
else:
self.livetv( json_query[ "result" ][ "item" ], live_query[ "result" ][ "broadcasts" ][ 0 ], connection )
# Save the current channel, so we can only update on channel change
self.playingLiveTV = True
self.lastLiveTVChannel = str( json_query[ "result" ][ "item" ][ "id" ] ) + "|" + live_query[ "result" ][ "broadcasts" ][ 0 ][ "starttime" ]
elif type == "unknown" and "channel" in json_query[ "result" ][ "item"] and json_query[ "result" ][ "item" ][ "channel" ] != "":
self.recordedtv( json_query[ "result" ][ "item" ] )
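	# For reference, a trimmed sketch of what the two JSON-RPC calls above
	# return (values are invented; only the structure matters here):
	#   Player.GetActivePlayers -> { "result": [ { "playerid": 1, "type": "video" } ] }
	#   Player.GetItem          -> { "result": { "item": { "type": "movie", "id": 5, ... } } }
	# The item "type" routes playback to movie()/episode()/song(), while
	# "channel" items trigger the extra PVR.GetBroadcasts lookup above.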
def movie( self, json_query ):
# This function extracts the details we want to save from a movie, and sends them to the addToDatabase function
# First, time stamps (so all items have identical time stamp)
dateandtime = str( datetime.now() )
time = str( "%02d:%02d" %( datetime.now().hour, datetime.now().minute ) )
day = datetime.today().weekday()
daytimeStrings = { "dateandtime": dateandtime, "time": time, "day": day }
		# Save this as last played, so the widgets won't display it
self.lastPlayed( "movie", json_query[ "id" ] )
self.movieLastUpdated = 0
self.lastMovieHabits = None
dbaseInfo = []
additionalInfo = {}
# MPAA
if json_query[ "mpaa" ] != "":
dbaseInfo.append( ( "movie", "mpaa", json_query[ "mpaa" ] ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "movie", "mpaa", json_query[ "mpaa" ] )
# Tag
for tag in json_query[ "tag" ]:
dbaseInfo.append( ( "movie", "tag", tag ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "movie", "tag", tag )
# Director(s)
for director in json_query[ "director" ]:
dbaseInfo.append( ( "movie", "director", director ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "movie", "director", director )
# Writer(s)
for writer in json_query[ "writer" ]:
dbaseInfo.append( ( "movie", "writer", writer ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "movie", "writer", writer )
# Studio(s)
for studio in json_query[ "studio" ]:
dbaseInfo.append( ( "movie", "studio", studio ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "movie", "studio", studio )
# Genre(s)
for genre in json_query[ "genre" ]:
dbaseInfo.append( ( "movie", "genre", genre ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "movie", "genre", genre )
# Actor(s)
for actor in json_query[ "cast" ]:
dbaseInfo.append( ( "movie", "actor", actor[ "name" ] ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "movie", "actor", actor[ "name" ] )
# Is it watched
if json_query[ "playcount" ] == 0:
# This is a new movie
dbaseInfo.append( ( "movie", "special", "unwatched" ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "movie", "special", "unwatched" )
# Get additional info from TMDB
if __addon__.getSetting( "role" ) == "Server":
keywords, related = sql.getTMDBExtras( "movie", json_query[ "id" ], json_query[ "imdbnumber" ], json_query[ "year" ] )
for keyword in keywords:
dbaseInfo.append( ( "movie", "keyword", keyword ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "movie", "keyword", keyword )
for show in related:
dbaseInfo.append( ( "movie", "related", show ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "movie", "related", show )
else:
# We're a client, so we'll let the server get the additional info
additionalInfo[ "type" ] = "movie"
additionalInfo[ "id" ] = json_query[ "id" ]
additionalInfo[ "imdbnumber" ] = json_query[ "imdbnumber" ]
additionalInfo[ "year" ] = json_query[ "year" ]
# Convert dateadded to datetime object
dateadded = datetime.now() - datetime.strptime( json_query[ "dateadded" ], "%Y-%m-%d %H:%M:%S" )
# How new is it
if dateadded.days <= 2:
dbaseInfo.append( ( "movie", "special", "fresh" ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "movie", "special", "fresh" )
if dateadded.days <= 7:
dbaseInfo.append( ( "movie", "special", "recentlyadded" ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "movie", "special", "recentlyadded" )
# Mark played, so we can get percentage unwatched/recent
dbaseInfo.append( ( "movie", "special", "playedmedia" ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "movie", "special", "playedmedia" )
# Add all the info to the dbase
self.newaddToDatabase( daytimeStrings, dbaseInfo, additionalInfo )
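	# A sketch of the data handed to newaddToDatabase() for a movie (the
	# concrete values below are hypothetical):
	#   daytimeStrings = { "dateandtime": "2014-01-01 20:15:00.000000",
	#                      "time": "20:15", "day": 2 }
	#   dbaseInfo = [ ( "movie", "genre", "Comedy" ),
	#                 ( "movie", "actor", "Some Actor" ),
	#                 ( "movie", "special", "playedmedia" ) ]
	# Each ( media, type, data ) tuple becomes one habit row in the database.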
def episode( self, json_query ):
# This function extracts the details we want to save from a tv show episode, and sends them to the addToDatabase function
# First, time stamps (so all items have identical time stamp)
dateandtime = str( datetime.now() )
time = str( "%02d:%02d" %( datetime.now().hour, datetime.now().minute ) )
day = datetime.today().weekday()
daytimeStrings = { "dateandtime": dateandtime, "time": time, "day": day }
# Save this as last played, so the widgets won't display it
self.lastPlayed( "episode", json_query[ "id" ], json_query[ "tvshowid" ] )
library.tvshowInformation.pop( json_query[ "tvshowid" ], None )
library.tvshowNextUnwatched.pop( json_query[ "tvshowid" ], None )
library.tvshowNewest.pop( json_query[ "tvshowid" ], None )
self.episodeLastUpdated = 0
self.lastEpisodeHabits = None
dbaseInfo = []
additionalInfo = {}
# TV Show ID
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "episode", "tvshowid", json_query[ "tvshowid" ] )
dbaseInfo.append( ( "episode", "tvshowid", json_query[ "tvshowid" ] ) )
# Now get details of the tv show
show_query = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShowDetails", "params": {"tvshowid": %s, "properties": ["sorttitle", "mpaa", "premiered", "episode", "watchedepisodes", "studio", "genre", "cast", "tag", "imdbnumber" ]}, "id": 1}' % json_query[ "tvshowid" ] )
show_query = unicode(show_query, 'utf-8', errors='ignore')
show_query = simplejson.loads(show_query)
show_query = show_query[ "result" ][ "tvshowdetails" ]
# MPAA
dbaseInfo.append( ( "episode", "mpaa", show_query[ "mpaa" ] ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "episode", "mpaa", show_query[ "mpaa" ] )
# Studio(s)
for studio in show_query[ "studio" ]:
dbaseInfo.append( ( "episode", "studio", studio ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "episode", "studio", studio )
# Genre(s)
for genre in show_query[ "genre" ]:
dbaseInfo.append( ( "episode", "genre", genre ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "episode", "genre", genre )
# Tag(s)
for genre in show_query[ "tag" ]:
dbaseInfo.append( ( "episode", "tag", tag ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "episode", "tag", tag )
# Actor(s)
for actor in show_query[ "cast" ]:
dbaseInfo.append( ( "episode", "actor", actor[ "name" ] ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "episode", "actor", actor[ "name" ] )
# Is it watched
if json_query[ "playcount" ] == 0:
# This is a new episode
dbaseInfo.append( ( "episode", "special", "unwatched" ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "episode", "special", "unwatched" )
# Get additional info from TMDB
if __addon__.getSetting( "role" ) == "Server":
keywords, related = sql.getTMDBExtras( "episode", json_query[ "imdbnumber" ], show_query[ "label" ], show_query[ "premiered" ][:-6] )
for keyword in keywords:
dbaseInfo.append( ( "episode", "keyword", keyword ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "episode", "keyword", keyword )
for show in related:
dbaseInfo.append( ( "episode", "related", show ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "episode", "related", show )
else:
# We're a client, so we'll let the server get the additional info
additionalInfo[ "type" ] = "episode"
additionalInfo[ "id" ] = json_query[ "imdbnumber" ]
additionalInfo[ "imdbnumber" ] = show_query[ "label" ]
additionalInfo[ "year" ] = show_query[ "premiered" ][:-6]
# Convert dateadded to datetime object
dateadded = datetime.now() - datetime.strptime( json_query[ "dateadded" ], "%Y-%m-%d %H:%M:%S" )
# How new is it
if dateadded.days <= 2:
dbaseInfo.append( ( "episode", "special", "fresh" ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "episode", "special", "fresh" )
if dateadded.days <= 7:
dbaseInfo.append( ( "episode", "special", "recentlyadded" ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "episode", "special", "recentlyadded" )
# Mark played, so we can get percentage unwatched/recent
dbaseInfo.append( ( "episode", "special", "playedmedia" ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "episode", "special", "playedmedia" )
# Add all the info to the dbase
self.newaddToDatabase( daytimeStrings, dbaseInfo, additionalInfo )
def recordedtv( self, json_query ):
		# This function extracts the details we want to save from a recorded tv show, and sends them to the addToDatabase function
# First, time stamps (so all items have identical time stamp)
dateandtime = str( datetime.now() )
time = str( "%02d:%02d" %( datetime.now().hour, datetime.now().minute ) )
day = datetime.today().weekday()
daytimeStrings = { "dateandtime": dateandtime, "time": time, "day": day }
# Save this as last played, so the widget won't display it
self.lastPlayed( "recorded", json_query[ "id" ] )
self.pvrLastUpdated = 0
dbaseInfo = []
additionalInfo = {}
# Channel
dbaseInfo.append( ( "recorded", "channel", json_query[ "channel" ] ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "recorded", "channel", json_query[ "channel" ] )
# Genre(s)
for genre in json_query[ "genre" ]:
for splitGenre in genre.split( "/" ):
dbaseInfo.append( ( "recorded", "genre", splitGenre ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "recorded", "genre", splitGenre )
# Is it watched
if json_query[ "lastplayed" ] == "":
# This is a new episode
dbaseInfo.append( ( "recorded", "special", "unwatched" ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "recorded", "special", "unwatched" )
		# Convert starttime to datetime object
dateadded = datetime.now() - datetime.strptime( json_query[ "starttime" ], "%Y-%m-%d %H:%M:%S" )
# How new is it
if dateadded.days <= 2:
dbaseInfo.append( ( "recorded", "special", "fresh" ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "recorded", "special", "fresh" )
if dateadded.days <= 7:
dbaseInfo.append( ( "recorded", "special", "recentlyadded" ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "recorded", "special", "recentlyadded" )
# Mark played, so we can get percentage unwatched/recent
dbaseInfo.append( ( "recorded", "special", "playedmedia" ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "recorded", "special", "playedmedia" )
# Add all the info to the dbase
self.newaddToDatabase( daytimeStrings, dbaseInfo, additionalInfo )
def livetv( self, json_query, live_query, connection = None ):
# This function extracts the details we want to save from live tv, and sends them to the addToDatabase function
if connection is None:
connection = self.connectionWrite
# First, time stamps (so all items have identical time stamp)
dateandtime = str( datetime.now() )
time = str( "%02d:%02d" %( datetime.now().hour, datetime.now().minute ) )
day = datetime.today().weekday()
daytimeStrings = { "dateandtime": dateandtime, "time": time, "day": day }
# Trigger PVR to be next widget to be updated
self.pvrLastUpdated = 0
dbaseInfo = []
additionalInfo = {}
# ChannelType
dbaseInfo.append( ( "live", "channeltype", json_query[ "channeltype" ] ) )
#self.addToDatabase( connection, dateandtime, time, day, "live", "channeltype", json_query[ "channeltype" ] )
# Channel
dbaseInfo.append( ( "live", "channel", json_query[ "channel" ] ) )
#self.addToDatabase( connection, dateandtime, time, day, "live", "channel", json_query[ "channel" ] )
# ChannelNumber
dbaseInfo.append( ( "live", "channelnumber", json_query[ "channelnumber" ] ) )
#self.addToDatabase( connection, dateandtime, time, day, "live", "channelnumber", json_query[ "channelnumber" ] )
# ChannelID
dbaseInfo.append( ( "live", "channelid", json_query[ "id" ] ) )
#self.addToDatabase( connection, dateandtime, time, day, "live", "channelid", json_query[ "id" ] )
# Genre
for genre in live_query[ "genre" ]:
for splitGenre in genre.split( "/" ):
dbaseInfo.append( ( "live", "genre", splitGenre ) )
#self.addToDatabase( connection, dateandtime, time, day, "live", "genre", splitGenre )
# Mark played, so we can get percentage unwatched/recent
dbaseInfo.append( ( "live", "special", "playedmedia" ) )
dbaseInfo.append( ( "live", "special", "playedlive" ) )
#self.addToDatabase( connection, dateandtime, time, day, "live", "special", "playedmedia" )
#self.addToDatabase( connection, dateandtime, time, day, "live", "special", "playedlive" )
# Add all the info to the dbase
self.newaddToDatabase( daytimeStrings, dbaseInfo, additionalInfo )
def song( self, json_query ):
# This function extracts the details we want to save from a song, and sends them to the addToDatabase function
# First, time stamps (so all items have identical time stamp)
dateandtime = str( datetime.now() )
time = str( "%02d:%02d" %( datetime.now().hour, datetime.now().minute ) )
day = datetime.today().weekday()
daytimeStrings = { "dateandtime": dateandtime, "time": time, "day": day }
dbaseInfo = []
additionalInfo = {}
# Now get details of the album
album_query = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "AudioLibrary.GetAlbumDetails", "params": {"albumid": %s, "properties": [ "title", "description", "artist", "genre", "theme", "mood", "style", "type", "albumlabel", "rating", "year", "musicbrainzalbumid", "musicbrainzalbumartistid", "fanart", "thumbnail", "playcount", "genreid", "artistid", "displayartist" ]}, "id": 1}' % json_query[ "albumid" ] )
album_query = unicode(album_query, 'utf-8', errors='ignore')
album_query = simplejson.loads(album_query)
album_query = album_query[ "result" ][ "albumdetails" ]
# Check album has changed
if library.lastplayedType == "album" and library.lastplayedID == album_query[ "albumid" ]:
return
# Save album, so we only update data on album change
self.lastPlayed( "album", album_query[ "albumid" ] )
self.albumLastUpdated = 0
self.lastAlbumHabits = None
for artist in album_query[ "artist" ]:
dbaseInfo.append( ( "album", "artist", artist ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "album", "artist", artist )
for style in album_query[ "style" ]:
dbaseInfo.append( ( "album", "style", style ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "album", "style", style )
for theme in album_query[ "theme" ]:
dbaseInfo.append( ( "album", "theme", theme ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "album", "theme", theme )
dbaseInfo.append( ( "album", "label", album_query[ "label" ] ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "album", "label", album_query[ "label" ] )
for genre in album_query[ "genre" ]:
dbaseInfo.append( ( "album", "genre", genre ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "album", "genre", genre )
for mood in album_query[ "mood" ]:
dbaseInfo.append( ( "album", "mood", mood ) )
#self.addToDatabase( self.connectionWrite, dateandtime, time, day, "album", "mood", mood )
# Add all the info to the dbase
self.newaddToDatabase( daytimeStrings, dbaseInfo, additionalInfo )
def libraryUpdated( self, database ):
if database == "video":
# Clear movie and episode habits, and set them both to be updated
self.lastMovieHabits = None
self.lastEpisodeHabits = None
self.movieLastUpdated = 0
self.episodeLastUpdated = 0
self.updateWidgetServer( "movie" )
self.updateWidgetServer( "episode" )
if database == "music":
# Clear album habits, and set to be updated
self.lastAlbumHabits = None
self.albumLastUpdated = 0
self.updateWidgetServer( "album" )
def newaddToDatabase( self, daytime, dbaseInfo, additionalInfo ):
if __addon__.getSetting( "role" ) == "Server":
# We're the server, add the habits into the database
connection = sql.connect()
nextupHabits = {}
type = None
for habit in dbaseInfo:
sql.addToDatabase( connection, daytime[ "dateandtime" ], daytime[ "time" ], daytime[ "day" ], habit[ 0 ], habit[ 1 ], habit[ 2 ] )
nextupHabits = sql.nextupHabits( nextupHabits, habit[ 1 ], habit[ 2 ] )
if type is None:
type = habit[ 0 ]
connection.close()
thread.start_new_thread( self.buildNextUpWidget, ( type, nextupHabits ) )
else:
# We're the client, send the data to the server to add
host = __addon__.getSetting( "serverip" )
port = int( __addon__.getSetting( "port" ) )
try:
clientsocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
clientsocket.connect( ( host, port ) )
log( "(* -> %s) 'mediainfo'" %( host ) )
clientsocket.send( "mediainfo||||%s||||%s||||%s||||EOD" %( pickle.dumps( daytime, protocol = pickle.HIGHEST_PROTOCOL ), pickle.dumps( dbaseInfo, protocol = pickle.HIGHEST_PROTOCOL ), pickle.dumps( additionalInfo, protocol = pickle.HIGHEST_PROTOCOL ) ) )
message = clientsocket.recv( 128 ).split( "||||" )
log( "(%s -> *) '%s' '%s'" %( host, message[ 0 ], message[ 1 ] ) )
clientsocket.close()
except:
print_exc()
log( "Unable to establish connection to server at address %s" %( host ) )
def addClientDataToDatabase( self, daytime, dbaseInfo, additionalInfo ):
# We've received habits from client we need to add to the database
connection = sql.connect()
nextupHabits = {}
type = None
for habit in dbaseInfo:
sql.addToDatabase( connection, daytime[ "dateandtime" ], daytime[ "time" ], daytime[ "day" ], habit[ 0 ], habit[ 1 ], habit[ 2 ] )
nextupHabits = sql.nextupHabits( nextupHabits, habit[ 1 ], habit[ 2 ] )
if type is None:
type = habit[ 0 ]
# If this is a movie or episode, try to get additional info from TMDB
if "type" in additionalInfo:
keywords, related = sql.getTMDBExtras( additionalInfo[ "type" ], additionalInfo[ "id" ], additionalInfo[ "imdbnumber" ], additionalInfo[ "year" ] )
for keyword in keywords:
self.addToDatabase( connection, daytime[ "dateandtime" ], daytime[ "time" ], daytime[ "day" ], additionalInfo[ "type" ], "keyword", keyword )
nextupHabits = sql.nextupHabits( nextupHabits, "keyword", keyword )
for show in related:
self.addToDatabase( connection, daytime[ "dateandtime" ], daytime[ "time" ], daytime[ "day" ], additionalInfo[ "type" ], "related", show )
nextupHabits = sql.nextupHabits( nextupHabits, "related", show )
connection.close()
self.buildNextUpWidget( type, nextupHabits )
def buildNextUpWidget( self, type, habits ):
if type is None: return []
if type != "movie" and type != "episode": return []
log( "Updating %s nextup" %( type ) )
weighted, items = library.getMedia( type, habits, ( 10, 10, 0 ) )
# Pause briefly, and again check that abortRequested hasn't been called
xbmc.sleep( 100 )
if xbmc.abortRequested:
return
# Generate the widgets
if weighted is not None:
self.nextupWidget = library.buildWidget( type, weighted, items )
else:
self.nextupWidget = []
log( "Updated %s nextup" %( type ) )
def addToDatabase( self, connection, dateandtime, time, day, media, type, data ):
log( "### DEPRECATED FUNCTION CALLED!" )
if __addon__.getSetting( "role" ) == "Server":
# If we're acting as a server, add the data into the database
sql.addToDatabase( connection, dateandtime, time, day, media, type, data )
else:
# We're acting as the client, so tell the server to add the data into the database
host = __addon__.getSetting( "serverip" )
port = int( __addon__.getSetting( "port" ) )
try:
clientsocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
clientsocket.connect( ( host, port ) )
log( "(* -> %s) 'mediainfo'" %( host ) )
clientsocket.send( "mediainfo||||%s||||%s||||%s||||%s||||%s||||%s||||EOD" %( dateandtime, time, day, media, type, data ) )
message = clientsocket.recv( 128 ).split( "||||" )
log( "(%s -> *) '%s' '%s'" %( host, message[ 0 ], message[ 1 ] ) )
clientsocket.close()
except:
print_exc()
log( "Unable to establish connection to server at address %s" %( host ) )
def lastPlayed( self, type, id, episodeID = "" ):
library.lastplayedType = type
library.lastplayedID = id
library.nowPlaying[ "localhost" ] = ( type, id )
if __addon__.getSetting( "role" ) == "Client":
# We're acting as the client, so tell the server to add the data into the database
host = __addon__.getSetting( "serverip" )
port = int( __addon__.getSetting( "port" ) )
try:
clientsocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
clientsocket.connect( ( host, port ) )
log( "(* -> %s) 'lastplayed'" %( host ) )
clientsocket.send( "lastplayed||||%s||||%s||||%s||||EOD" %( type, id, episodeID ) )
message = clientsocket.recv( 128 ).split( "||||" )
log( "(%s -> *) '%s' '%s'" %( host, message[ 0 ], message[ 1 ] ) )
clientsocket.close()
except:
print_exc()
log( "(playing) Unable to establish connection to server at address %s:%s" %( host, port ) )
def updateWidgetServer( self, type ):
if __addon__.getSetting( "role" ) == "Client":
# We're acting as the client, so tell the server to update a particular widget (e.g. after media
# played or library updated)
host = __addon__.getSetting( "serverip" )
port = int( __addon__.getSetting( "port" ) )
try:
clientsocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
clientsocket.connect( ( host, port ) )
log( "(* -> %s) 'updatewidget' (%s)" %( host, type ) )
clientsocket.send( "updatewidget||||%s||||EOD" %( type ) )
message = clientsocket.recv( 128 ).split( "||||" )
log( "(%s -> *) '%s' 's'" %( host, message[ 0 ], message[ 1 ] ) )
clientsocket.close()
except:
log( "Unable to establish connection to server at address %s" %( host ) )
def pingServer( self, firstConnect = False ):
if __addon__.getSetting( "role" ) == "Client":
# Ping the server
host = __addon__.getSetting( "serverip" )
port = int( __addon__.getSetting( "port" ) )
if firstConnect:
message = "clientstart"
else:
message = "ping"
try:
clientsocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
clientsocket.connect( ( host, port ) )
if firstConnect:
log( "(* -> %s) '%s'" %( host, message ) )
clientsocket.send( "%s||||EOD" %( message ) )
message = clientsocket.recv( 128 ).split( "||||" )
if firstConnect:
log( "(%s -> *) '%s' '%s'" %( host, message[ 0 ], message[ 1 ] ) )
clientsocket.close()
except:
log( "Unable to establish connection to server at address %s" %( host ) )
class Widgets_Monitor(xbmc.Monitor):
def __init__(self, *args, **kwargs):
xbmc.Monitor.__init__(self)
self.action = kwargs[ "action" ]
def onDatabaseUpdated(self, database):
self.action( database )
class Widgets_Player(xbmc.Player):
def __init__(self, *args, **kwargs):
xbmc.Player.__init__(self)
self.action = kwargs[ "action" ]
self.ended = kwargs[ "ended" ]
def onPlayBackStarted(self):
log( "Playback started" )
xbmc.sleep(1000)
self.action()
def onPlayBackEnded(self):
self.ended()
def onPlayBackStopped(self):
self.ended()
| gpl-2.0 | -7,838,391,187,766,050,000 | 47.467372 | 947 | 0.549962 | false |
ichuang/sympy | sympy/polys/tests/test_polyclasses.py | 2 | 11852 | """Tests for OO layer of several polynomial representations. """
from sympy.polys.polyclasses import (
DMP, init_normal_DMP,
DMF, init_normal_DMF,
ANP, init_normal_ANP,
)
from sympy.polys.domains import ZZ, QQ
from sympy.polys.specialpolys import f_4
from sympy.polys.polyerrors import (
ExactQuotientFailed,
)
from sympy.utilities.pytest import raises
def test_DMP___init__():
f = DMP([[0],[],[0,1,2],[3]], ZZ)
assert f.rep == [[1,2],[3]]
assert f.dom == ZZ
assert f.lev == 1
f = DMP([[1,2],[3]], ZZ, 1)
assert f.rep == [[1,2],[3]]
assert f.dom == ZZ
assert f.lev == 1
f = DMP({(1,1): 1, (0,0): 2}, ZZ, 1)
assert f.rep == [[1,0],[2]]
assert f.dom == ZZ
assert f.lev == 1
def test_DMP___eq__():
assert DMP([[ZZ(1),ZZ(2)],[ZZ(3)]], ZZ) == \
DMP([[ZZ(1),ZZ(2)],[ZZ(3)]], ZZ)
assert DMP([[ZZ(1),ZZ(2)],[ZZ(3)]], ZZ) == \
DMP([[QQ(1),QQ(2)],[QQ(3)]], QQ)
assert DMP([[QQ(1),QQ(2)],[QQ(3)]], QQ) == \
DMP([[ZZ(1),ZZ(2)],[ZZ(3)]], ZZ)
assert DMP([[[ZZ(1)]]], ZZ) != DMP([[ZZ(1)]], ZZ)
assert DMP([[ZZ(1)]], ZZ) != DMP([[[ZZ(1)]]], ZZ)
def test_DMP___bool__():
assert bool(DMP([[]], ZZ)) == False
assert bool(DMP([[1]], ZZ)) == True
def test_DMP_to_dict():
f = DMP([[3],[],[2],[],[8]], ZZ)
assert f.to_dict() == \
{(4, 0): 3, (2, 0): 2, (0, 0): 8}
assert f.to_sympy_dict() == \
{(4, 0): ZZ.to_sympy(3), (2, 0): ZZ.to_sympy(2), (0, 0): ZZ.to_sympy(8)}
def test_DMP_properties():
assert DMP([[]], ZZ).is_zero == True
assert DMP([[1]], ZZ).is_zero == False
assert DMP([[1]], ZZ).is_one == True
assert DMP([[2]], ZZ).is_one == False
assert DMP([[1]], ZZ).is_ground == True
assert DMP([[1],[2],[1]], ZZ).is_ground == False
assert DMP([[1],[2,0],[1,0]], ZZ).is_sqf == True
assert DMP([[1],[2,0],[1,0,0]], ZZ).is_sqf == False
assert DMP([[1,2],[3]], ZZ).is_monic == True
assert DMP([[2,2],[3]], ZZ).is_monic == False
assert DMP([[1,2],[3]], ZZ).is_primitive == True
assert DMP([[2,4],[6]], ZZ).is_primitive == False
def test_DMP_arithmetics():
f = DMP([[2],[2,0]], ZZ)
assert f.mul_ground(2) == DMP([[4],[4,0]], ZZ)
assert f.quo_ground(2) == DMP([[1],[1,0]], ZZ)
raises(ExactQuotientFailed, 'f.exquo_ground(3)')
f = DMP([[-5]], ZZ)
g = DMP([[5]], ZZ)
assert f.abs() == g
assert abs(f) == g
assert g.neg() == f
assert -g == f
h = DMP([[]], ZZ)
assert f.add(g) == h
assert f + g == h
assert g + f == h
assert f + 5 == h
assert 5 + f == h
h = DMP([[-10]], ZZ)
assert f.sub(g) == h
assert f - g == h
assert g - f == -h
assert f - 5 == h
assert 5 - f == -h
h = DMP([[-25]], ZZ)
assert f.mul(g) == h
assert f * g == h
assert g * f == h
assert f * 5 == h
assert 5 * f == h
h = DMP([[25]], ZZ)
assert f.sqr() == h
assert f.pow(2) == h
assert f**2 == h
raises(TypeError, "f.pow('x')")
f = DMP([[1],[],[1,0,0]], ZZ)
g = DMP([[2],[-2,0]], ZZ)
q = DMP([[2],[2,0]], ZZ)
r = DMP([[8,0,0]], ZZ)
assert f.pdiv(g) == (q, r)
assert f.pquo(g) == q
assert f.prem(g) == r
raises(ExactQuotientFailed, 'f.pexquo(g)')
f = DMP([[1],[],[1,0,0]], ZZ)
g = DMP([[1],[-1,0]], ZZ)
q = DMP([[1],[1,0]], ZZ)
r = DMP([[2,0,0]], ZZ)
assert f.div(g) == (q, r)
assert f.quo(g) == q
assert f.rem(g) == r
assert divmod(f, g) == (q, r)
assert f // g == q
assert f % g == r
raises(ExactQuotientFailed, 'f.exquo(g)')
def test_DMP_functionality():
f = DMP([[1],[2,0],[1,0,0]], ZZ)
g = DMP([[1],[1,0]], ZZ)
h = DMP([[1]], ZZ)
assert f.degree() == 2
assert f.degree_list() == (2, 2)
assert f.total_degree() == 2
assert f.LC() == ZZ(1)
assert f.TC() == ZZ(0)
assert f.nth(1, 1) == ZZ(2)
raises(TypeError, "f.nth(0, 'x')")
assert f.max_norm() == 2
assert f.l1_norm() == 4
u = DMP([[2],[2,0]], ZZ)
assert f.diff(m=1, j=0) == u
assert f.diff(m=1, j=1) == u
raises(TypeError, "f.diff(m='x', j=0)")
u = DMP([1,2,1], ZZ)
v = DMP([1,2,1], ZZ)
assert f.eval(a=1, j=0) == u
assert f.eval(a=1, j=1) == v
assert f.eval(1).eval(1) == ZZ(4)
assert f.cofactors(g) == (g, g, h)
assert f.gcd(g) == g
assert f.lcm(g) == f
u = DMP([[QQ(45),QQ(30),QQ(5)]], QQ)
v = DMP([[QQ(1),QQ(2,3),QQ(1,9)]], QQ)
assert u.monic() == v
assert (4*f).content() == ZZ(4)
assert (4*f).primitive() == (ZZ(4), f)
f = DMP([[1],[2],[3],[4],[5],[6]], ZZ)
assert f.trunc(3) == DMP([[1],[-1],[],[1],[-1],[]], ZZ)
f = DMP(f_4, ZZ)
assert f.sqf_part() == -f
assert f.sqf_list() == (ZZ(-1), [(-f, 1)])
f = DMP([[-1],[],[],[5]], ZZ)
g = DMP([[3,1],[],[]], ZZ)
h = DMP([[45,30,5]], ZZ)
r = DMP([675,675,225,25], ZZ)
assert f.subresultants(g) == [f, g, h]
assert f.resultant(g) == r
f = DMP([1,3,9,-13], ZZ)
assert f.discriminant() == -11664
f = DMP([QQ(2),QQ(0)], QQ)
g = DMP([QQ(1),QQ(0),QQ(-16)], QQ)
s = DMP([QQ(1,32),QQ(0)], QQ)
t = DMP([QQ(-1,16)], QQ)
h = DMP([QQ(1)], QQ)
assert f.half_gcdex(g) == (s, h)
assert f.gcdex(g) == (s, t, h)
assert f.invert(g) == s
f = DMP([[1],[2],[3]], QQ)
raises(ValueError, "f.half_gcdex(f)")
raises(ValueError, "f.gcdex(f)")
raises(ValueError, "f.invert(f)")
f = DMP([1,0,20,0,150,0,500,0,625,-2,0,-10,9], ZZ)
g = DMP([1,0,0,-2,9], ZZ)
h = DMP([1,0,5,0], ZZ)
assert g.compose(h) == f
assert f.decompose() == [g, h]
f = DMP([[1],[2],[3]], QQ)
raises(ValueError, "f.decompose()")
raises(ValueError, "f.sturm()")
def test_DMP_exclude():
f = [[[[[[[[[[[[[[[[[[[[[[[[[[1]], [[]]]]]]]]]]]]]]]]]]]]]]]]]]
J = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 24, 25]
assert DMP(f, ZZ).exclude() == (J, DMP([1, 0], ZZ))
assert DMP([[1], [1, 0]], ZZ).exclude() == ([], DMP([[1], [1, 0]], ZZ))
def test_DMF__init__():
f = DMF(([[0],[],[0,1,2],[3]], [[1,2,3]]), ZZ)
assert f.num == [[1,2],[3]]
assert f.den == [[1,2,3]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1,2],[3]], [[1,2,3]]), ZZ, 1)
assert f.num == [[1,2],[3]]
assert f.den == [[1,2,3]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[-1],[-2]],[[3],[-4]]), ZZ)
assert f.num == [[-1],[-2]]
assert f.den == [[3],[-4]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1],[2]],[[-3],[4]]), ZZ)
assert f.num == [[-1],[-2]]
assert f.den == [[3],[-4]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1],[2]],[[-3],[4]]), ZZ)
assert f.num == [[-1],[-2]]
assert f.den == [[3],[-4]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[]],[[-3],[4]]), ZZ)
assert f.num == [[]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(17, ZZ, 1)
assert f.num == [[17]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1],[2]]), ZZ)
assert f.num == [[1],[2]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF([[0],[],[0,1,2],[3]], ZZ)
assert f.num == [[1,2],[3]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF({(1,1): 1, (0,0): 2}, ZZ, 1)
assert f.num == [[1,0],[2]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[QQ(1)],[QQ(2)]], [[-QQ(3)],[QQ(4)]]), QQ)
assert f.num == [[-QQ(1)],[-QQ(2)]]
assert f.den == [[QQ(3)],[-QQ(4)]]
assert f.lev == 1
assert f.dom == QQ
f = DMF(([[QQ(1,5)],[QQ(2,5)]], [[-QQ(3,7)],[QQ(4,7)]]), QQ)
assert f.num == [[-QQ(7)],[-QQ(14)]]
assert f.den == [[QQ(15)],[-QQ(20)]]
assert f.lev == 1
assert f.dom == QQ
raises(ValueError, "DMF(([1], [[1]]), ZZ)")
raises(ZeroDivisionError, "DMF(([1], []), ZZ)")
def test_DMF__eq__():
pass
def test_DMF__bool__():
assert bool(DMF([[]], ZZ)) == False
assert bool(DMF([[1]], ZZ)) == True
def test_DMF_properties():
assert DMF([[]], ZZ).is_zero == True
assert DMF([[]], ZZ).is_one == False
assert DMF([[1]], ZZ).is_zero == False
assert DMF([[1]], ZZ).is_one == True
assert DMF(([[1]], [[2]]), ZZ).is_one == False
def test_DMF_arithmetics():
f = DMF([[7],[-9]], ZZ)
g = DMF([[-7],[9]], ZZ)
assert f.neg() == -f == g
f = DMF(([[1]], [[1],[]]), ZZ)
g = DMF(([[1]], [[1,0]]), ZZ)
h = DMF(([[1],[1,0]], [[1,0],[]]), ZZ)
assert f.add(g) == f + g == h
assert g.add(f) == g + f == h
h = DMF(([[-1],[1,0]], [[1,0],[]]), ZZ)
assert f.sub(g) == f - g == h
h = DMF(([[1]], [[1,0],[]]), ZZ)
assert f.mul(g) == f*g == h
assert g.mul(f) == g*f == h
h = DMF(([[1,0]], [[1],[]]), ZZ)
assert f.quo(g) == f/g == h
h = DMF(([[1]], [[1],[],[],[]]), ZZ)
assert f.pow(3) == f**3 == h
h = DMF(([[1]], [[1,0,0,0]]), ZZ)
assert g.pow(3) == g**3 == h
def test_ANP___init__():
rep = [QQ(1),QQ(1)]
mod = [QQ(1),QQ(0),QQ(1)]
f = ANP(rep, mod, QQ)
assert f.rep == [QQ(1),QQ(1)]
assert f.mod == [QQ(1),QQ(0),QQ(1)]
assert f.dom == QQ
rep = {1: QQ(1), 0: QQ(1)}
mod = {2: QQ(1), 0: QQ(1)}
f = ANP(rep, mod, QQ)
assert f.rep == [QQ(1),QQ(1)]
assert f.mod == [QQ(1),QQ(0),QQ(1)]
assert f.dom == QQ
f = ANP(1, mod, QQ)
assert f.rep == [QQ(1)]
assert f.mod == [QQ(1),QQ(0),QQ(1)]
assert f.dom == QQ
def test_ANP___eq__():
a = ANP([QQ(1), QQ(1)], [QQ(1),QQ(0),QQ(1)], QQ)
b = ANP([QQ(1), QQ(1)], [QQ(1),QQ(0),QQ(2)], QQ)
assert (a == a) == True
assert (a != a) == False
assert (a == b) == False
assert (a != b) == True
b = ANP([QQ(1), QQ(2)], [QQ(1),QQ(0),QQ(1)], QQ)
assert (a == b) == False
assert (a != b) == True
def test_ANP___bool__():
assert bool(ANP([], [QQ(1),QQ(0),QQ(1)], QQ)) == False
assert bool(ANP([QQ(1)], [QQ(1),QQ(0),QQ(1)], QQ)) == True
def test_ANP_properties():
mod = [QQ(1),QQ(0),QQ(1)]
assert ANP([QQ(0)], mod, QQ).is_zero == True
assert ANP([QQ(1)], mod, QQ).is_zero == False
assert ANP([QQ(1)], mod, QQ).is_one == True
assert ANP([QQ(2)], mod, QQ).is_one == False
def test_ANP_arithmetics():
mod = [QQ(1),QQ(0),QQ(0),QQ(-2)]
a = ANP([QQ(2),QQ(-1),QQ(1)], mod, QQ)
b = ANP([QQ(1),QQ(2)], mod, QQ)
c = ANP([QQ(-2), QQ(1), QQ(-1)], mod, QQ)
assert a.neg() == -a == c
c = ANP([QQ(2), QQ(0), QQ(3)], mod, QQ)
assert a.add(b) == a+b == c
assert b.add(a) == b+a == c
c = ANP([QQ(2), QQ(-2), QQ(-1)], mod, QQ)
assert a.sub(b) == a-b == c
c = ANP([QQ(-2), QQ(2), QQ(1)], mod, QQ)
assert b.sub(a) == b-a == c
c = ANP([QQ(3), QQ(-1), QQ(6)], mod, QQ)
assert a.mul(b) == a*b == c
assert b.mul(a) == b*a == c
c = ANP([QQ(-1,43), QQ(9,43), QQ(5,43)], mod, QQ)
assert a.pow(0) == a**(0) == ANP(1, mod, QQ)
assert a.pow(1) == a**(1) == a
assert a.pow(-1) == a**(-1) == c
assert a.quo(a) == a.mul(a.pow(-1)) == a*a**(-1) == ANP(1, mod, QQ)
def test___hash__():
# Issue 2472
# Make sure int vs. long doesn't affect hashing with Python ground types
    assert DMP([[1, 2], [3]], ZZ) == DMP([[1L, 2L], [3L]], ZZ)
    assert hash(DMP([[1, 2], [3]], ZZ)) == hash(DMP([[1L, 2L], [3L]], ZZ))
assert DMF(([[1, 2], [3]], [[1]]), ZZ) == DMF(([[1L, 2L], [3L]], [[1L]]), ZZ)
assert hash(DMF(([[1, 2], [3]], [[1]]), ZZ)) == hash(DMF(([[1L, 2L], [3L]], [[1L]]), ZZ))
    assert ANP([1, 1], [1, 0, 1], ZZ) == ANP([1L, 1L], [1L, 0L, 1L], ZZ)
    assert hash(ANP([1, 1], [1, 0, 1], ZZ)) == hash(ANP([1L, 1L], [1L, 0L, 1L], ZZ))
| bsd-3-clause | -6,849,658,554,011,959,000 | 22.751503 | 98 | 0.444904 | false |
hatchetation/freeipa | ipapython/platform/fedora16.py | 1 | 9991 | # Author: Alexander Bokovoy <[email protected]>
#
# Copyright (C) 2011 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import time
from ipapython import ipautil, dogtag
from ipapython.platform import base, redhat, systemd
from ipapython.ipa_log_manager import root_logger
from ipalib import api
# All what we allow exporting directly from this module
# Everything else is made available through these symbols when they are
# directly imported into ipapython.services:
# authconfig -- class reference for platform-specific implementation of
# authconfig(8)
# service -- class reference for platform-specific implementation of a
# PlatformService class
# knownservices -- factory instance to access named services IPA cares about,
# names are ipapython.services.wellknownservices
# backup_and_replace_hostname -- platform-specific way to set hostname and
# make it persistent over reboots
# restore_network_configuration -- platform-specific way of restoring network
# configuration (e.g. static hostname)
# restore_context -- platform-specific way to restore security context, if
# applicable
# check_selinux_status -- platform-specific way to see if SELinux is enabled
# and restorecon is installed.
__all__ = ['authconfig', 'service', 'knownservices',
'backup_and_replace_hostname', 'restore_context', 'check_selinux_status',
'restore_network_configuration', 'timedate_services']
# Just copy a referential list of timedate services
timedate_services = list(base.timedate_services)
# For beginning just remap names to add .service
# As more services will migrate to systemd, unit names will deviate and
# mapping will be kept in this dictionary
system_units = dict(map(lambda x: (x, "%s.service" % (x)), base.wellknownservices))
system_units['rpcgssd'] = 'nfs-secure.service'
system_units['rpcidmapd'] = 'nfs-idmap.service'
# Rewrite dirsrv and pki-tomcatd services as they support instances via separate
# service generator. To make this work, one needs to have both [email protected]
# and foo.target -- the latter is used when request should be coming for
# all instances (like stop). systemd, unfortunately, does not allow one
# to request action for all service instances at once if only [email protected]
# unit is available. To add more, if any of those services need to be
# started/stopped automagically, one needs to manually create symlinks in
# /etc/systemd/system/foo.target.wants/ (look into systemd.py's enable()
# code).
system_units['dirsrv'] = '[email protected]'
# Our directory server instance for PKI is [email protected]
system_units['pkids'] = '[email protected]'
# Old style PKI instance
system_units['pki-cad'] = '[email protected]'
system_units['pki_cad'] = system_units['pki-cad']
# Our PKI instance is [email protected]
system_units['pki-tomcatd'] = '[email protected]'
system_units['pki_tomcatd'] = system_units['pki-tomcatd']
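# For illustration (the instance name below is hypothetical), a generic
# service name is looked up in system_units and, for templated units, an
# instance name is appended by the systemd layer, e.g.
#   'httpd'  -> 'httpd.service'
#   'dirsrv' -> '[email protected]' -> '[email protected]'
# so that acting on "dirsrv" for one directory server instance ends up
# calling systemctl on the instantiated unit.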
class Fedora16Service(systemd.SystemdService):
def __init__(self, service_name):
systemd_name = service_name
if service_name in system_units:
systemd_name = system_units[service_name]
else:
if len(service_name.split('.')) == 1:
# if service_name does not have a dot, it is not foo.service
# and not a foo.target. Thus, not correct service name for
# systemd, default to foo.service style then
systemd_name = "%s.service" % (service_name)
super(Fedora16Service, self).__init__(service_name, systemd_name)
# Special handling of directory server service
#
# We need to explicitly enable instances to install proper symlinks as
# dirsrv.target.wants/ dependencies. Standard systemd service class does it
# on enable() method call. Unfortunately, ipa-server-install does not do
# explicit dirsrv.enable() because the service startup is handled by ipactl.
#
# If we didn't do this, our instances would not be started as systemd would
# not have any clue about instances (PKI-IPA and the domain we serve) at all.
# Thus, hook into dirsrv.restart().
class Fedora16DirectoryService(Fedora16Service):
def enable(self, instance_name=""):
super(Fedora16DirectoryService, self).enable(instance_name)
dirsrv_systemd = "/etc/sysconfig/dirsrv.systemd"
if os.path.exists(dirsrv_systemd):
# We need to enable LimitNOFILE=8192 in the [email protected]
# Since 389-ds-base-1.2.10-0.8.a7 the configuration of the
# service parameters is performed via
# /etc/sysconfig/dirsrv.systemd file which is imported by systemd
# into [email protected] unit
replacevars = {'LimitNOFILE':'8192'}
ipautil.inifile_replace_variables(dirsrv_systemd, 'service', replacevars=replacevars)
restore_context(dirsrv_systemd)
ipautil.run(["/bin/systemctl", "--system", "daemon-reload"],raiseonerr=False)
def restart(self, instance_name="", capture_output=True, wait=True):
if len(instance_name) > 0:
elements = self.systemd_name.split("@")
srv_etc = os.path.join(self.SYSTEMD_ETC_PATH, self.systemd_name)
srv_tgt = os.path.join(self.SYSTEMD_ETC_PATH, self.SYSTEMD_SRV_TARGET % (elements[0]))
srv_lnk = os.path.join(srv_tgt, self.service_instance(instance_name))
if not os.path.exists(srv_etc):
self.enable(instance_name)
elif not os.path.samefile(srv_etc, srv_lnk):
os.unlink(srv_lnk)
os.symlink(srv_etc, srv_lnk)
super(Fedora16DirectoryService, self).restart(instance_name, capture_output=capture_output, wait=wait)
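    # Roughly, the symlink bookkeeping above amounts to the following layout
    # (paths are illustrative; SYSTEMD_ETC_PATH is typically /etc/systemd/system/):
    #   /etc/systemd/system/[email protected]                    <- srv_etc
    #   /etc/systemd/system/dirsrv.target.wants/[email protected]  <- srv_lnk, symlink to srv_etc
    # so systemd knows about every instance before it is restarted.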
# Enforce restart of IPA services when we do enable it
# This gets around the fact that after ipa-server-install systemd thinks
# ipa.service is not yet started but all services were actually started
# already.
class Fedora16IPAService(Fedora16Service):
def enable(self, instance_name=""):
super(Fedora16IPAService, self).enable(instance_name)
self.restart(instance_name)
class Fedora16SSHService(Fedora16Service):
def get_config_dir(self, instance_name=""):
return '/etc/ssh'
class Fedora16CAService(Fedora16Service):
def __wait_until_running(self):
# We must not wait for the httpd proxy if httpd is not set up yet.
# Unfortunately, knownservices.httpd.is_installed() can return
# false positives, so check for existence of our configuration file.
# TODO: Use a cleaner solution
if not os.path.exists('/etc/httpd/conf.d/ipa.conf'):
root_logger.debug(
'The httpd proxy is not installed, skipping wait for CA')
return
if dogtag.install_constants.DOGTAG_VERSION < 10:
# The server status information isn't available on DT 9
root_logger.debug('Using Dogtag 9, skipping wait for CA')
return
root_logger.debug('Waiting until the CA is running')
timeout = api.env.startup_timeout
op_timeout = time.time() + timeout
while time.time() < op_timeout:
status = dogtag.ca_status()
root_logger.debug('The CA status is: %s' % status)
if status == 'running':
break
root_logger.debug('Waiting for CA to start...')
time.sleep(1)
else:
raise RuntimeError('CA did not start in %ss' % timeout)
def start(self, instance_name="", capture_output=True, wait=True):
super(Fedora16CAService, self).start(
instance_name, capture_output=capture_output, wait=wait)
if wait:
self.__wait_until_running()
def restart(self, instance_name="", capture_output=True, wait=True):
super(Fedora16CAService, self).restart(
instance_name, capture_output=capture_output, wait=wait)
if wait:
self.__wait_until_running()
# Redirect directory server service through special sub-class due to its
# special handling of instances
def f16_service(name):
if name == 'dirsrv':
return Fedora16DirectoryService(name)
if name == 'ipa':
return Fedora16IPAService(name)
if name == 'sshd':
return Fedora16SSHService(name)
if name in ('pki-cad', 'pki_cad', 'pki-tomcatd', 'pki_tomcatd'):
return Fedora16CAService(name)
return Fedora16Service(name)
class Fedora16Services(base.KnownServices):
def __init__(self):
services = dict()
for s in base.wellknownservices:
services[s] = f16_service(s)
# Call base class constructor. This will lock services to read-only
super(Fedora16Services, self).__init__(services)
def restore_context(filepath, restorecon='/usr/sbin/restorecon'):
return redhat.restore_context(filepath, restorecon)
def check_selinux_status(restorecon='/usr/sbin/restorecon'):
return redhat.check_selinux_status(restorecon)
authconfig = redhat.authconfig
service = f16_service
knownservices = Fedora16Services()
backup_and_replace_hostname = redhat.backup_and_replace_hostname
restore_network_configuration = redhat.restore_network_configuration
| gpl-3.0 | -5,763,098,073,647,504,000 | 45.686916 | 110 | 0.687919 | false |
elliotthill/django-oscar | oscar/apps/address/migrations/0006_auto__add_unique_useraddress_hash_user.py | 16 | 7096 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'UserAddress', fields ['hash', 'user']
db.create_unique('address_useraddress', ['hash', 'user_id'])
def backwards(self, orm):
# Removing unique constraint on 'UserAddress', fields ['hash', 'user']
db.delete_unique('address_useraddress', ['hash', 'user_id'])
models = {
'address.country': {
'Meta': {'ordering': "('-display_order', 'name')", 'object_name': 'Country'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}),
'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'address.useraddress': {
'Meta': {'ordering': "['-num_orders']", 'unique_together': "(('user', 'hash'),)", 'object_name': 'UserAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default_for_billing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_default_for_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'num_orders': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'addresses'", 'to': "orm['{0}']".format(AUTH_USER_MODEL)})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
AUTH_USER_MODEL: {
'Meta': {'object_name': AUTH_USER_MODEL_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['address']
| bsd-3-clause | -1,272,559,203,242,485,000 | 75.301075 | 182 | 0.557074 | false |
5GExchange/escape | test/testframework/testcases/perf.py | 2 | 5795 | # Copyright 2017 Janos Czentye
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import distutils.dir_util
import logging
import os
import shutil
import time
from testframework.testcases.basic import BasicSuccessfulTestCase
from testframework.testcases.domain_mock import DomainOrchestratorAPIMocker
from testframework.testcases.dynamic import (DynamicallyGeneratedTestCase,
DynamicTestGenerator)
log = logging.getLogger()
class PerformanceTestCase(BasicSuccessfulTestCase):
"""
"""
def __init__ (self, result_folder=None, stat_folder=None, *args, **kwargs):
super(PerformanceTestCase, self).__init__(*args, **kwargs)
if not result_folder:
result_folder = os.getcwd()
log.warning("Result folder is missing! Using working dir: %s"
% result_folder)
self.result_folder = os.path.join(result_folder,
time.strftime("%Y%m%d%H%M%S"))
self.stat_folder = stat_folder
def tearDown (self):
if self.result_folder is not None:
target_dir = os.path.join(self.result_folder, self.test_case_info.name)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
log.debug("Store files into: %s" % target_dir)
log.debug(self.stat_folder)
if self.stat_folder is not None:
distutils.dir_util.copy_tree(src=self.stat_folder, dst=target_dir)
shutil.copytree(src=self.test_case_info.full_testcase_path,
dst=os.path.join(target_dir, "output"),
ignore=shutil.ignore_patterns('*.txt',
'*.sh',
'*.config'))
super(PerformanceTestCase, self).tearDown()
class DynamicPerformanceTestCase(DynamicallyGeneratedTestCase):
"""
"""
def __init__ (self, result_folder=None, stat_folder=None, *args, **kwargs):
super(DynamicPerformanceTestCase, self).__init__(*args, **kwargs)
if not result_folder:
result_folder = os.getcwd()
log.warning("Result folder is missing! Using working dir: %s"
% result_folder)
self.result_folder = os.path.join(result_folder,
time.strftime("%Y%m%d%H%M%S"))
self.stat_folder = stat_folder
def tearDown (self):
if self.result_folder is not None:
target_dir = os.path.join(self.result_folder, self.test_case_info.name)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
log.debug("Store files into: %s" % target_dir)
log.debug(self.stat_folder)
if self.stat_folder is not None:
distutils.dir_util.copy_tree(src=self.stat_folder, dst=target_dir)
shutil.copytree(src=self.test_case_info.full_testcase_path,
dst=os.path.join(target_dir, "output"),
ignore=shutil.ignore_patterns('*.txt',
'*.sh',
'*.config'))
super(DynamicPerformanceTestCase, self).tearDown()
def verify_result (self):
super(BasicSuccessfulTestCase, self).verify_result()
if self.run_result.log_output is None:
raise RuntimeError("log output is missing!")
# Detect TIMEOUT error
self.assertFalse(self.command_runner.timeout_exceeded,
msg="Running timeout(%ss) is exceeded!" %
self.command_runner.kill_timeout)
# Search for successful orchestration message
error_result = self.detect_unsuccessful_result(self.run_result)
self.assertIsNone(error_result,
msg="Unsuccessful result detected:\n%s" % error_result)
class DynamicMockingPerformanceTestCase(DynamicPerformanceTestCase):
"""
"""
def __init__ (self, responses=None, *args, **kwargs):
super(DynamicMockingPerformanceTestCase, self).__init__(*args, **kwargs)
self.domain_mocker = DomainOrchestratorAPIMocker(**kwargs)
    testcase_dir = self.test_case_info.full_testcase_path
    if responses:
      self.domain_mocker.register_responses(dirname=testcase_dir, responses=responses)
    else:
      self.domain_mocker.register_responses_from_dir(dirname=testcase_dir)
def setUp (self):
super(DynamicMockingPerformanceTestCase, self).setUp()
self.domain_mocker.start()
def tearDown (self):
super(DynamicMockingPerformanceTestCase, self).tearDown()
self.domain_mocker.shutdown()
class DynamicPerformanceTestGenerator(DynamicTestGenerator):
"""
"""
def __init__ (self, result_folder=None, stat_folder=None, *args, **kwargs):
if not result_folder:
result_folder = os.getcwd()
log.warning("Result folder is missing! Using working dir: %s"
% result_folder)
self.result_folder = os.path.join(result_folder,
time.strftime("%Y%m%d%H%M%S"))
self.stat_folder = stat_folder
super(DynamicPerformanceTestGenerator, self).__init__(*args, **kwargs)
def _create_test_cases (self):
super(DynamicPerformanceTestGenerator, self)._create_test_cases()
for tc in self._tests:
if hasattr(tc, "result_folder"):
tc.result_folder = self.result_folder
if hasattr(tc, "stat_folder"):
tc.stat_folder = self.stat_folder
| apache-2.0 | 1,511,885,670,726,187,000 | 38.965517 | 77 | 0.641933 | false |
jiahaoliang/group-based-policy | gbpservice/neutron/services/grouppolicy/common/exceptions.py | 1 | 11892 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Exceptions used by Group Policy plugin and drivers."""
from neutron.common import exceptions
class GroupPolicyDriverError(exceptions.NeutronException):
"""Policy driver call failed."""
message = _("%(method)s failed.")
class GroupPolicyException(exceptions.NeutronException):
"""Base for policy driver exceptions returned to user."""
pass
class GroupPolicyDeploymentError(GroupPolicyException):
message = _("Deployment not configured properly. See logs for details.")
class GroupPolicyInternalError(GroupPolicyException):
message = _("Unexpected internal failure. See logs for details.")
class GroupPolicyBadRequest(exceptions.BadRequest, GroupPolicyException):
"""Base for policy driver exceptions returned to user."""
pass
class GroupPolicyNotSupportedError(GroupPolicyBadRequest):
message = _("Operation %(method_name)s for resource "
"%(resource_name)s is not supported by this "
"deployment.")
class PolicyTargetRequiresPolicyTargetGroup(GroupPolicyBadRequest):
message = _("An policy target group was not specified when "
"creating policy_target.")
class PolicyTargetGroupUpdateOfPolicyTargetNotSupported(GroupPolicyBadRequest):
message = _("Updating policy target group of policy target "
"is not supported.")
class PolicyTargetGroupSubnetRemovalNotSupported(GroupPolicyBadRequest):
message = _("Removing a subnet from an policy target group is not "
"supported.")
class L2PolicyUpdateOfPolicyTargetGroupNotSupported(GroupPolicyBadRequest):
message = _("Updating L2 policy of policy target group is not supported.")
class L3PolicyUpdateOfL2PolicyNotSupported(GroupPolicyBadRequest):
message = _("Updating L3 policy of L2 policy is not supported.")
class UnsettingInjectDefaultRouteOfL2PolicyNotSupported(GroupPolicyBadRequest):
message = _("Unsetting inject_default_route attribute of L2 policy is not "
"supported.")
class L3PolicyMultipleRoutersNotSupported(GroupPolicyBadRequest):
message = _("L3 policy does not support multiple routers.")
class L3PolicyRoutersUpdateNotSupported(GroupPolicyBadRequest):
message = _("Updating L3 policy's routers is not supported.")
class NoSubnetAvailable(exceptions.ResourceExhausted, GroupPolicyException):
message = _("No subnet is available from l3 policy's pool.")
class PolicyTargetGroupInUse(GroupPolicyBadRequest):
message = _("Policy Target Group %(policy_target_group)s is in use")
class InvalidPortForPTG(GroupPolicyBadRequest):
message = _("Subnet %(port_subnet_id)s of port %(port_id)s does not "
"match subnet %(ptg_subnet_id)s of Policy Target Group "
"%(policy_target_group_id)s.")
class InvalidPortExtraAttributes(GroupPolicyBadRequest):
message = _("Port extra attribute %(attribute)s is invalid for the "
"following reason: %(reason)s")
class InvalidSubnetForPTG(GroupPolicyBadRequest):
message = _("Subnet %(subnet_id)s does not belong to network "
"%(network_id)s associated with L2P %(l2p_id)s for PTG "
"%(ptg_id)s.")
class OverlappingIPPoolsInSameTenantNotAllowed(GroupPolicyBadRequest):
message = _("IP Pool %(ip_pool)s overlaps with one of the existing L3P "
"for the same tenant %(overlapping_pools)s.")
class SharedResourceReferenceError(GroupPolicyBadRequest):
message = _("Shared resource of type %(res_type)s with id %(res_id)s "
"can't reference the non shared resource of type "
"%(ref_type)s with id %(ref_id)s")
class InvalidSharedResource(GroupPolicyBadRequest):
message = _("Resource of type %(type)s cannot be shared by driver "
"%(driver)s")
class CrossTenantL2PolicyL3PolicyNotSupported(GroupPolicyBadRequest):
message = _("Cross tenancy not supported between L2Ps and L3Ps")
class CrossTenantPolicyTargetGroupL2PolicyNotSupported(
GroupPolicyBadRequest):
message = _("Cross tenancy not supported between PTGs and L2Ps")
class NonSharedNetworkOnSharedL2PolicyNotSupported(GroupPolicyBadRequest):
message = _("Non Shared Network can't be set for a shared L2 Policy")
class InvalidSharedAttributeUpdate(GroupPolicyBadRequest):
message = _("Invalid shared attribute update. Shared resource %(id)s is "
"referenced by %(rid)s, which is either shared or owned by a "
"different tenant.")
class ExternalRouteOverlapsWithL3PIpPool(GroupPolicyBadRequest):
message = _("Destination %(destination)s for ES %(es_id)s overlaps with "
"L3P %(l3p_id)s.")
class ExternalSegmentSubnetOverlapsWithL3PIpPool(GroupPolicyBadRequest):
message = _("Subnet %(subnet)s for ES %(es_id)s overlaps with "
"L3P %(l3p_id)s.")
class ExternalRouteNextHopNotInExternalSegment(GroupPolicyBadRequest):
message = _("One or more external routes' nexthop are not part of "
"subnet %(cidr)s.")
class InvalidL3PExternalIPAddress(GroupPolicyBadRequest):
message = _("Address %(ip)s allocated for l3p %(l3p_id)s on segment "
"%(es_id)s doesn't belong to the segment subnet %(es_cidr)s")
class InvalidAttributeUpdateForES(GroupPolicyBadRequest):
message = _("Attribute %(attribute)s cannot be updated for External "
"Segment.")
class MultipleESPerEPNotSupported(GroupPolicyBadRequest):
message = _("Multiple External Segments per External Policy is not "
"supported.")
class ESIdRequiredWhenCreatingEP(GroupPolicyBadRequest):
message = _("External Segment ID is required when creating ExternalPolicy")
class ESUpdateNotSupportedForEP(GroupPolicyBadRequest):
message = _("external_segments update for External Policy is not "
"supported.")
class MultipleESPerL3PolicyNotSupported(GroupPolicyBadRequest):
message = _("Only one External Segment per L3 Policy supported.")
class InvalidSubnetForES(GroupPolicyBadRequest):
message = _("External Segment subnet %(sub_id)s is not part of an "
"external network %(net_id)s.")
class OnlyOneEPPerTenantAllowed(GroupPolicyBadRequest):
message = _("Only one External Policy per Tenant is allowed.")
class ImplicitSubnetNotSupported(GroupPolicyBadRequest):
message = _("RMD doesn't support implicit external subnet creation.")
class DefaultL3PolicyAlreadyExists(GroupPolicyBadRequest):
message = _("Default L3 Policy with name %(l3p_name)s already "
"exists and is visible for this tenant.")
class DefaultExternalSegmentAlreadyExists(GroupPolicyBadRequest):
message = _("Default External Segment with name %(es_name)s already "
"exists and is visible for this tenant.")
class InvalidCrossTenantReference(GroupPolicyBadRequest):
message = _("Not supported cross tenant reference: object "
"%(res_type)s:%(res_id)s can't link %(ref_type)s:%(ref_id)s "
"unless it's shared.")
class InvalidNetworkAccess(GroupPolicyBadRequest):
message = _("%(msg)s : Network id %(network_id)s doesn't belong to "
" the tenant id %(tenant_id)s.")
class InvalidRouterAccess(GroupPolicyBadRequest):
message = _("%(msg)s : Router id %(router_id)s does not belong to the "
" tenant id %(tenant_id)s.")
class MultipleRedirectActionsNotSupportedForRule(GroupPolicyBadRequest):
message = _("Resource Mapping Driver does not support multiple redirect "
"actions in a Policy Rule.")
class MultipleRedirectActionsNotSupportedForPRS(GroupPolicyBadRequest):
message = _("Resource Mapping Driver does not support multiple redirect "
"actions in a Policy Rule Set.")
class InvalidNetworkServiceParameters(GroupPolicyBadRequest):
message = _("Resource Mapping Driver currently supports only one "
"parameter of type: ip_single and value: self_subnet and one "
"parameter of type ip_single or ip_pool and value nat_pool")
class ESSubnetRequiredForNatPool(GroupPolicyBadRequest):
message = _("Resource Mapping Driver requires an External Segment which "
"has an external subnet specified to create a Nat Pool")
class InvalidESSubnetCidrForNatPool(GroupPolicyBadRequest):
message = _("Resource Mapping Driver requires an External Segment which "
"maps to ip pool value specified in the nat pool")
class NSPRequiresES(GroupPolicyBadRequest):
message = _("Resource Mapping Driver requires an External Segment in "
"l3policy to associate a NSP with value nat_pool to a PTG")
class NSPRequiresNatPool(GroupPolicyBadRequest):
message = _("Resource Mapping Driver requires an External Segment in "
"l3policy which has nat_pool associated for associating a NSP "
"with value nat_pool to a PTG")
class L3PEsinUseByNSP(exceptions.InUse, GroupPolicyException):
message = _("The External Segment in L3Policy cannot be updated because "
"it is in use by Network Service Policy")
class NatPoolinUseByNSP(exceptions.InUse, GroupPolicyException):
message = _("The Nat Pool is in use by Network Service Policy")
class OverlappingNATPoolInES(GroupPolicyBadRequest):
message = _("One or more NAT Pools associated with ES %(es_id)s overlaps "
"with NAT Pool %(np_id)s.")
class OverlappingSubnetForNATPoolInES(GroupPolicyBadRequest):
message = _("One or more subnets associated with network %(net_id)s "
"partially overlaps with NAT Pool %(np_id)s.")
class InvalidProxiedGroupL3P(GroupPolicyBadRequest):
message = _("Cannot proxy PTG %(ptg_id)s: it's on a different L3 policy "
"%(l3p_id)s")
class InvalidProxiedGroupL2P(GroupPolicyBadRequest):
message = _("Cannot proxy PTG %(ptg_id)s: it's on the same L2 Policy as "
"the proxy group of type L2.")
class OnlyOneProxyGatewayAllowed(GroupPolicyBadRequest):
message = _("Another proxy gateway PT already exists for group "
"%(group_id)s")
class OnlyOneGroupDefaultGatewayAllowed(GroupPolicyBadRequest):
message = _("Another group default gateway PT already exists for group "
"%(group_id)s")
class PTGAlreadyProvidingRedirectPRS(GroupPolicyBadRequest):
message = _("PTG %(ptg_id)s is already providing a redirect PRS.")
class InvalidClusterId(GroupPolicyBadRequest):
message = _("In RMD and derived drivers, a PT cluster_id should point to "
"an existing PT.")
class PolicyTargetInUse(GroupPolicyBadRequest):
message = _("Cannot delete a PT in use by a cluster.")
class InvalidClusterPtg(GroupPolicyBadRequest):
message = _("Inter PTG clustering disallowed.")
class NatPoolInUseByPort(exceptions.InUse, GroupPolicyException):
message = _("Ports or floating IP addresses are using the subnet "
"corresponding to Nat Pool.")
class IdenticalExternalRoute(GroupPolicyBadRequest):
message = _("External segments %(es1)s and %(es2)s cannot have "
"identical external route CIDR %(cidr)s if associated "
"with a common L3 policy.")
| apache-2.0 | 5,639,468,632,870,441,000 | 35.478528 | 79 | 0.707198 | false |
Microvellum/Fluid-Designer | win64-vc/2.78/scripts/addons/io_import_dxf/dxfgrabber/sections.py | 5 | 2357 | # Purpose: handle dxf sections
# Created: 21.07.2012, taken from my ezdxf project
# Copyright (C) 2012, Manfred Moitzi
# License: MIT License
from __future__ import unicode_literals
__author__ = "mozman <[email protected]>"
from .codepage import toencoding
from .defaultchunk import DefaultChunk, iterchunks
from .headersection import HeaderSection
from .tablessection import TablesSection
from .entitysection import EntitySection, ObjectsSection
from .blockssection import BlocksSection
from .acdsdata import AcDsDataSection
class Sections(object):
def __init__(self, tagreader, drawing):
self._sections = {}
self._create_default_sections()
self._setup_sections(tagreader, drawing)
def __contains__(self, name):
return name in self._sections
def _create_default_sections(self):
self._sections['header'] = HeaderSection()
for cls in SECTIONMAP.values():
section = cls()
self._sections[section.name] = section
def _setup_sections(self, tagreader, drawing):
def name(section):
return section[1].value
bootstrap = True
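        # The first chunk is treated as the HEADER section: it is parsed up
        # front so the DXF version and text encoding are known before the
        # remaining sections are built.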
for section in iterchunks(tagreader, stoptag='EOF', endofchunk='ENDSEC'):
if bootstrap:
new_section = HeaderSection.from_tags(section)
drawing.dxfversion = new_section.get('$ACADVER', 'AC1009')
codepage = new_section.get('$DWGCODEPAGE', 'ANSI_1252')
drawing.encoding = toencoding(codepage)
bootstrap = False
else:
section_name = name(section)
if section_name in SECTIONMAP:
section_class = get_section_class(section_name)
new_section = section_class.from_tags(section, drawing)
else:
new_section = None
if new_section is not None:
self._sections[new_section.name] = new_section
def __getattr__(self, key):
try:
return self._sections[key]
except KeyError:
raise AttributeError(key)
SECTIONMAP = {
'TABLES': TablesSection,
'ENTITIES': EntitySection,
'OBJECTS': ObjectsSection,
'BLOCKS': BlocksSection,
'ACDSDATA': AcDsDataSection,
}
def get_section_class(name):
return SECTIONMAP.get(name, DefaultChunk)
| gpl-3.0 | -5,671,919,442,779,845,000 | 32.671429 | 81 | 0.627068 | false |
stackdio/stackdio | stackdio/core/serializers.py | 2 | 16330 | # -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import inspect
import logging
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from guardian.shortcuts import assign_perm, remove_perm
from rest_framework import serializers
from stackdio.core import mixins, models, utils, validators
from stackdio.core.fields import HyperlinkedParentField
logger = logging.getLogger(__name__)
class NoOpSerializer(serializers.Serializer):
def to_representation(self, instance):
return instance
def to_internal_value(self, data):
return data
def create(self, validated_data):
raise NotImplementedError()
def update(self, instance, validated_data):
raise NotImplementedError()
class BulkListSerializer(serializers.ListSerializer):
def update(self, instance, validated_data):
id_attr = getattr(self.child.Meta, 'update_lookup_field', 'id')
all_validated_data_by_id = {
i.pop(id_attr): i
for i in validated_data
}
if not all((bool(i) and not inspect.isclass(i)
for i in all_validated_data_by_id.keys())):
            raise serializers.ValidationError('All objects to update must provide a valid lookup value.')
# since this method is given a queryset which can have many
# model instances, first find all objects to update
# and only then update the models
objects_to_update = self.filter_queryset(instance, id_attr, all_validated_data_by_id)
self.check_objects_to_update(objects_to_update, all_validated_data_by_id)
updated_objects = []
for obj in objects_to_update:
obj_validated_data = self.get_obj_validated_data(obj, id_attr, all_validated_data_by_id)
# use model serializer to actually update the model
# in case that method is overwritten
updated_objects.append(self.child.update(obj, obj_validated_data))
return updated_objects
def filter_queryset(self, queryset, id_attr, all_validated_data_by_id):
return queryset.filter(**{
'{}__in'.format(id_attr): all_validated_data_by_id.keys(),
})
def check_objects_to_update(self, objects_to_update, all_validated_data_by_id):
if len(all_validated_data_by_id) != objects_to_update.count():
raise serializers.ValidationError({
'bulk': 'Could not find all objects to update.',
})
def get_obj_validated_data(self, obj, id_attr, all_validated_data_by_id):
obj_id = getattr(obj, id_attr)
return all_validated_data_by_id.get(obj_id)
class BulkSerializerMixin(object):
def to_internal_value(self, data):
ret = super(BulkSerializerMixin, self).to_internal_value(data)
id_attr = getattr(self.Meta, 'update_lookup_field', 'id')
request_method = getattr(getattr(self.context.get('view'), 'request'), 'method', '')
# add update_lookup_field field back to validated data
# since super by default strips out read-only fields
# hence id will no longer be present in validated_data
if all((isinstance(self.root, BulkListSerializer),
id_attr,
request_method in ('PUT', 'PATCH'))):
id_field = self.fields[id_attr]
id_value = id_field.get_value(data)
ret[id_attr] = id_value
return ret
class StackdioHyperlinkedModelSerializer(serializers.HyperlinkedModelSerializer):
"""
Override to use the appropriately namespaced url
"""
def add_extra_kwargs(self, kwargs):
"""
Hook to be able to add in extra kwargs
(specifically for the StackdioParentHyperlinkedModelSerializer)
"""
return kwargs
def build_url_field(self, field_name, model_class):
"""
Create a field representing the object's own URL.
"""
field_class = self.serializer_url_field
root_namespace = getattr(self.Meta, 'root_namespace', 'api')
app_label = getattr(self.Meta, 'app_label', model_class._meta.app_label)
model_name = getattr(self.Meta, 'model_name', model_class._meta.object_name.lower())
lookup_field = getattr(self.Meta, 'lookup_field', 'pk')
lookup_url_kwarg = getattr(self.Meta, 'lookup_url_kwarg', lookup_field)
# Override user things
if model_name in ('user', 'group', 'permission'):
app_label = 'users'
field_kwargs = {
'view_name': '%s:%s:%s-detail' % (root_namespace, app_label, model_name),
'lookup_field': lookup_field,
'lookup_url_kwarg': lookup_url_kwarg,
}
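        # e.g. (illustrative): a User object yields the view name
        # 'api:users:user-detail', while a model Foo in app 'bar' yields
        # 'api:bar:foo-detail'.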
field_kwargs = self.add_extra_kwargs(field_kwargs)
return field_class, field_kwargs
class StackdioParentHyperlinkedModelSerializer(StackdioHyperlinkedModelSerializer):
serializer_url_field = HyperlinkedParentField
def add_extra_kwargs(self, kwargs):
parent_attr = getattr(self.Meta, 'parent_attr', None)
parent_lookup_field = getattr(self.Meta, 'parent_lookup_field', 'pk')
default_parent_lookup_url_kwarg = 'parent_{}'.format(parent_lookup_field)
parent_lookup_url_kwarg = getattr(self.Meta,
'parent_lookup_url_kwarg',
default_parent_lookup_url_kwarg)
kwargs['parent_attr'] = parent_attr
kwargs['parent_lookup_field'] = parent_lookup_field
kwargs['parent_lookup_url_kwarg'] = parent_lookup_url_kwarg
return kwargs
class StackdioLabelSerializer(mixins.CreateOnlyFieldsMixin,
StackdioParentHyperlinkedModelSerializer):
"""
This is an abstract class meant to be extended for any type of object that needs to be labelled
by setting the appropriate `app_label` and `model_name` attributes on the `Meta` class.
```
class MyObjectLabelSerializer(StackdioLabelSerializer):
# The Meta class needs to inherit from the super Meta class
class Meta(StackdioLabelSerializer.Meta):
app_label = 'my-app'
model_name = 'my-object'
```
"""
class Meta:
model = models.Label
parent_attr = 'content_object'
lookup_field = 'key'
lookup_url_kwarg = 'label_name'
fields = (
'url',
'key',
'value',
)
extra_kwargs = {
'key': {'validators': [validators.LabelValidator()]},
'value': {'validators': [validators.LabelValidator()]},
}
create_only_fields = (
'key',
)
def validate(self, attrs):
content_object = self.context.get('content_object')
key = attrs.get('key')
# Only need to validate if both a key was passed in and the content_object already exists
if key and content_object:
labels = content_object.labels.filter(key=key)
if labels.count() > 0:
raise serializers.ValidationError({
'key': ['Label keys must be unique.']
})
return attrs
class StackdioLiteralLabelsSerializer(StackdioLabelSerializer):
class Meta(StackdioLabelSerializer.Meta):
fields = (
'key',
'value',
)
class PermissionsBulkListSerializer(BulkListSerializer):
name_attr_map = {
'user': 'username',
'group': 'name',
}
def filter_queryset(self, queryset, id_attr, all_validated_data_by_id):
ret = []
for obj in queryset:
auth_obj = obj[id_attr]
name_attr = self.name_attr_map[id_attr]
if getattr(auth_obj, name_attr) in all_validated_data_by_id:
ret.append(obj)
return ret
def check_objects_to_update(self, objects_to_update, all_validated_data_by_id):
if len(all_validated_data_by_id) != len(objects_to_update):
raise serializers.ValidationError({
'bulk': 'Could not find all objects to update.',
})
def get_obj_validated_data(self, obj, id_attr, all_validated_data_by_id):
auth_obj = obj[id_attr]
name_attr = self.name_attr_map[id_attr]
return all_validated_data_by_id[getattr(auth_obj, name_attr)]
class StackdioModelPermissionsSerializer(BulkSerializerMixin, serializers.Serializer):
class Meta:
list_serializer_class = PermissionsBulkListSerializer
def validate(self, attrs):
view = self.context['view']
available_perms = view.get_model_permissions()
bad_perms = []
for perm in attrs['permissions']:
if perm not in available_perms:
bad_perms.append(perm)
if bad_perms:
raise serializers.ValidationError({
'permissions': ['Invalid permissions: {0}'.format(', '.join(bad_perms))]
})
return attrs
def create(self, validated_data):
# Determine if this is a user or group
view = self.context['view']
user_or_group = view.get_user_or_group()
# Grab our data
auth_obj = validated_data[user_or_group]
# Grab model class
model_cls = validated_data['model_cls']
app_label = model_cls._meta.app_label
model_name = model_cls._meta.model_name
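        # e.g. (illustrative): perm 'view' on model Stack in app 'stacks'
        # produces the permission string 'stacks.view_stack'.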
with transaction.atomic():
for perm in validated_data['permissions']:
assign_perm('%s.%s_%s' % (app_label, perm, model_name), auth_obj)
return self.to_internal_value(validated_data)
def update(self, instance, validated_data):
# Determine if this is a user or group
view = self.context['view']
user_or_group = view.get_user_or_group()
# The funkiness below is to prevent a client from submitting a PUT or PATCH request to
# /api/<resource>/permissions/users/user_id1 with user="user_id2". If this were
# allowed, you could change the permissions of any user from the endpoint of any other user
# Pull the user from the instance to update rather than from the incoming request
auth_obj = instance[user_or_group]
# Then add it to the validated_data so the create request uses the correct user
validated_data[user_or_group] = auth_obj
# Grab the object
model_cls = validated_data['model_cls']
app_label = model_cls._meta.app_label
model_name = model_cls._meta.model_name
# Make sure we do this atomically - since we're removing all permissions on a PUT,
# don't commit the transaction until the permissions have been re-created
with transaction.atomic():
if not self.partial:
# PUT request - delete all the permissions, then recreate them later
for perm in instance['permissions']:
remove_perm('%s.%s_%s' % (app_label, perm, model_name), auth_obj)
# We now want to do the same thing as create
return self.create(validated_data)
class StackdioObjectPermissionsSerializer(BulkSerializerMixin, serializers.Serializer):
class Meta:
list_serializer_class = PermissionsBulkListSerializer
def validate(self, attrs):
view = self.context['view']
available_perms = view.get_object_permissions()
bad_perms = []
for perm in attrs['permissions']:
if perm not in available_perms:
bad_perms.append(perm)
if bad_perms:
raise serializers.ValidationError({
'permissions': ['Invalid permissions: {0}'.format(', '.join(bad_perms))]
})
return attrs
def create(self, validated_data):
# Determine if this is a user or group
view = self.context['view']
user_or_group = view.get_user_or_group()
# Grab our data
auth_obj = validated_data[user_or_group]
# Grab the object
obj = validated_data['object']
app_label = obj._meta.app_label
model_name = obj._meta.model_name
with transaction.atomic():
for perm in validated_data['permissions']:
assign_perm('%s.%s_%s' % (app_label, perm, model_name), auth_obj, obj)
return self.to_internal_value(validated_data)
def update(self, instance, validated_data):
# Determine if this is a user or group
view = self.context['view']
user_or_group = view.get_user_or_group()
# The funkiness below is to prevent a client from submitting a PUT or PATCH request to
# /api/<resource>/<pk>/permissions/users/user_id1 with user="user_id2". If this were
# allowed, you could change the permissions of any user from the endpoint of any other user
# Pull the user from the instance to update rather than from the incoming request
auth_obj = instance[user_or_group]
# Then add it to the validated_data so the create request uses the correct user
validated_data[user_or_group] = auth_obj
# Grab the object
obj = validated_data['object']
app_label = obj._meta.app_label
model_name = obj._meta.model_name
# Make sure we do this atomically - since we're removing all permissions on a PUT,
# don't commit the transaction until the permissions have been re-created
with transaction.atomic():
if not self.partial:
# PUT request - delete all the permissions, then recreate them later
for perm in instance['permissions']:
remove_perm('%s.%s_%s' % (app_label, perm, model_name), auth_obj, obj)
# We now want to do the same thing as create
return self.create(validated_data)
class ObjectPropertiesSerializer(serializers.Serializer):
def to_representation(self, instance):
return utils.recursively_sort_dict(instance.properties)
def to_internal_value(self, data):
return data
def validate(self, attrs):
validators.PropertiesValidator().validate(attrs)
return attrs
def create(self, validated_data):
raise NotImplementedError('Cannot create properties.')
def update(self, instance, validated_data):
if self.partial:
# This is a PATCH, so properly merge in the old data
old_properties = instance.properties
instance.properties = utils.recursive_update(old_properties, validated_data)
else:
# This is a PUT, so just add the data directly
instance.properties = validated_data
# Be sure to save the instance
instance.save()
return instance
class PropertiesField(serializers.JSONField):
def __init__(self, *args, **kwargs):
# Add our properties validator
kwargs.setdefault('validators', []).append(validators.PropertiesValidator())
super(PropertiesField, self).__init__(*args, **kwargs)
def to_representation(self, value):
ret = super(PropertiesField, self).to_representation(value)
return utils.recursively_sort_dict(ret)
class EventField(serializers.SlugRelatedField):
default_error_messages = {
'does_not_exist': _('Event \'{value}\' does not exist.'),
'invalid': _('Invalid value.'),
}
def __init__(self, **kwargs):
if not kwargs.get('read_only', False):
kwargs.setdefault('queryset', models.Event.objects.all())
super(EventField, self).__init__(slug_field='tag', **kwargs)
class EventSerializer(serializers.ModelSerializer):
class Meta:
model = models.Event
fields = (
'tag',
)
| apache-2.0 | 2,464,084,715,326,547,500 | 33.451477 | 100 | 0.625597 | false |
SinZ163/EdisonSandbox | twitter.py | 1 | 3749 | # coding=UTF8
from __future__ import absolute_import, print_function
#Twitter API
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy import API
#Python API
import json
import time
import mraa #Intel API
import requests
#My API
from I2cLCDRGBBackLit import I2CLCDDisplay
from TH02 import TH02
from music import Music
# Go to http://apps.twitter.com and create an app.
# The consumer key and secret will be generated for you after
consumer_key = ""
consumer_secret = ""
access_token = ""
access_token_secret = ""
weather_api = ""
with open("./conf.json", "r") as f:
info = json.load(f)
consumer_key = info["consumer_key"]
consumer_secret = info["consumer_secret"]
access_token = info["access_token"]
access_token_secret = info["access_token_secret"]
weather_api = info["weather_api"]
reply_tweet = "@{name} It is currently {temp} degrees, {fact} and {sunset:.2f} hours until sunset"
class StdOutListener(StreamListener):
""" A listener handles tweets are the received from the stream.
This is a basic listener that just prints received tweets to stdout.
"""
def init(self):
self.display.I2cLCDInit() #Clear the display completely
self.display.LEDColor(0x55,0xAC,0xEE) #Twitter Blue
#self.display.LCDInstruction(0x04) #Display on, Cursor OFF, blink OFF
def __init__(self, api):
self.api = api
self.display = I2CLCDDisplay()
self.music = Music()
self.sensor = TH02(bus=1)
self.uv = mraa.Aio(0)
self.init()
def on_data(self, data):
data = json.loads(data)
self.init()
self.display.LCDPrint("@" + data["user"]["screen_name"][:15])
print("@{screen_name} {text}".format(screen_name=data["user"]["screen_name"], text=data["text"]))
displayText = data["text"]
#This is to show the first word(s) well
self.display.LCDInstruction(0x80+0x28) #Row 2, Column 0x00
self.display.LCDPrint(displayText[:16])
self.music.play()
r = requests.get("http://api.openweathermap.org/data/2.5/weather?q=Hawthorn,Australia&appid=" + weather_api)
info = r.json()
print(info)
self.api.update_status(
status=reply_tweet.format(
temp = self.sensor.getTemperature(),
name = data["user"]["screen_name"],
fact = info["weather"][0]["description"],
#FutureTime - CurrentTime = Time until FutureTime. /60 to turn into minutes. /60 to turn into hours
sunset = (int(info["sys"]["sunset"]) - int(time.time())) / 60.0 / 60.0
),
in_reply_to_status_id = data["id"])
time.sleep(5)
        #If the text is too long to fit on the 16-character display, scroll it one character at a time; if it only just fits, it is simply shown once more with no harm done.
while(len(displayText) >= 16):
            self.display.LCDInstruction(0x80+0x28) #Row 2, Column 0x00
self.display.LCDPrint(displayText[:16])
time.sleep(0.5)
displayText = displayText[1:]
time.sleep(2)
self.init()
return True
def on_error(self, status):
print(status)
if __name__ == '__main__':
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = API(auth)
l = StdOutListener(api)
stream = Stream(auth, l)
stream.filter(track=['#swinburne']) #Change this to #swinburneweather or something if you want to track something else!
| mit | 8,585,144,835,983,826,000 | 34.704762 | 123 | 0.606828 | false |
pescobar/easybuild-framework | test/framework/sandbox/easybuild/easyblocks/generic/modulerc.py | 2 | 1464 | ##
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Dummy ModuleRC easyblock.
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.framework.easyblock import EasyBlock
class ModuleRC(EasyBlock):
"""Dummy implementation of generic easyblock that generates .modulerc files."""
def configure_step(self):
pass
def build_step(self):
pass
def install_step(self):
pass
def sanity_check_step(self):
pass
| gpl-2.0 | -6,024,385,575,238,901,000 | 30.826087 | 96 | 0.728142 | false |
Tomsod/gemrb | gemrb/GUIScripts/iwd2/SPParty2.py | 3 | 1253 | # GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#Single Player Party Select
import GemRB
from GameCheck import MAX_PARTY_SIZE
def OnLoad():
LoadPartyCharacters()
GemRB.SetNextScript("SPPartyFormation")
return
#loading characters from party.ini
def LoadPartyCharacters():
i = GemRB.GetVar("PartyIdx")
Tag = "Party " + str(i)
for j in range(1, min(6, MAX_PARTY_SIZE)+1):
Key = "Char"+str(j)
CharName = GemRB.GetINIPartyKey(Tag, Key, "")
if CharName !="":
GemRB.CreatePlayer(CharName, j, 1)
return
| gpl-2.0 | 3,578,537,500,143,461,000 | 32.864865 | 81 | 0.743017 | false |
allenlavoie/tensorflow | tensorflow/contrib/signal/python/kernel_tests/spectral_ops_test.py | 25 | 14294 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for spectral_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.signal.python.ops import spectral_ops
from tensorflow.contrib.signal.python.ops import window_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.platform import test
class SpectralOpsTest(test.TestCase):
@staticmethod
def _np_hann_periodic_window(length):
if length == 1:
return np.ones(1)
odd = length % 2
if not odd:
length += 1
window = 0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))
if not odd:
window = window[:-1]
return window
@staticmethod
def _np_frame(data, window_length, hop_length):
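    """Slices `data` into overlapping frames of `window_length` samples,
    hopping by `hop_length`, using a zero-copy numpy stride trick."""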
num_frames = 1 + int(np.floor((len(data) - window_length) // hop_length))
shape = (num_frames, window_length)
strides = (data.strides[0] * hop_length, data.strides[0])
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@staticmethod
def _np_stft(data, fft_length, hop_length, window_length):
frames = SpectralOpsTest._np_frame(data, window_length, hop_length)
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return np.fft.rfft(frames * window, fft_length)
@staticmethod
def _np_inverse_stft(stft, fft_length, hop_length, window_length):
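    """Reconstructs a time-domain signal from `stft` via an IRFFT of each
    frame, windowing, and overlap-add."""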
frames = np.fft.irfft(stft, fft_length)
# Pad or truncate frames's inner dimension to window_length.
frames = frames[..., :window_length]
frames = np.pad(frames, [[0, 0]] * (frames.ndim - 1) +
[[0, max(0, window_length - frames.shape[-1])]], "constant")
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return SpectralOpsTest._np_overlap_add(frames * window, hop_length)
@staticmethod
def _np_overlap_add(stft, hop_length):
num_frames, window_length = np.shape(stft)
# Output length will be one complete window, plus another hop_length's
# worth of points for each additional window.
output_length = window_length + (num_frames - 1) * hop_length
output = np.zeros(output_length)
for i in range(num_frames):
output[i * hop_length:i * hop_length + window_length] += stft[i,]
return output
def _compare(self, signal, frame_length, frame_step, fft_length):
with spectral_ops_test_util.fft_kernel_label_map(), (
self.test_session(use_gpu=True)) as sess:
actual_stft = spectral_ops.stft(
signal, frame_length, frame_step, fft_length, pad_end=False)
signal_ph = array_ops.placeholder(dtype=dtypes.as_dtype(signal.dtype))
actual_stft_from_ph = spectral_ops.stft(
signal_ph, frame_length, frame_step, fft_length, pad_end=False)
actual_inverse_stft = spectral_ops.inverse_stft(
actual_stft, frame_length, frame_step, fft_length)
actual_stft, actual_stft_from_ph, actual_inverse_stft = sess.run(
[actual_stft, actual_stft_from_ph, actual_inverse_stft],
feed_dict={signal_ph: signal})
actual_stft_ph = array_ops.placeholder(dtype=actual_stft.dtype)
actual_inverse_stft_from_ph = sess.run(
spectral_ops.inverse_stft(
actual_stft_ph, frame_length, frame_step, fft_length),
feed_dict={actual_stft_ph: actual_stft})
# Confirm that there is no difference in output when shape/rank is fully
# unknown or known.
self.assertAllClose(actual_stft, actual_stft_from_ph)
self.assertAllClose(actual_inverse_stft, actual_inverse_stft_from_ph)
expected_stft = SpectralOpsTest._np_stft(
signal, fft_length, frame_step, frame_length)
self.assertAllClose(expected_stft, actual_stft, 1e-4, 1e-4)
expected_inverse_stft = SpectralOpsTest._np_inverse_stft(
expected_stft, fft_length, frame_step, frame_length)
self.assertAllClose(
expected_inverse_stft, actual_inverse_stft, 1e-4, 1e-4)
def test_shapes(self):
with spectral_ops_test_util.fft_kernel_label_map(), (
self.test_session(use_gpu=True)):
signal = np.zeros((512,)).astype(np.float32)
# If fft_length is not provided, the smallest enclosing power of 2 of
# frame_length (8) is used.
stft = spectral_ops.stft(signal, frame_length=7, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], stft.eval().shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], stft.eval().shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
fft_length=16, pad_end=True)
self.assertAllEqual([64, 9], stft.shape.as_list())
self.assertAllEqual([64, 9], stft.eval().shape)
stft = spectral_ops.stft(signal, frame_length=16, frame_step=8,
fft_length=8, pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], stft.eval().shape)
stft = np.zeros((32, 9)).astype(np.complex64)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length=8,
fft_length=16, frame_step=8)
expected_length = (stft.shape[0] - 1) * 8 + 8
self.assertAllEqual([None], inverse_stft.shape.as_list())
self.assertAllEqual([expected_length], inverse_stft.eval().shape)
def test_stft_and_inverse_stft(self):
"""Test that spectral_ops.stft/inverse_stft match a NumPy implementation."""
# Tuples of (signal_length, frame_length, frame_step, fft_length).
test_configs = [
(512, 64, 32, 64),
(512, 64, 64, 64),
(512, 72, 64, 64),
(512, 64, 25, 64),
(512, 25, 15, 36),
(123, 23, 5, 42),
]
for signal_length, frame_length, frame_step, fft_length in test_configs:
signal = np.random.random(signal_length).astype(np.float32)
self._compare(signal, frame_length, frame_step, fft_length)
def test_stft_round_trip(self):
# Tuples of (signal_length, frame_length, frame_step, fft_length,
# threshold, corrected_threshold).
test_configs = [
# 87.5% overlap.
(4096, 256, 32, 256, 1e-5, 1e-6),
# 75% overlap.
(4096, 256, 64, 256, 1e-5, 1e-6),
# Odd frame hop.
(4096, 128, 25, 128, 1e-3, 1e-6),
# Odd frame length.
(4096, 127, 32, 128, 1e-3, 1e-6),
# 50% overlap.
(4096, 128, 64, 128, 0.40, 1e-6),
]
for (signal_length, frame_length, frame_step, fft_length, threshold,
corrected_threshold) in test_configs:
# Generate a random white Gaussian signal.
signal = random_ops.random_normal([signal_length])
with spectral_ops_test_util.fft_kernel_label_map(), (
self.test_session(use_gpu=True)) as sess:
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,
pad_end=False)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,
fft_length)
inverse_stft_corrected = spectral_ops.inverse_stft(
stft, frame_length, frame_step, fft_length,
window_fn=spectral_ops.inverse_stft_window_fn(frame_step))
signal, inverse_stft, inverse_stft_corrected = sess.run(
[signal, inverse_stft, inverse_stft_corrected])
# Truncate signal to the size of inverse stft.
signal = signal[:inverse_stft.shape[0]]
# Ignore the frame_length samples at either edge.
signal = signal[frame_length:-frame_length]
inverse_stft = inverse_stft[frame_length:-frame_length]
inverse_stft_corrected = inverse_stft_corrected[
frame_length:-frame_length]
# Check that the inverse and original signal are close up to a scale
# factor.
inverse_stft_scaled = inverse_stft / np.mean(np.abs(inverse_stft))
signal_scaled = signal / np.mean(np.abs(signal))
self.assertLess(np.std(inverse_stft_scaled - signal_scaled), threshold)
# Check that the inverse with correction and original signal are close.
self.assertLess(np.std(inverse_stft_corrected - signal),
corrected_threshold)
def test_inverse_stft_window_fn(self):
"""Test that inverse_stft_window_fn has unit gain at each window phase."""
# Tuples of (frame_length, frame_step).
test_configs = [
(256, 32),
(256, 64),
(128, 25),
(127, 32),
(128, 64),
]
for (frame_length, frame_step) in test_configs:
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
with self.test_session(use_gpu=True) as sess:
hann_window, inverse_window = sess.run([hann_window, inverse_window])
# Expect unit gain at each phase of the window.
product_window = hann_window * inverse_window
for i in range(frame_step):
self.assertAllClose(1.0, np.sum(product_window[i::frame_step]))
def test_inverse_stft_window_fn_special_case(self):
"""Test inverse_stft_window_fn in special overlap = 3/4 case."""
# Cases in which frame_length is an integer multiple of 4 * frame_step are
# special because they allow exact reproduction of the waveform with a
# squared Hann window (Hann window in both forward and reverse transforms).
# In the case where frame_length = 4 * frame_step, that combination
# produces a constant gain of 1.5, and so the corrected window will be the
# Hann window / 1.5.
# Tuples of (frame_length, frame_step).
test_configs = [
(256, 64),
(128, 32),
]
for (frame_length, frame_step) in test_configs:
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
with self.test_session(use_gpu=True) as sess:
hann_window, inverse_window = sess.run([hann_window, inverse_window])
self.assertAllClose(hann_window, inverse_window * 1.5)
@staticmethod
def _compute_stft_gradient(signal, frame_length=32, frame_step=16,
fft_length=32):
"""Computes the gradient of the STFT with respect to `signal`."""
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length)
magnitude_stft = math_ops.abs(stft)
loss = math_ops.reduce_sum(magnitude_stft)
return gradients_impl.gradients([loss], [signal])[0]
def test_gradients(self):
"""Test that spectral_ops.stft has a working gradient."""
with spectral_ops_test_util.fft_kernel_label_map(), (
self.test_session(use_gpu=True)) as sess:
signal_length = 512
# An all-zero signal has all zero gradients with respect to the sum of the
# magnitude STFT.
empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)
empty_signal_gradient = sess.run(
self._compute_stft_gradient(empty_signal))
self.assertTrue((empty_signal_gradient == 0.0).all())
# A sinusoid will have non-zero components of its gradient with respect to
# the sum of the magnitude STFT.
sinusoid = math_ops.sin(
2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))
sinusoid_gradient = sess.run(self._compute_stft_gradient(sinusoid))
self.assertFalse((sinusoid_gradient == 0.0).all())
def test_gradients_numerical(self):
with spectral_ops_test_util.fft_kernel_label_map(), (
self.test_session(use_gpu=True)):
      # Tuples of (signal_length, frame_length, frame_step, fft_length).
# TODO(rjryan): Investigate why STFT gradient error is so high.
test_configs = [
(64, 16, 8, 16),
(64, 16, 16, 16),
(64, 16, 7, 16),
(64, 7, 4, 9),
(29, 5, 1, 10),
]
for (signal_length, frame_length, frame_step, fft_length) in test_configs:
signal_shape = [signal_length]
signal = random_ops.random_uniform(signal_shape)
stft_shape = [max(0, 1 + (signal_length - frame_length) // frame_step),
fft_length // 2 + 1]
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,
pad_end=False)
inverse_stft_shape = [(stft_shape[0] - 1) * frame_step + frame_length]
inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,
fft_length)
stft_error = test.compute_gradient_error(signal, [signal_length],
stft, stft_shape)
inverse_stft_error = test.compute_gradient_error(
stft, stft_shape, inverse_stft, inverse_stft_shape)
self.assertLess(stft_error, 2e-3)
self.assertLess(inverse_stft_error, 5e-4)
if __name__ == "__main__":
test.main()
| apache-2.0 | -2,508,023,253,568,214,000 | 42.054217 | 80 | 0.635441 | false |
edcast-inc/edx-platform-edcast | lms/djangoapps/instructor_task/tasks.py | 29 | 11526 | """
This file contains tasks that are designed to perform background operations on the
running state of a course.
At present, these tasks all operate on StudentModule objects in one way or another,
so they share a visitor architecture. Each task defines an "update function" that
takes a module_descriptor, a particular StudentModule object, and xmodule_instance_args.
A task may optionally specify a "filter function" that takes a query for StudentModule
objects, and adds additional filter clauses.
A task also passes through "xmodule_instance_args", that are used to provide
information to our code that instantiates xmodule instances.
The task definition then calls the traversal function, passing in the three arguments
above, along with the id value for an InstructorTask object. The InstructorTask
object contains a 'task_input' row which is a JSON-encoded dict containing
a problem URL and optionally a student. These are used to set up the initial value
of the query for traversing StudentModule objects.
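A minimal sketch of the JSON-encoded 'task_input' described above (the
identifiers are made up for illustration and do not refer to a real course):
    {"problem_url": "i4x://SomeOrg/SomeCourse/problem/Some_Problem",
     "student": "a_username"}
The 'student' entry is optional.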
"""
import logging
from functools import partial
from django.conf import settings
from django.utils.translation import ugettext_noop
from celery import task
from bulk_email.tasks import perform_delegate_email_batches
from instructor_task.tasks_helper import (
run_main_task,
BaseInstructorTask,
perform_module_state_update,
rescore_problem_module_state,
reset_attempts_module_state,
delete_problem_module_state,
upload_grades_csv,
upload_problem_grade_report,
upload_students_csv,
cohort_students_and_upload,
upload_enrollment_report,
upload_may_enroll_csv,
upload_exec_summary_report,
generate_students_certificates,
)
TASK_LOG = logging.getLogger('edx.celery.task')
@task(base=BaseInstructorTask) # pylint: disable=not-callable
def rescore_problem(entry_id, xmodule_instance_args):
"""Rescores a problem in a course, for all students or one specific student.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'problem_url': the full URL to the problem to be rescored. (required)
'student': the identifier (username or email) of a particular user whose
problem submission should be rescored. If not specified, all problem
submissions for the problem will be rescored.
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('rescored')
update_fcn = partial(rescore_problem_module_state, xmodule_instance_args)
def filter_fcn(modules_to_update):
"""Filter that matches problems which are marked as being done"""
return modules_to_update.filter(state__contains='"done": true')
visit_fcn = partial(perform_module_state_update, update_fcn, filter_fcn)
return run_main_task(entry_id, visit_fcn, action_name)
@task(base=BaseInstructorTask) # pylint: disable=not-callable
def reset_problem_attempts(entry_id, xmodule_instance_args):
"""Resets problem attempts to zero for a particular problem for all students in a course.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
      'problem_url': the full URL to the problem whose attempts should be reset. (required)
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('reset')
update_fcn = partial(reset_attempts_module_state, xmodule_instance_args)
visit_fcn = partial(perform_module_state_update, update_fcn, None)
return run_main_task(entry_id, visit_fcn, action_name)
@task(base=BaseInstructorTask) # pylint: disable=not-callable
def delete_problem_state(entry_id, xmodule_instance_args):
"""Deletes problem state entirely for all students on a particular problem in a course.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
      'problem_url': the full URL to the problem whose state should be deleted. (required)
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('deleted')
update_fcn = partial(delete_problem_module_state, xmodule_instance_args)
visit_fcn = partial(perform_module_state_update, update_fcn, None)
return run_main_task(entry_id, visit_fcn, action_name)
@task(base=BaseInstructorTask) # pylint: disable=not-callable
def send_bulk_course_email(entry_id, _xmodule_instance_args):
"""Sends emails to recipients enrolled in a course.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
      'email_id': the id of the CourseEmail object containing the email to be sent. (required)
`_xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance. This is unused here.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('emailed')
visit_fcn = perform_delegate_email_batches
return run_main_task(entry_id, visit_fcn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def calculate_grades_csv(entry_id, xmodule_instance_args):
"""
Grade a course and push the results to an S3 bucket for download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('graded')
TASK_LOG.info(
u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
xmodule_instance_args.get('task_id'), entry_id, action_name
)
task_fn = partial(upload_grades_csv, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def calculate_problem_grade_report(entry_id, xmodule_instance_args):
"""
Generate a CSV for a course containing all students' problem
grades and push the results to an S3 bucket for download.
"""
# Translators: This is a past-tense phrase that is inserted into task progress messages as {action}.
action_name = ugettext_noop('problem distribution graded')
TASK_LOG.info(
u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
xmodule_instance_args.get('task_id'), entry_id, action_name
)
task_fn = partial(upload_problem_grade_report, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def calculate_students_features_csv(entry_id, xmodule_instance_args):
"""
Compute student profile information for a course and upload the
CSV to an S3 bucket for download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('generated')
task_fn = partial(upload_students_csv, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def enrollment_report_features_csv(entry_id, xmodule_instance_args):
"""
    Compute the detailed enrollment report for a course and upload the
    CSV to an S3 bucket for download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('generating_enrollment_report')
task_fn = partial(upload_enrollment_report, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def exec_summary_report_csv(entry_id, xmodule_instance_args):
"""
    Compute the executive summary report for a course and upload the
    generated HTML report to an S3 bucket for download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = 'generating_exec_summary_report'
task_fn = partial(upload_exec_summary_report, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def calculate_may_enroll_csv(entry_id, xmodule_instance_args):
"""
Compute information about invited students who have not enrolled
in a given course yet and upload the CSV to an S3 bucket for
download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('generated')
task_fn = partial(upload_may_enroll_csv, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def generate_certificates(entry_id, xmodule_instance_args):
"""
Grade students and generate certificates.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('certificates generated')
TASK_LOG.info(
u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
xmodule_instance_args.get('task_id'), entry_id, action_name
)
task_fn = partial(generate_students_certificates, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask) # pylint: disable=E1102
def cohort_students(entry_id, xmodule_instance_args):
"""
Cohort students in bulk, and upload the results.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
# An example of such a message is: "Progress: {action} {succeeded} of {attempted} so far"
action_name = ugettext_noop('cohorted')
task_fn = partial(cohort_students_and_upload, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
| agpl-3.0 | 3,658,851,030,294,172,700 | 44.377953 | 112 | 0.735207 | false |
wlx2015/dstat | plugins/dstat_vz_io.py | 4 | 2736 | ### Author: Dag Wieers <[email protected]>
### Example content for /proc/bc/<veid>/ioacct
# read 2773011640320
# write 2095707136000
# dirty 4500342390784
# cancel 4080624041984
# missed 0
# syncs_total 2
# fsyncs_total 1730732
# fdatasyncs_total 3266
# range_syncs_total 0
# syncs_active 0
# fsyncs_active 0
# fdatasyncs_active 0
# range_syncs_active 0
# vfs_reads 3717331387
# vfs_read_chars 3559144863185798078
# vfs_writes 901216138
# vfs_write_chars 23864660931174682
# io_pbs 16
class dstat_plugin(dstat):
def __init__(self):
self.nick = ['read', 'write', 'dirty', 'cancel', 'missed']
self.cols = len(self.nick)
def check(self):
if not os.path.exists('/proc/vz'):
raise Exception, 'System does not have OpenVZ support'
elif not os.path.exists('/proc/bc'):
raise Exception, 'System does not have (new) OpenVZ beancounter support'
elif not glob.glob('/proc/bc/*/ioacct'):
raise Exception, 'System does not have any OpenVZ containers'
info(1, 'Module %s is still experimental.' % self.filename)
def name(self):
return ['ve/'+name for name in self.vars]
def vars(self):
ret = []
if not op.full:
varlist = ['total',]
else:
varlist = [os.path.basename(veid) for veid in glob.glob('/proc/vz/*')]
ret = varlist
return ret
def extract(self):
for name in self.vars:
self.set2['total'] = {}
for line in dopen('/proc/bc/%s/ioacct' % name).readlines():
l = line.split()
if len(l) != 2: continue
if l[0] not in self.nick: continue
index = self.nick.index(l[0])
self.set2[name][index] = long(l[1])
self.set2['total'][index] = self.set2['total'][index] + long(l[1])
# print name, self.val[name], self.set2[name][0], self.set2[name][1]
# print name, self.val[name], self.set1[name][0], self.set1[name][1]
self.val[name] = map(lambda x, y: (y - x) / elapsed, self.set1[name], self.set2[name])
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| gpl-2.0 | -150,684,117,891,641,200 | 39.835821 | 98 | 0.472588 | false |
willingc/oh-mainline | vendor/packages/scrapy/scrapy/tests/test_utils_response.py | 28 | 3908 | import os
import unittest
import urlparse
from scrapy.http import Response, TextResponse, HtmlResponse
from scrapy.utils.response import body_or_str, response_httprepr, open_in_browser, \
get_meta_refresh
__doctests__ = ['scrapy.utils.response']
class ResponseUtilsTest(unittest.TestCase):
dummy_response = TextResponse(url='http://example.org/', body='dummy_response')
def test_body_or_str_input(self):
self.assertTrue(isinstance(body_or_str(self.dummy_response), basestring))
self.assertTrue(isinstance(body_or_str('text'), basestring))
self.assertRaises(Exception, body_or_str, 2)
def test_body_or_str_extraction(self):
self.assertEqual(body_or_str(self.dummy_response), 'dummy_response')
self.assertEqual(body_or_str('text'), 'text')
def test_body_or_str_encoding(self):
self.assertTrue(isinstance(body_or_str(self.dummy_response, unicode=False), str))
self.assertTrue(isinstance(body_or_str(self.dummy_response, unicode=True), unicode))
self.assertTrue(isinstance(body_or_str('text', unicode=False), str))
self.assertTrue(isinstance(body_or_str('text', unicode=True), unicode))
self.assertTrue(isinstance(body_or_str(u'text', unicode=False), str))
self.assertTrue(isinstance(body_or_str(u'text', unicode=True), unicode))
def test_response_httprepr(self):
r1 = Response("http://www.example.com")
self.assertEqual(response_httprepr(r1), 'HTTP/1.1 200 OK\r\n\r\n')
r1 = Response("http://www.example.com", status=404, headers={"Content-type": "text/html"}, body="Some body")
self.assertEqual(response_httprepr(r1), 'HTTP/1.1 404 Not Found\r\nContent-Type: text/html\r\n\r\nSome body')
r1 = Response("http://www.example.com", status=6666, headers={"Content-type": "text/html"}, body="Some body")
self.assertEqual(response_httprepr(r1), 'HTTP/1.1 6666 \r\nContent-Type: text/html\r\n\r\nSome body')
def test_open_in_browser(self):
        url = "http://www.example.com/some/page.html"
body = "<html> <head> <title>test page</title> </head> <body>test body</body> </html>"
def browser_open(burl):
path = urlparse.urlparse(burl).path
if not os.path.exists(path):
path = burl.replace('file://', '')
bbody = open(path).read()
assert '<base href="%s">' % url in bbody, "<base> tag not added"
return True
response = HtmlResponse(url, body=body)
assert open_in_browser(response, _openfunc=browser_open), \
"Browser not called"
self.assertRaises(TypeError, open_in_browser, Response(url, body=body), \
debug=True)
def test_get_meta_refresh(self):
r1 = HtmlResponse("http://www.example.com", body="""
<html>
<head><title>Dummy</title><meta http-equiv="refresh" content="5;url=http://example.org/newpage" /></head>
<body>blahablsdfsal&</body>
</html>""")
r2 = HtmlResponse("http://www.example.com", body="""
<html>
<head><title>Dummy</title><noScript>
<meta http-equiv="refresh" content="5;url=http://example.org/newpage" /></head>
</noSCRIPT>
<body>blahablsdfsal&</body>
</html>""")
r3 = HtmlResponse("http://www.example.com", body="""
<noscript><meta http-equiv="REFRESH" content="0;url=http://www.example.com/newpage</noscript>
<script type="text/javascript">
if(!checkCookies()){
document.write('<meta http-equiv="REFRESH" content="0;url=http://www.example.com/newpage">');
}
</script>
""")
self.assertEqual(get_meta_refresh(r1), (5.0, 'http://example.org/newpage'))
self.assertEqual(get_meta_refresh(r2), (None, None))
self.assertEqual(get_meta_refresh(r3), (None, None))
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | -6,672,220,744,340,988,000 | 44.976471 | 117 | 0.635107 | false |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v2_10_0/event_broker.py | 16 | 42430 | from ..broker import Broker
class EventBroker(Broker):
controller = "events"
def index(self, **kwargs):
"""Lists the available events. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EventCategory: The category of an event.
:type EventCategory: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EventCategory: The category of an event.
:type EventCategory: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EventID: The internal NetMRI identifier of an event.
:type EventID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EventID: The internal NetMRI identifier of an event.
:type EventID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EventTimestamp: The date and time this record was collected.
:type EventTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EventTimestamp: The date and time this record was collected.
:type EventTimestamp: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EventType: The type of an event.
:type EventType: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EventType: The type of an event.
:type EventType: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the events with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the events with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of event methods. The listed methods will be called on each event returned and included in the output. Available methods are: data_source.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` EventID
:param sort: The data field(s) to use for sorting the output. Default is EventID. Valid values are EventID, DataSourceID, EventCategory, EventCategoryID, EventType, EventTimestamp, EventState, EventDetail.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Event. Valid values are EventID, DataSourceID, EventCategory, EventCategoryID, EventType, EventTimestamp, EventState, EventDetail. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return events: An array of the Event objects that match the specified input criteria.
:rtype events: Array of Event
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
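    # A hedged usage sketch for index(); how the broker instance is obtained
    # depends on the surrounding client wiring and is assumed here, while the
    # keyword arguments are the documented inputs above:
    #   broker = EventBroker(client)   # 'client': an already-configured API client (assumption)
    #   events = broker.index(EventCategory=["Syslog"], sort=["EventTimestamp"],
    #                         dir=["desc"], limit=100)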
def show(self, **kwargs):
"""Shows the details for the specified event.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param EventID: The internal NetMRI identifier of an event.
:type EventID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of event methods. The listed methods will be called on each event returned and included in the output. Available methods are: data_source.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return event: The event identified by the specified EventID.
:rtype event: Event
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
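    # Sketch (the EventID value is illustrative): broker.show(EventID=1234)
    # returns the single Event identified by that EventID.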
def search(self, **kwargs):
"""Lists the available events matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EventCategory: The category of an event.
:type EventCategory: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EventCategory: The category of an event.
:type EventCategory: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EventCategoryID: The internal NetMRI identifier of a category in an event.
:type EventCategoryID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EventCategoryID: The internal NetMRI identifier of a category in an event.
:type EventCategoryID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EventDetail: The details of an event.
:type EventDetail: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EventDetail: The details of an event.
:type EventDetail: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EventID: The internal NetMRI identifier of an event.
:type EventID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EventID: The internal NetMRI identifier of an event.
:type EventID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EventState: The state of an event.
:type EventState: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EventState: The state of an event.
:type EventState: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EventTimestamp: The date and time this record was collected.
:type EventTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EventTimestamp: The date and time this record was collected.
:type EventTimestamp: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EventType: The type of an event.
:type EventType: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EventType: The type of an event.
:type EventType: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the events with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the events with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of event methods. The listed methods will be called on each event returned and included in the output. Available methods are: data_source.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` EventID
:param sort: The data field(s) to use for sorting the output. Default is EventID. Valid values are EventID, DataSourceID, EventCategory, EventCategoryID, EventType, EventTimestamp, EventState, EventDetail.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Event. Valid values are EventID, DataSourceID, EventCategory, EventCategoryID, EventType, EventTimestamp, EventState, EventDetail. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against events, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, EventCategory, EventCategoryID, EventDetail, EventID, EventState, EventTimestamp, EventType.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Keep in mind that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return events: An array of the Event objects that match the specified input criteria.
:rtype events: Array of Event
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available events matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, EventCategory, EventCategoryID, EventDetail, EventID, EventState, EventTimestamp, EventType.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EventCategory: The operator to apply to the field EventCategory. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EventCategory: The category of an event. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_EventCategory: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EventCategory: If op_EventCategory is specified, the field named in this input will be compared to the value in EventCategory using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EventCategory must be specified if op_EventCategory is specified.
:type val_f_EventCategory: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EventCategory: If op_EventCategory is specified, this value will be compared to the value in EventCategory using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EventCategory must be specified if op_EventCategory is specified.
:type val_c_EventCategory: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EventCategoryID: The operator to apply to the field EventCategoryID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EventCategoryID: The internal NetMRI identifier of a category in an event. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_EventCategoryID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EventCategoryID: If op_EventCategoryID is specified, the field named in this input will be compared to the value in EventCategoryID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EventCategoryID must be specified if op_EventCategoryID is specified.
:type val_f_EventCategoryID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EventCategoryID: If op_EventCategoryID is specified, this value will be compared to the value in EventCategoryID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EventCategoryID must be specified if op_EventCategoryID is specified.
:type val_c_EventCategoryID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EventDetail: The operator to apply to the field EventDetail. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EventDetail: The details of an event. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_EventDetail: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EventDetail: If op_EventDetail is specified, the field named in this input will be compared to the value in EventDetail using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EventDetail must be specified if op_EventDetail is specified.
:type val_f_EventDetail: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EventDetail: If op_EventDetail is specified, this value will be compared to the value in EventDetail using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EventDetail must be specified if op_EventDetail is specified.
:type val_c_EventDetail: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EventID: The operator to apply to the field EventID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EventID: The internal NetMRI identifier of an event. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_EventID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EventID: If op_EventID is specified, the field named in this input will be compared to the value in EventID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EventID must be specified if op_EventID is specified.
:type val_f_EventID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EventID: If op_EventID is specified, this value will be compared to the value in EventID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EventID must be specified if op_EventID is specified.
:type val_c_EventID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EventState: The operator to apply to the field EventState. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EventState: The state of an event. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_EventState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EventState: If op_EventState is specified, the field named in this input will be compared to the value in EventState using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EventState must be specified if op_EventState is specified.
:type val_f_EventState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EventState: If op_EventState is specified, this value will be compared to the value in EventState using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EventState must be specified if op_EventState is specified.
:type val_c_EventState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EventTimestamp: The operator to apply to the field EventTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EventTimestamp: The date and time this record was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_EventTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EventTimestamp: If op_EventTimestamp is specified, the field named in this input will be compared to the value in EventTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EventTimestamp must be specified if op_EventTimestamp is specified.
:type val_f_EventTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EventTimestamp: If op_EventTimestamp is specified, this value will be compared to the value in EventTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EventTimestamp must be specified if op_EventTimestamp is specified.
:type val_c_EventTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EventType: The operator to apply to the field EventType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EventType: The type of an event. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_EventType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EventType: If op_EventType is specified, the field named in this input will be compared to the value in EventType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EventType must be specified if op_EventType is specified.
:type val_f_EventType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EventType: If op_EventType is specified, this value will be compared to the value in EventType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EventType must be specified if op_EventType is specified.
:type val_c_EventType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the events with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the events with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of event methods. The listed methods will be called on each event returned and included in the output. Available methods are: data_source.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` EventID
:param sort: The data field(s) to use for sorting the output. Default is EventID. Valid values are EventID, DataSourceID, EventCategory, EventCategoryID, EventType, EventTimestamp, EventState, EventDetail.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Event. Valid values are EventID, DataSourceID, EventCategory, EventCategoryID, EventType, EventTimestamp, EventState, EventDetail. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Keep in mind that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return events: An array of the Event objects that match the specified input criteria.
:rtype events: Array of Event
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
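    # Sketch of the operator/value convention documented above (values are
    # illustrative; 'broker' is assumed to be an EventBroker instance):
    #   broker.find(op_EventCategory="like", val_c_EventCategory="%Device%",
    #               op_EventTimestamp=">", val_c_EventTimestamp="2016-01-01 00:00:00",
    #               limit=50)
    # Each op_<Field> names a comparison operator; the matching val_c_<Field>
    # (constant) or val_f_<Field> (another field name) supplies the right-hand side.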
def create(self, **kwargs):
"""Creates a new event.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param EventID: The internal NetMRI identifier of an event.
:type EventID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param EventCategory: The category of an event.
:type EventCategory: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param EventCategoryID: The internal NetMRI identifier of a category in an event.
:type EventCategoryID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param EventType: The type of an event.
:type EventType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param EventTimestamp: The date and time this record was collected.
:type EventTimestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param EventState: The state of an event.
:type EventState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param EventDetail: The details of an event.
:type EventDetail: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the newly created event.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the newly created event.
:rtype model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return uri: A URI that may be used to retrieve the newly created event.
:rtype uri: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return event: The newly created event.
:rtype event: Event
"""
return self.api_request(self._get_method_fullname("create"), kwargs)
| apache-2.0 | 8,320,916,558,051,046,000 | 46.144444 | 510 | 0.577587 | false |
redhat-openstack/glance_store | glance_store/location.py | 3 | 6035 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A class that describes the location of an image in Glance.
In Glance, an image can either be **stored** in Glance, or it can be
**registered** in Glance but actually be stored somewhere else.
We needed a class that could support the various ways that Glance
describes where exactly an image is stored.
An image in Glance has two location properties: the image URI
and the image storage URI.
The image URI is essentially the permalink identifier for the image.
It is displayed in the output of various Glance API calls and,
while read-only, is entirely user-facing. It shall **not** contain any
security credential information at all. The Glance image URI shall
be the host:port of that Glance API server along with /images/<IMAGE_ID>.
The Glance storage URI is an internal URI structure that Glance
uses to maintain critical information about how to access the images
that it stores in its storage backends. It **may contain** security
credentials and is **not** user-facing.
"""
import logging
from oslo_config import cfg
from six.moves import urllib
from glance_store import exceptions
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
SCHEME_TO_CLS_MAP = {}
def get_location_from_uri(uri, conf=CONF):
"""
Given a URI, return a Location object that has had an appropriate
store parse the URI.
:param uri: A URI that could come from the end-user in the Location
attribute/header.
:param conf: The global configuration.
Example URIs:
https://user:[email protected]:80/images/some-id
http://images.oracle.com/123456
swift://example.com/container/obj-id
swift://user:account:[email protected]/container/obj-id
swift+http://user:account:[email protected]/container/obj-id
s3://accesskey:[email protected]/bucket/key-id
s3+https://accesskey:[email protected]/bucket/key-id
file:///var/lib/glance/images/1
cinder://volume-id
"""
pieces = urllib.parse.urlparse(uri)
if pieces.scheme not in SCHEME_TO_CLS_MAP.keys():
raise exceptions.UnknownScheme(scheme=pieces.scheme)
scheme_info = SCHEME_TO_CLS_MAP[pieces.scheme]
return Location(pieces.scheme, scheme_info['location_class'],
conf, uri=uri)
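# Example (a sketch): assuming the 'file' scheme has been registered via
# register_scheme_map() below, a stored URI can be parsed like this:
#   loc = get_location_from_uri('file:///var/lib/glance/images/1')
#   loc.store_name        # -> 'file'
#   loc.get_store_uri()   # -> the internal URI built by the file StoreLocation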
def register_scheme_map(scheme_map):
"""
    Given a mapping of 'scheme' to store information, adds the mapping to the
    known list of schemes.
    This function overrides existing entries for a scheme.
"""
for (k, v) in scheme_map.items():
LOG.debug("Registering scheme %s with %s", k, v)
SCHEME_TO_CLS_MAP[k] = v
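# Sketch of the mapping shape register_scheme_map() expects; only the
# 'location_class' key is consumed by get_location_from_uri() above, and the
# class names here are placeholders:
#   register_scheme_map({
#       'file': {'location_class': FilesystemStoreLocation},
#       'http': {'location_class': HttpStoreLocation},
#   })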
class Location(object):
"""
Class describing the location of an image that Glance knows about
"""
def __init__(self, store_name, store_location_class, conf,
uri=None, image_id=None, store_specs=None):
"""
Create a new Location object.
:param store_name: The string identifier/scheme of the storage backend
:param store_location_class: The store location class to use
for this location instance.
:param image_id: The identifier of the image in whatever storage
backend is used.
:param uri: Optional URI to construct location from
:param store_specs: Dictionary of information about the location
of the image that is dependent on the backend
store
"""
self.store_name = store_name
self.image_id = image_id
self.store_specs = store_specs or {}
self.conf = conf
self.store_location = store_location_class(self.store_specs, conf)
if uri:
self.store_location.parse_uri(uri)
def get_store_uri(self):
"""
Returns the Glance image URI, which is the host:port of the API server
along with /images/<IMAGE_ID>
"""
return self.store_location.get_uri()
def get_uri(self):
return None
class StoreLocation(object):
"""
Base class that must be implemented by each store
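    A minimal illustrative subclass (a sketch only; real store drivers
    implement these methods with scheme-specific logic):
        class DummyStoreLocation(StoreLocation):
            def get_uri(self):
                return 'dummy://%s' % self.specs.get('id', '')
            def parse_uri(self, uri):
                self.specs['id'] = uri[len('dummy://'):]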
"""
def __init__(self, store_specs, conf):
self.conf = conf
self.specs = store_specs
if self.specs:
self.process_specs()
def process_specs(self):
"""
Subclasses should implement any processing of the self.specs collection
such as storing credentials and possibly establishing connections.
"""
pass
def get_uri(self):
"""
Subclasses should implement a method that returns an internal URI that,
when supplied to the StoreLocation instance, can be interpreted by the
StoreLocation's parse_uri() method. The URI returned from this method
shall never be public and only used internally within Glance, so it is
fine to encode credentials in this URI.
"""
raise NotImplementedError("StoreLocation subclass must implement "
"get_uri()")
def parse_uri(self, uri):
"""
Subclasses should implement a method that accepts a string URI and
sets appropriate internal fields such that a call to get_uri() will
return a proper internal URI
"""
raise NotImplementedError("StoreLocation subclass must implement "
"parse_uri()")
| apache-2.0 | -1,717,081,614,566,471,400 | 34.292398 | 79 | 0.658824 | false |
axltxl/zenfig | zenfig/kit.py | 1 | 1974 | # -*- coding: utf-8 -*-
"""
zenfig.kit
~~~~~~~~
Kit interface
:copyright: (c) 2016 by Alejandro Ricoveri
:license: MIT, see LICENSE for more details.
"""
import os
import re
from . import log
from .util import autolog
from .kits import git, local, KitException
from .kits.git import GitRepoKit
@autolog
def get_kit(kit_name, *, provider=None):
"""
Initialize kit interface
    This will deduce what type of kit it is dealing with and
    load the appropriate interface based on kit_name.
:param kit_name: Name of the kit to be loaded
:param provider: Kit provider to be used to load kit_name
:returns: a Kit holding all relevant information about kit_name
"""
# Local kit version requested by the user
kit_version = None
# if a provider has not been enforced,
# then deduce the proper provider for kit_name
if provider is None:
# test whether kit_name is an absolute directory
if re.match("^\/", kit_name):
log.msg_debug("Using '{}' as absolute directory".format(kit_name))
provider = local
# test whether kit_name is a relative directory
elif os.path.isdir(os.path.join(os.getcwd(), kit_name)):
log.msg_debug("Using '{}' as relative directory".format(kit_name))
provider = local
# see whether kit_name matches git kit criteria
elif re.match(GitRepoKit.RE_GIT_REPO_SHORT, kit_name) \
or re.match(GitRepoKit.RE_GIT_REPO_URL, kit_name):
if re.match('.*==.*', kit_name):
kit_name, kit_version = kit_name.split('==')
provider = git
# when everything else fails ...
else:
raise KitException("'{}' is not a valid provider".format(provider))
else:
provider = local
log.msg_debug("Kit provider '{}' has been imposed!".format(provider))
# Get a Kit instance from the provider
return provider.get_kit(kit_name, kit_version)
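# Usage sketch (hypothetical kit names, assuming RE_GIT_REPO_SHORT accepts a
# user/repo form):
#
#     get_kit('/home/user/mykit')      # absolute path -> local provider
#     get_kit('mykit')                 # relative directory -> local provider
#     get_kit('user/somekit==1.0')     # git-style name pinned to a version
#
# The '==version' suffix is only honoured for git kits, as implemented above.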
| mit | 4,460,714,536,449,111,000 | 28.029412 | 79 | 0.633739 | false |
100sms/yibai-python-sdk | demo/YibaiClientTest.py | 1 | 1483 | # encoding=utf8
import json
from yibai.api import *
server_url = 'https://sms.100sms.cn/api'
# This is your apikey. Log in to https://web.100sms.cn/ to view your apikey.
apikey = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
client = YibaiClient(server_url, apikey)
def test_sms_batch_submit():
try:
response = client.sms_batch_submit([
{'mobile': '187xxxxxxxx', 'message': '【亿佰云通讯】您的验证码是:1234'},
{'mobile': '186xxxxxxxx', 'message': '【亿佰云通讯】您的验证码是:5678'}
])
print json.dumps(response)
except YibaiApiError as e:
print 'YibaiApiError. code: {0}, message: {1}'.format(e.code, e.message.encode('utf8'))
except Exception as e:
print 'Unexpected error. ' + e.message
def test_sms_pull_status_report():
try:
response = client.sms_pull_status_report()
print json.dumps(response)
except YibaiApiError as e:
print 'YibaiApiError. code: {0}, message: {1}'.format(e.code, e.message.encode('utf8'))
except Exception as e:
print 'Unexpected error.' + e.message
def test_sms_pull_reply_message():
try:
response = client.sms_pull_reply_message()
print json.dumps(response)
except YibaiApiError as e:
print 'YibaiApiError. code: {0}, message: {1}'.format(e.code, e.message.encode('utf8'))
except Exception as e:
print 'Unexpected error.' + e.message
test_sms_pull_reply_message()
| mit | -6,034,683,769,823,462,000 | 30.133333 | 95 | 0.645967 | false |
tigerneil/chainer | tests/chainer_tests/functions_tests/math_tests/test_sum.py | 11 | 2009 | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class TestSum(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(numpy.float32)
self.gy = numpy.array(2, dtype=numpy.float32)
def check_forward(self, x_data, axis=None):
x = chainer.Variable(x_data)
y = functions.sum(x, axis=axis)
self.assertEqual(y.data.dtype, numpy.float32)
y_expect = self.x.sum(axis=axis)
gradient_check.assert_allclose(y_expect, y.data)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
for i in range(self.x.ndim):
self.check_forward(self.x, axis=i)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
for i in range(self.x.ndim):
self.check_forward(cuda.to_gpu(self.x), axis=i)
def check_backward(self, x_data, y_grad, axis=None):
x = chainer.Variable(x_data)
y = functions.sum(x, axis=axis)
y.grad = y_grad
y.backward()
gx_expect = numpy.full_like(self.x, self.gy)
gradient_check.assert_allclose(gx_expect, x.grad)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
for i in range(self.x.ndim):
gy = numpy.ones_like(self.x.sum(axis=i)) * self.gy
self.check_backward(self.x, gy, axis=i)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
for i in range(self.x.ndim):
gy = numpy.ones_like(self.x.sum(axis=i)) * self.gy
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(gy), axis=i)
testing.run_module(__name__, __file__)
| mit | 1,990,287,462,342,079,700 | 27.7 | 77 | 0.627178 | false |
ksiomelo/cubix | semantics/search.py | 1 | 10275 | #from org.openrdf.model import *
#from org.openrdf.query import *
#from org.openrdf.repository import *
#from org.openrdf.rio import *
#from org.openrdf.repository.sail import *
#from org.openrdf.repository.manager import RemoteRepositoryManager
#from org.openrdf.query.resultio import *
##from org.openrdf.repository.sail import SailRepository
##import org.openrdf.repository.sail.SailRepository
#from org.openrdf.sail.memory import *
# UNUSED?
from SPARQLWrapper import SPARQLWrapper, SPARQLWrapper2, JSON
import urllib
import urllib2
from xml.dom.minidom import parseString
import httplib2
import simplejson
from fca.context import Context
class Semantic(object):
@staticmethod
def search_owlim(prefix, query):
prefix = """PREFIX :<http://www.w3.org/TR/2003/PR-owl-guide-20031209/wine#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX owl:<http://www.w3.org/2002/07/owl#>
PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
PREFIX xsd:<http://www.w3.org/2001/XMLSchema#>
"""
query = prefix + """ SELECT ?subj ?prop ?obj WHERE { ?o1 a ?class . ?subj a ?o1 . ?subj ?prop ?obj. }"""
# prefix = """PREFIX :<http://www.cubist_project.eu/test#>
# PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
# PREFIX owl:<http://www.w3.org/2002/07/owl#>
# PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
# PREFIX xsd:<http://www.w3.org/2001/XMLSchema#>
# """
#
# query = prefix + """ select distinct ?o1 ?a1 where {
# ?x1 rdf:type :Tissue ; rdfs:label ?o1 .
# ?x1 :has_theiler_stage :theiler_stage_TS07 .
# ?x2 rdf:type :Gene ; rdfs:label ?a1 .
# ?ta :in_tissue ?x1 ; :has_involved_gene ?x2 ; :has_strength :level_detected_derived . }"""
# repositoryManager = RemoteRepositoryManager("http://127.0.0.1:8080/openrdf-sesame")
# repositoryManager.initialize()
#
# #Get the repository to use
# repository = repositoryManager.getRepository("wine")
# repository.setPreferredTupleQueryResultFormat(TupleQueryResultFormat.JSON)
# repository.initialize()
# #Open a connection to this repository
# repositoryConnection = repository.getConnection()
# preparedQuery = repositoryConnection.prepareQuery(QueryLanguage.SPARQL, query);
# result = preparedQuery.evaluate()
#
# return render_to_response('fca/index.html', context_instance=RequestContext(request))
result = False
return result
@staticmethod
def search_sparqlwrapper(prefix, query):
sparql = SPARQLWrapper("http://127.0.0.1:8080/openrdf-sesame/repositories/wine2")
#sparql = SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery(prefix + """ """ + query)
sparql.setReturnFormat(JSON)
try:
results = sparql.query().convert()
except Exception, e:
results = {"error": e.message if e.message else str(e.reason) }
return results
@staticmethod
def sparql2context(results): #TODO min supp
objs = []
attrs = []
table = []
tempRel = dict([])
queryString = "SELECT ?subj ?prop ?obj WHERE { ?o1 a ?class . ?subj a ?o1 . ?subj ?prop ?obj. }"
sparql = SPARQLWrapper2("http://127.0.0.1:8080/openrdf-sesame/repositories/wine2")
# add a default graph, though that can also be in the query string
#sparql.addDefaultGraph("http://www.example.com/data.rdf")
sparql.setQuery(queryString)
ret = sparql.query()
print ret.variables # this is an array consisting of "subj" and "prop"
print str(len(ret.bindings))
for binding in ret.bindings :
# each binding is a dictionary. Let us just print the results
obj_name = binding[u"subj"].value.split("#")
attr_name = binding[u"prop"].value.split("#")
attr_value = binding[u"obj"].value.split("#")
if len(obj_name) < 2 or len(attr_name) < 2 or len(attr_value) < 2:
continue #skip literals? TODO skip uris
else:
obj_name = obj_name[1]
attr_name = attr_name[1]
attr_value = attr_value[1]
attr_val = attr_name + "-" + attr_value
# add obj
if not obj_name in objs :
objs.append(obj_name)
obj_idx = objs.index(obj_name)
#add attr
if not attr_val in attrs :
attrs.append(attr_val)
attr_idx = attrs.index(attr_val)
# if len(table) <= obj_idx:
# table.insert(obj_idx, [])
#table[obj_idx].insert(attr_idx,True)
if not obj_name in tempRel:
tempRel[obj_name] = []
tempRel[obj_name].append(attr_idx)
# FILTER OBJECTS WITH LOW SUPPORT # TODO: do it in the Context class; TODO: do the same for attribute support
for obj_name in tempRel.keys():
if len(tempRel[obj_name]) < 2:
del tempRel[obj_name]
objs.remove(obj_name)
table = [None]*len(objs)
for obj_name in tempRel.keys():
row = [False]*len(attrs)
for attr_idx in tempRel[obj_name]:
row[attr_idx] = True
obj_idx = objs.index(obj_name)
table[obj_idx] = row
return Context(_table=table, _attributes=attrs, _objects=objs)
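# Shape of the returned Context (illustrative values): a subject bound to two
# property/value pairs, e.g. hasColor->Red and hasBody->Full, would yield
#
#     objs  = ['Wine1']
#     attrs = ['hasColor-Red', 'hasBody-Full']
#     table = [[True, True]]
#
# i.e. rows are subjects and columns are attribute-value pairs.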
@staticmethod
def sparql2context2(results_table, col_types, hide_prefix): #TODO min supp
results_table = simplejson.loads(results_table)
col_types = simplejson.loads(col_types)
objs = []
attrs = []
table = []
tempRel = dict([])
obj_column = 'subj'
attr_column = 'prop'
attrval_column = None#'obj'
for x in col_types:
if col_types[x] == 'obj':
obj_column = x
elif col_types[x] == 'attr':
attr_column = x
elif col_types[x] == 'attrval':
attrval_column = x
for binding in results_table[u'results'][u'bindings'] :
obj_name = binding[obj_column][u"value"]
attr_name = binding[attr_column][u"value"]
attr_value = None
if attrval_column:
attr_value = binding[attrval_column][u"value"]
if hide_prefix:
if '#' in obj_name:
obj_name = obj_name.split("#")[1]
if '#' in attr_name:
attr_name = attr_name.split("#")[1]
if attr_value and '#' in attr_value:
attr_value = attr_value.split("#")[1]
if attr_value:
attr_val = attr_name + "-" + attr_value
else:
attr_val = attr_name
# add obj
if not obj_name in objs :
objs.append(obj_name)
obj_idx = objs.index(obj_name)
#add attr
if not attr_val in attrs :
attrs.append(attr_val)
attr_idx = attrs.index(attr_val)
# if len(table) <= obj_idx:
# table.insert(obj_idx, [])
#table[obj_idx].insert(attr_idx,True)
if not obj_name in tempRel:
tempRel[obj_name] = []
tempRel[obj_name].append(attr_idx)
# FILTER OBJECTS WITH LOW SUPPORT # TODO: do it in the Context class; TODO: do the same for attribute support
for obj_name in tempRel.keys():
if len(tempRel[obj_name]) < 2:
del tempRel[obj_name]
objs.remove(obj_name)
table = [None]*len(objs)
for obj_name in tempRel.keys():
row = [False]*len(attrs)
for attr_idx in tempRel[obj_name]:
row[attr_idx] = True
obj_idx = objs.index(obj_name)
table[obj_idx] = row
return Context(_table=table, _attributes=attrs, _objects=objs)
@staticmethod
def search_http(prefix, query):
prefix = """PREFIX :<http://www.cubist_project.eu/HWU#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX owl:<http://www.w3.org/2002/07/owl#>
PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
PREFIX xsd:<http://www.w3.org/2001/XMLSchema#>
"""
thequery = prefix + """ select distinct ?o1 ?a1 where {
?x1 rdf:type :Tissue ; rdfs:label ?o1 .
?x1 :has_theiler_stage :theiler_stage_TS07 .
?x2 rdf:type :Gene ; rdfs:label ?a1 .
?ta :in_tissue ?x1 ; :has_involved_gene ?x2 ; :has_strength :level_detected_derived . }"""
query = 'SELECT DISTINCT ?type WHERE { ?thing a ?type . } ORDER BY ?type'
repository = 'cubix'
endpoint = "http://127.0.0.1:8080/openrdf-sesame/repositories/%s" % (repository)
params = { 'query': thequery }
headers = {
'content-type': 'application/x-www-form-urlencoded',
'accept': 'application/sparql-results+json'
}
(response, content) = httplib2.Http().request(endpoint, 'POST', urllib.urlencode(params), headers=headers)
return content
| apache-2.0 | -8,049,403,118,015,015,000 | 34.801394 | 114 | 0.511533 | false |
leopoul/ncclient | ncclient/operations/session.py | 6 | 1434 | # Copyright 2009 Shikhar Bhushan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Session-related NETCONF operations"
from ncclient.xml_ import *
from ncclient.operations.rpc import RPC
class CloseSession(RPC):
"`close-session` RPC. The connection to NETCONF server is also closed."
def request(self):
"Request graceful termination of the NETCONF session, and also close the transport."
try:
return self._request(new_ele("close-session"))
finally:
self.session.close()
class KillSession(RPC):
"`kill-session` RPC."
def request(self, session_id):
"""Force the termination of a NETCONF session (not the current one!)
*session_id* is the session identifier of the NETCONF session to be terminated, given as a string
"""
node = new_ele("kill-session")
sub_ele(node, "session-id").text = session_id
return self._request(node)
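# Usage sketch (assuming an established ncclient manager session `m`, which
# exposes these operations as methods):
#
#     m.kill_session("42")    # terminate another session by its id
#     m.close_session()       # gracefully end the current session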
| apache-2.0 | -8,597,631,695,114,993,000 | 31.590909 | 98 | 0.699442 | false |
snegovick/bcam | bcam/tool_op_offset_follow.py | 1 | 8940 | from __future__ import absolute_import, division, print_function
import math
from bcam.tool_operation import ToolOperation, TOEnum
from bcam.tool_abstract_follow import TOAbstractFollow
from bcam.generalized_setting import TOSetting
from bcam.calc_utils import (find_vect_normal, mk_vect, normalize, vect_sum,
vect_len, scale_vect, pt_to_pt_dist)
from bcam.elements import ELine, EArc, ECircle
from bcam.singleton import Singleton
from logging import debug, info, warning, error, critical
from bcam.util import dbgfname
import cairo
import json
class TOOffsetFollow(TOAbstractFollow):
def __init__(self, state, depth=0, index=0, offset=0, data=None):
super(TOAbstractFollow, self).__init__(state)
self.state = state
self.name = TOEnum.offset_follow
if data == None:
self.index = index
self.depth = depth
self.offset = offset
self.path = None
self.offset_path = None
else:
self.deserialize(data)
self.display_name = TOEnum.offset_follow+" "+str(self.index)
def serialize(self):
return {'type': 'tooffsetfollow', 'path_ref': self.path.name, 'depth': self.depth, 'index': self.index, 'offset': self.offset}
def deserialize(self, data):
self.depth = data["depth"]
self.index = data["index"]
self.offset = data["offset"]
p = self.try_load_path_by_name(data["path_ref"], Singleton.state)
if p:
self.apply(p)
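# Example of the serialized form handled above (values illustrative):
#
#     {"type": "tooffsetfollow", "path_ref": "path_0",
#      "depth": 1.5, "index": 0, "offset": 2.0}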
def get_settings_list(self):
settings_lst = [TOSetting("float", 0, Singleton.state.settings.material.thickness, self.depth, "Depth, mm: ", self.set_depth_s),
TOSetting("float", None, None, self.offset, "Offset, mm: ", self.set_offset_s)]
return settings_lst
def set_depth_s(self, setting):
self.depth = setting.new_value
def set_offset_s(self, setting):
self.offset = setting.new_value
self.offset_path = self.__build_offset_path(self.path)
self.draw_list = self.offset_path
def __remove_intersected_parts(self, p):
dbgfname()
return p
def __build_offset_path(self, p):
dbgfname()
offset_path = self.__build_offset_path_normals(p)
debug(" checking offset path")
if (offset_path != None):
out = self.__remove_intersected_parts(offset_path)
return out
debug(" offset path is None")
return None
def __two_point_offset(self, prev, next):
dbgfname()
nsc = next.start
nec = next.end
sc = prev.start
ec = prev.end
nnsn = next.get_normalized_start_normal()
nen = prev.get_normalized_end_normal()
e_s_pt = [nen[0]*self.offset+sc[0], nen[1]*self.offset+sc[1], 0]
e_e_pt = [nen[0]*self.offset+ec[0], nen[1]*self.offset+ec[1], 0]
ne_s_pt = [nnsn[0]*self.offset+nsc[0], nnsn[1]*self.offset+nsc[1], 0]
ne_e_pt = [nnsn[0]*self.offset+nec[0], nnsn[1]*self.offset+nec[1], 0]
ne_dy = ne_e_pt[1]-ne_s_pt[1]
ne_dx = ne_e_pt[0]-ne_s_pt[0]
e_dy = e_e_pt[1]-e_s_pt[1]
e_dx = e_e_pt[0]-e_s_pt[0]
if ((e_dx == 0) and (ne_dy == 0)):
x = e_e_pt[0]
y = ne_e_pt[1]
debug(" case 1, x: "+str(x)+" y: "+str(y))
elif((e_dy == 0) and (ne_dx == 0)):
x = ne_e_pt[0]
y = e_e_pt[1]
debug(" case 2, x: "+str(x)+" y: "+str(y))
elif (((e_dy == 0) and (ne_dy == 0)) or ((e_dx == 0) and (ne_dx == 0))): #parallel lines
x = e_e_pt[0]
y = e_e_pt[1]
else:
a = (ne_e_pt[0]*ne_s_pt[1]-ne_s_pt[0]*ne_e_pt[1])
b = (e_e_pt[0]*e_s_pt[1]-e_s_pt[0]*e_e_pt[1])
debug(" a: "+str(a)+" b: "+str(b)+" e_dx: "+str(e_dx)+" e_dy: "+str(e_dy)+" ne_dx: "+str(ne_dx)+" ne_dy: "+str(ne_dy))
x = (a*e_dx-b*ne_dx)/(e_dy*ne_dx-ne_dy*e_dx)
if e_dx == 0:
y = (x*ne_dy+a)/ne_dx
else:
y = (x*e_dy+b)/e_dx
debug(" case 3, x: "+str(x)+" y: "+str(y))
e_pt = [x, y]
return e_pt
def __build_offset_path_normals(self, p):
dbgfname()
new_elements = []
elements = p.get_ordered_elements()
if len(elements)==0:
return None
if len(elements)==1:
e = elements[0]
if type(e).__name__ == "ECircle":
new_elements.append(ECircle(e.center, e.radius+self.offset, e.lt, e.color, None))
elif type(e).__name__ == "ELine":
el = elements[0]
s = el.start
e = el.end
nsn = elements[0].get_normalized_start_normal()
s_pt = [nsn[0]*self.offset+s[0], nsn[1]*self.offset+s[1], 0]
e_pt = [nsn[0]*self.offset+e[0], nsn[1]*self.offset+e[1], 0]
ne = ELine(s_pt, e_pt, el.lt, el.color)
new_elements.append(ne)
else:
s = elements[0].start
e = elements[0].end
nsn = elements[0].get_normalized_start_normal()
s_pt = [nsn[0]*self.offset+s[0], nsn[1]*self.offset+s[1], 0]
# preprocess: convert arcs to sequences of lines
converted_elements = []
for i, e in enumerate(elements):
if type(e).__name__ == "EArc":
sa = e.startangle
ea = e.endangle
if sa > ea:
ea+=math.pi*2
da = (ea - sa)
n_steps = int(da/0.1)
s_pt = (e.center[0]+math.cos(sa)*e.radius, e.center[1]+math.sin(sa)*e.radius)
debug(" splitting arc, start angle: "+str(sa)+" start_pt: "+str(s_pt))
for i in range(1,n_steps):
a = sa+i*0.1
e_pt = (e.center[0]+math.cos(a)*e.radius, e.center[1]+math.sin(a)*e.radius)
ne = ELine(s_pt, e_pt, e.lt, e.color)
debug(" angle: "+str(a)+" line: "+str(s_pt)+" "+str(e_pt))
s_pt = e_pt
converted_elements.append(ne)
e_pt = e.end
ne = ELine(s_pt, e_pt, e.lt, e.color)
converted_elements.append(ne)
else:
converted_elements.append(e)
elements = converted_elements
s_pt = None
#s_pt = [nsn[0]*self.offset+s[0], nsn[1]*self.offset+s[1], 0]
for i, e in enumerate(elements):
sc = e.start # current start
ec = e.end # current end
if s_pt == None:
if pt_to_pt_dist(sc, elements[-1].end)<0.001:
debug(" s_pt")
s_pt = self.__two_point_offset(elements[-1], e)
else:
nsn = e.get_normalized_start_normal()
#n = vect_sum(nsn, nen) # sum of new start normal and prev end normal
n = nsn
shift = sc
s_pt = [n[0]*self.offset+shift[0], n[1]*self.offset+shift[1], 0]
if i<len(elements)-1:
e_pt = self.__two_point_offset(e, elements[i+1])
else:
nen = e.get_normalized_end_normal()
n = nen
if pt_to_pt_dist(ec, elements[0].start)<0.001:
e_pt = self.__two_point_offset(e, elements[0])
else:
shift = ec
e_pt = [n[0]*self.offset+shift[0], n[1]*self.offset+shift[1], 0]
if type(e).__name__ == "ELine":
ne = ELine(s_pt, e_pt, e.lt, e.color)
elif type(e).__name__ == "EArc":
ne = EArc(center=e.center, lt=e.lt, start=s_pt, end=e_pt, color=e.color)
new_elements.append(ne)
s_pt = e_pt
e_pt = None
offset_path = new_elements
debug(" offset_path: "+str(offset_path))
return offset_path
def apply(self, path):
dbgfname()
debug(" apply path: "+str(path))
if path.operations[self.name]:
debug(" path ordered elements: "+str(path.ordered_elements))
if path.ordered_elements!=None:
self.path = path
self.offset_path = self.__build_offset_path(path)
self.draw_list = self.offset_path
return True
return False
def get_gcode(self):
return self.get_gcode_base(self.draw_list)
def __repr__(self):
return "<Offset follow>"
| gpl-3.0 | -2,979,988,364,097,858,600 | 37.869565 | 136 | 0.482662 | false |
opennode/waldur-mastermind | src/waldur_mastermind/invoices/handlers.py | 1 | 6194 | import datetime
import logging
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from rest_framework.exceptions import ValidationError
from waldur_core.core import utils as core_utils
from waldur_mastermind.invoices import signals as cost_signals
from waldur_mastermind.marketplace import models as marketplace_models
from . import log, models, registrators
logger = logging.getLogger(__name__)
def log_invoice_state_transition(sender, instance, created=False, **kwargs):
if created:
return
state = instance.state
if state == models.Invoice.States.PENDING or state == instance.tracker.previous(
'state'
):
return
if state == models.Invoice.States.CREATED:
log.event_logger.invoice.info(
'Invoice for customer {customer_name} has been created.',
event_type='invoice_created',
event_context={
'month': instance.month,
'year': instance.year,
'customer': instance.customer,
},
)
elif state == models.Invoice.States.PAID:
log.event_logger.invoice.info(
'Invoice for customer {customer_name} has been paid.',
event_type='invoice_paid',
event_context={
'month': instance.month,
'year': instance.year,
'customer': instance.customer,
},
)
elif state == models.Invoice.States.CANCELED:
log.event_logger.invoice.info(
'Invoice for customer {customer_name} has been canceled.',
event_type='invoice_canceled',
event_context={
'month': instance.month,
'year': instance.year,
'customer': instance.customer,
},
)
def set_tax_percent_on_invoice_creation(sender, instance, **kwargs):
if instance.pk is not None:
return
instance.tax_percent = instance.customer.default_tax_percent
def set_project_name_on_invoice_item_creation(
sender, instance, created=False, **kwargs
):
if created and instance.project:
item = instance
item.project_name = item.project.name
item.project_uuid = item.project.uuid.hex
item.save(update_fields=('project_name', 'project_uuid'))
def update_invoice_item_on_project_name_update(sender, instance, **kwargs):
project = instance
if not project.tracker.has_changed('name'):
return
query = Q(project=project, invoice__state=models.Invoice.States.PENDING)
for item in models.InvoiceItem.objects.filter(query).only('pk'):
item.project_name = project.name
item.save(update_fields=['project_name'])
def emit_invoice_created_event(sender, instance, created=False, **kwargs):
if created:
return
state = instance.state
if state != models.Invoice.States.CREATED or state == instance.tracker.previous(
'state'
):
return
cost_signals.invoice_created.send(
sender=models.Invoice,
invoice=instance,
issuer_details=settings.WALDUR_INVOICES['ISSUER_DETAILS'],
)
def prevent_deletion_of_customer_with_invoice(sender, instance, user, **kwargs):
if user.is_staff:
return
PENDING = models.Invoice.States.PENDING
for invoice in models.Invoice.objects.filter(customer=instance):
if invoice.state != PENDING or invoice.price > 0:
raise ValidationError(
_('Can\'t delete organization with invoice %s.') % invoice
)
def update_current_cost_when_invoice_item_is_updated(
sender, instance, created=False, **kwargs
):
invoice_item = instance
if created or set(invoice_item.tracker.changed()) & {
'start',
'end',
'quantity',
'unit_price',
}:
transaction.on_commit(lambda: invoice_item.invoice.update_current_cost())
def update_current_cost_when_invoice_item_is_deleted(sender, instance, **kwargs):
def update_invoice():
try:
instance.invoice.update_current_cost()
except ObjectDoesNotExist:
# It is okay to skip cache invalidation if the invoice has already been removed
pass
transaction.on_commit(update_invoice)
def projects_customer_has_been_changed(
sender, project, old_customer, new_customer, created=False, **kwargs
):
try:
today = timezone.now()
date = core_utils.month_start(today)
invoice = models.Invoice.objects.get(
customer=old_customer,
state=models.Invoice.States.PENDING,
month=date.month,
year=date.year,
)
except models.Invoice.DoesNotExist:
return
new_invoice, create = registrators.RegistrationManager.get_or_create_invoice(
new_customer, date
)
if create:
invoice.items.filter(project=project).delete()
else:
invoice.items.filter(project=project).update(invoice=new_invoice)
def create_recurring_usage_if_invoice_has_been_created(
sender, instance, created=False, **kwargs
):
if not created:
return
invoice = instance
now = timezone.now()
prev_month = (now.replace(day=1) - datetime.timedelta(days=1)).date()
prev_month_start = prev_month.replace(day=1)
usages = marketplace_models.ComponentUsage.objects.filter(
resource__project__customer=invoice.customer,
recurring=True,
billing_period__gte=prev_month_start,
).exclude(resource__state=marketplace_models.Resource.States.TERMINATED)
if not usages:
return
for usage in usages:
marketplace_models.ComponentUsage.objects.create(
resource=usage.resource,
component=usage.component,
usage=usage.usage,
description=usage.description,
date=now,
plan_period=usage.plan_period,
recurring=usage.recurring,
billing_period=core_utils.month_start(now),
)
| mit | -8,756,834,033,047,450,000 | 29.97 | 87 | 0.642073 | false |
TheTimmy/spack | lib/spack/spack/cmd/url.py | 2 | 12655 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from __future__ import division, print_function
from collections import defaultdict
import spack
from llnl.util import tty
from spack.url import *
from spack.util.web import find_versions_of_archive
from spack.util.naming import simplify_name
description = "debugging tool for url parsing"
section = "developer"
level = "long"
def setup_parser(subparser):
sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='subcommand')
# Parse
parse_parser = sp.add_parser('parse', help='attempt to parse a url')
parse_parser.add_argument(
'url',
help='url to parse')
parse_parser.add_argument(
'-s', '--spider', action='store_true',
help='spider the source page for versions')
# List
list_parser = sp.add_parser('list', help='list urls in all packages')
list_parser.add_argument(
'-c', '--color', action='store_true',
help='color the parsed version and name in the urls shown '
'(versions will be cyan, name red)')
list_parser.add_argument(
'-e', '--extrapolation', action='store_true',
help='color the versions used for extrapolation as well '
'(additional versions will be green, names magenta)')
excl_args = list_parser.add_mutually_exclusive_group()
excl_args.add_argument(
'-n', '--incorrect-name', action='store_true',
help='only list urls for which the name was incorrectly parsed')
excl_args.add_argument(
'-N', '--correct-name', action='store_true',
help='only list urls for which the name was correctly parsed')
excl_args.add_argument(
'-v', '--incorrect-version', action='store_true',
help='only list urls for which the version was incorrectly parsed')
excl_args.add_argument(
'-V', '--correct-version', action='store_true',
help='only list urls for which the version was correctly parsed')
# Summary
sp.add_parser(
'summary',
help='print a summary of how well we are parsing package urls')
def url(parser, args):
action = {
'parse': url_parse,
'list': url_list,
'summary': url_summary
}
action[args.subcommand](args)
def url_parse(args):
url = args.url
tty.msg('Parsing URL: {0}'.format(url))
print()
ver, vs, vl, vi, vregex = parse_version_offset(url)
tty.msg('Matched version regex {0:>2}: r{1!r}'.format(vi, vregex))
name, ns, nl, ni, nregex = parse_name_offset(url, ver)
tty.msg('Matched name regex {0:>2}: r{1!r}'.format(ni, nregex))
print()
tty.msg('Detected:')
try:
print_name_and_version(url)
except UrlParseError as e:
tty.error(str(e))
print(' name: {0}'.format(name))
print(' version: {0}'.format(ver))
print()
tty.msg('Substituting version 9.9.9b:')
newurl = substitute_version(url, '9.9.9b')
print_name_and_version(newurl)
if args.spider:
print()
tty.msg('Spidering for versions:')
versions = find_versions_of_archive(url)
if not versions:
print(' Found no versions for {0}'.format(name))
return
max_len = max(len(str(v)) for v in versions)
for v in sorted(versions):
print('{0:{1}} {2}'.format(v, max_len, versions[v]))
def url_list(args):
urls = set()
# Gather set of URLs from all packages
for pkg in spack.repo.all_packages():
url = getattr(pkg.__class__, 'url', None)
urls = url_list_parsing(args, urls, url, pkg)
for params in pkg.versions.values():
url = params.get('url', None)
urls = url_list_parsing(args, urls, url, pkg)
# Print URLs
for url in sorted(urls):
if args.color or args.extrapolation:
print(color_url(url, subs=args.extrapolation, errors=True))
else:
print(url)
# Return the number of URLs that were printed, only for testing purposes
return len(urls)
def url_summary(args):
# Collect statistics on how many URLs were correctly parsed
total_urls = 0
correct_names = 0
correct_versions = 0
# Collect statistics on which regexes were matched and how often
name_regex_dict = dict()
name_count_dict = defaultdict(int)
version_regex_dict = dict()
version_count_dict = defaultdict(int)
tty.msg('Generating a summary of URL parsing in Spack...')
# Loop through all packages
for pkg in spack.repo.all_packages():
urls = set()
url = getattr(pkg.__class__, 'url', None)
if url:
urls.add(url)
for params in pkg.versions.values():
url = params.get('url', None)
if url:
urls.add(url)
# Calculate statistics
for url in urls:
total_urls += 1
# Parse versions
version = None
try:
version, vs, vl, vi, vregex = parse_version_offset(url)
version_regex_dict[vi] = vregex
version_count_dict[vi] += 1
if version_parsed_correctly(pkg, version):
correct_versions += 1
except UndetectableVersionError:
pass
# Parse names
try:
name, ns, nl, ni, nregex = parse_name_offset(url, version)
name_regex_dict[ni] = nregex
name_count_dict[ni] += 1
if name_parsed_correctly(pkg, name):
correct_names += 1
except UndetectableNameError:
pass
print()
print(' Total URLs found: {0}'.format(total_urls))
print(' Names correctly parsed: {0:>4}/{1:>4} ({2:>6.2%})'.format(
correct_names, total_urls, correct_names / total_urls))
print(' Versions correctly parsed: {0:>4}/{1:>4} ({2:>6.2%})'.format(
correct_versions, total_urls, correct_versions / total_urls))
print()
tty.msg('Statistics on name regular expressions:')
print()
print(' Index Count Regular Expression')
for ni in name_regex_dict:
print(' {0:>3}: {1:>6} r{2!r}'.format(
ni, name_count_dict[ni], name_regex_dict[ni]))
print()
tty.msg('Statistics on version regular expressions:')
print()
print(' Index Count Regular Expression')
for vi in version_regex_dict:
print(' {0:>3}: {1:>6} r{2!r}'.format(
vi, version_count_dict[vi], version_regex_dict[vi]))
print()
# Return statistics, only for testing purposes
return (total_urls, correct_names, correct_versions,
name_count_dict, version_count_dict)
def print_name_and_version(url):
"""Prints a URL. Underlines the detected name with dashes and
the detected version with tildes.
Args:
url (str): The url to parse
"""
name, ns, nl, ntup, ver, vs, vl, vtup = substitution_offsets(url)
underlines = [' '] * max(ns + nl, vs + vl)
for i in range(ns, ns + nl):
underlines[i] = '-'
for i in range(vs, vs + vl):
underlines[i] = '~'
print(' {0}'.format(url))
print(' {0}'.format(''.join(underlines)))
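# For example (illustrative URL), print_name_and_version(
#     'http://example.com/foo-1.2.3.tar.gz') prints roughly:
#
#     http://example.com/foo-1.2.3.tar.gz
#                        --- ~~~~~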
def url_list_parsing(args, urls, url, pkg):
"""Helper function for :func:`url_list`.
Args:
args (argparse.Namespace): The arguments given to ``spack url list``
urls (set): List of URLs that have already been added
url (str or None): A URL to potentially add to ``urls`` depending on
``args``
pkg (spack.package.PackageBase): The Spack package
Returns:
set: The updated set of ``urls``
"""
if url:
if args.correct_name or args.incorrect_name:
# Attempt to parse the name
try:
name = parse_name(url)
if (args.correct_name and
name_parsed_correctly(pkg, name)):
# Add correctly parsed URLs
urls.add(url)
elif (args.incorrect_name and
not name_parsed_correctly(pkg, name)):
# Add incorrectly parsed URLs
urls.add(url)
except UndetectableNameError:
if args.incorrect_name:
# Add incorrectly parsed URLs
urls.add(url)
elif args.correct_version or args.incorrect_version:
# Attempt to parse the version
try:
version = parse_version(url)
if (args.correct_version and
version_parsed_correctly(pkg, version)):
# Add correctly parsed URLs
urls.add(url)
elif (args.incorrect_version and
not version_parsed_correctly(pkg, version)):
# Add incorrectly parsed URLs
urls.add(url)
except UndetectableVersionError:
if args.incorrect_version:
# Add incorrectly parsed URLs
urls.add(url)
else:
urls.add(url)
return urls
def name_parsed_correctly(pkg, name):
"""Determine if the name of a package was correctly parsed.
Args:
pkg (spack.package.PackageBase): The Spack package
name (str): The name that was extracted from the URL
Returns:
bool: True if the name was correctly parsed, else False
"""
pkg_name = pkg.name
name = simplify_name(name)
# After determining a name, `spack create` determines a build system.
# Some build systems prepend a special string to the front of the name.
# Since this can't be guessed from the URL, it would be unfair to say
# that these names are incorrectly parsed, so we remove them.
if pkg_name.startswith('r-'):
pkg_name = pkg_name[2:]
elif pkg_name.startswith('py-'):
pkg_name = pkg_name[3:]
elif pkg_name.startswith('perl-'):
pkg_name = pkg_name[5:]
elif pkg_name.startswith('octave-'):
pkg_name = pkg_name[7:]
return name == pkg_name
def version_parsed_correctly(pkg, version):
"""Determine if the version of a package was correctly parsed.
Args:
pkg (spack.package.PackageBase): The Spack package
version (str): The version that was extracted from the URL
Returns:
bool: True if the name was correctly parsed, else False
"""
version = remove_separators(version)
# If the version parsed from the URL is listed in a version()
# directive, we assume it was correctly parsed
for pkg_version in pkg.versions:
pkg_version = remove_separators(pkg_version)
if pkg_version == version:
return True
return False
def remove_separators(version):
"""Removes separator characters ('.', '_', and '-') from a version.
A version like 1.2.3 may be displayed as 1_2_3 in the URL.
Make sure 1.2.3, 1-2-3, 1_2_3, and 123 are considered equal.
Unfortunately, this also means that 1.23 and 12.3 are equal.
Args:
version (str or Version): A version
Returns:
str: The version with all separator characters removed
"""
version = str(version)
version = version.replace('.', '')
version = version.replace('_', '')
version = version.replace('-', '')
return version
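# Doctest-style illustration of the equivalences described above:
#
#     >>> remove_separators('1.2.3')
#     '123'
#     >>> remove_separators('1_2-3')
#     '123'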
| lgpl-2.1 | -4,598,628,802,133,864,400 | 31.87013 | 78 | 0.59194 | false |
dotunolafunmiloye/spark | python/pyspark/files.py | 10 | 1896 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
class SparkFiles(object):
"""
Resolves paths to files added through
L{SparkContext.addFile()<pyspark.context.SparkContext.addFile>}.
SparkFiles contains only classmethods; users should not create SparkFiles
instances.
"""
_root_directory = None
_is_running_on_worker = False
_sc = None
def __init__(self):
raise NotImplementedError("Do not construct SparkFiles objects")
@classmethod
def get(cls, filename):
"""
Get the absolute path of a file added through C{SparkContext.addFile()}.
"""
path = os.path.join(SparkFiles.getRootDirectory(), filename)
return os.path.abspath(path)
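# Usage sketch (assumes a running SparkContext `sc` that previously called
# sc.addFile("data.txt")):
#
#     from pyspark import SparkFiles
#     path = SparkFiles.get("data.txt")   # absolute local path to the file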
@classmethod
def getRootDirectory(cls):
"""
Get the root directory that contains files added through
C{SparkContext.addFile()}.
"""
if cls._is_running_on_worker:
return cls._root_directory
else:
# This will have to change if we support multiple SparkContexts:
return cls._sc._jvm.org.apache.spark.SparkFiles.getRootDirectory()
| apache-2.0 | 6,849,211,554,122,222,000 | 33.472727 | 80 | 0.694093 | false |
dpmatthews/cylc | lib/cylc/subprocpool.py | 2 | 14792 | #!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2019 NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Manage queueing and pooling of subprocesses for the suite server program."""
from collections import deque
import json
import os
import select
from signal import SIGKILL
import sys
from tempfile import SpooledTemporaryFile
from threading import RLock
from time import time
from cylc import LOG
from cylc.cfgspec.glbl_cfg import glbl_cfg
from cylc.cylc_subproc import procopen
from cylc.wallclock import get_current_time_string
_XTRIG_FUNCS = {}
def get_func(func_name, src_dir):
"""Find and return an xtrigger function from a module of the same name.
Can be in <src_dir>/lib/python, CYLC_MOD_LOC, or in Python path.
The suite source directory is passed in because this is executed in an
independent process in the command pool, which therefore doesn't know about the suite.
"""
if func_name in _XTRIG_FUNCS:
return _XTRIG_FUNCS[func_name]
# First look in <src-dir>/lib/python.
sys.path.insert(0, os.path.join(src_dir, 'lib', 'python'))
mod_name = func_name
try:
mod_by_name = __import__(mod_name, fromlist=[mod_name])
except ImportError:
# Then look in built-in xtriggers.
mod_name = "%s.%s" % ("cylc.xtriggers", func_name)
try:
mod_by_name = __import__(mod_name, fromlist=[mod_name])
except ImportError:
raise
try:
_XTRIG_FUNCS[func_name] = getattr(mod_by_name, func_name)
except AttributeError:
# Module func_name has no function func_name.
raise
return _XTRIG_FUNCS[func_name]
def run_function(func_name, json_args, json_kwargs, src_dir):
"""Run a Python function in the process pool.
func_name(*func_args, **func_kwargs)
Redirect any function stdout to stderr (and suite log in debug mode).
Return value printed to stdout as a JSON string - allows use of the
existing process pool machinery as-is. src_dir is for local modules.
"""
func_args = json.loads(json_args)
func_kwargs = json.loads(json_kwargs)
# Find and import the function.
func = get_func(func_name, src_dir)
# Redirect stdout to stderr.
orig_stdout = sys.stdout
sys.stdout = sys.stderr
res = func(*func_args, **func_kwargs)
# Restore stdout.
sys.stdout = orig_stdout
# Write function return value as JSON to stdout.
sys.stdout.write(json.dumps(res))
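# Illustrative call (hypothetical xtrigger module my_xtrigger.py under
# <src_dir>/lib/python defining a function of the same name):
#
#     run_function('my_xtrigger', '["arg1"]', '{"point": "2025"}',
#                  '/path/to/suite')
#
# The JSON-encoded return value of my_xtrigger(...) is written to stdout.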
class SubProcPool(object):
"""Manage queueing and pooling of subprocesses.
This is mainly used by the main loop of the suite server program, although
the SubProcPool.run_command can be used as a standalone utility function
to run the command in a cylc.subprocctx.SubProcContext.
A command to run under a subprocess in the pool is expected to be wrapped
using a cylc.subprocctx.SubProcContext object. The caller will add the
context object using the SubProcPool.put_command method. A callback can
be specified to notify the caller on exit of the subprocess.
A command launched by the pool is expected to write to STDOUT and STDERR.
These are captured while the command runs and/or when the command exits.
The contents are appended to the `.out` and `.err` attributes of the
SubProcContext object as they are read. STDIN can also be specified for the
command. This is currently fed into the command using a temporary file.
Note: For a cylc command that uses `cylc.option_parsers.CylcOptionParser`,
the default logging handler writes to the STDERR via a StreamHandler.
Therefore, log messages will only be written to the suite log by the
callback function when the command exits (and only if the callback function
has the logic to do so).
"""
ERR_SUITE_STOPPING = 'suite stopping, command not run'
JOBS_SUBMIT = 'jobs-submit'
POLLREAD = select.POLLIN | select.POLLPRI
RET_CODE_SUITE_STOPPING = 999
def __init__(self):
self.size = glbl_cfg().get(['process pool size'])
self.proc_pool_timeout = glbl_cfg().get(['process pool timeout'])
self.closed = False # Close queue
self.stopping = False # No more job submit if True
# .stopping may be set by an API command in a different thread
self.stopping_lock = RLock()
self.queuings = deque()
self.runnings = []
try:
self.pipepoller = select.poll()
except AttributeError: # select.poll not implemented for this OS
self.pipepoller = None
def close(self):
"""Close pool."""
self.set_stopping()
self.closed = True
@staticmethod
def get_temporary_file():
"""Return a SpooledTemporaryFile for feeding data to command STDIN."""
return SpooledTemporaryFile()
def is_not_done(self):
"""Return True if queuings or runnings not empty."""
return self.queuings or self.runnings
def _is_stopping(self):
"""Return True if .stopping is True."""
stopping = False
with self.stopping_lock:
stopping = self.stopping
return stopping
def _proc_exit(self, proc, err_xtra, ctx, callback, callback_args):
"""Get ret_code, out, err of exited command, and call its callback."""
ctx.ret_code = proc.wait()
out, err = (f.decode() for f in proc.communicate())
if out:
if ctx.out is None:
ctx.out = ''
ctx.out += out
if err + err_xtra:
if ctx.err is None:
ctx.err = ''
ctx.err += err + err_xtra
self._run_command_exit(ctx, callback, callback_args)
def process(self):
"""Process done child processes and submit more."""
# Handle child processes that are done
runnings = []
for proc, ctx, callback, callback_args in self.runnings:
# Command completed/exited
if proc.poll() is not None:
self._proc_exit(proc, "", ctx, callback, callback_args)
continue
# Command timed out, kill it
if time() > ctx.timeout:
try:
os.killpg(proc.pid, SIGKILL) # kill process group
except OSError:
# it must have just exited since the poll above
err_xtra = ""
else:
err_xtra = "\nkilled on timeout (%s)" % (
self.proc_pool_timeout)
self._proc_exit(proc, err_xtra, ctx, callback, callback_args)
continue
# Command still running, see if STDOUT/STDERR are readable or not
runnings.append([proc, ctx, callback, callback_args])
# Unblock proc's STDOUT/STDERR if necessary. Otherwise, a full
# STDOUT or STDERR may stop command from proceeding.
self._poll_proc_pipes(proc, ctx)
# Update list of running items
self.runnings[:] = runnings
# Create more child processes, if items in queue and space in pool
stopping = self._is_stopping()
while self.queuings and len(self.runnings) < self.size:
ctx, callback, callback_args = self.queuings.popleft()
if stopping and ctx.cmd_key == self.JOBS_SUBMIT:
ctx.err = self.ERR_SUITE_STOPPING
ctx.ret_code = self.RET_CODE_SUITE_STOPPING
self._run_command_exit(ctx)
else:
proc = self._run_command_init(ctx, callback, callback_args)
if proc is not None:
ctx.timeout = time() + self.proc_pool_timeout
self.runnings.append([proc, ctx, callback, callback_args])
def put_command(self, ctx, callback=None, callback_args=None):
"""Queue a new shell command to execute.
Arguments:
ctx (cylc.subprocctx.SubProcContext):
A context object containing the command to run and its status.
callback (callable):
Function to call back when command exits or on error.
Should have signature:
callback(ctx, *callback_args) -> None
callback_args (list):
Extra arguments to the callback function.
"""
if (self.closed or self._is_stopping() and
ctx.cmd_key == self.JOBS_SUBMIT):
ctx.err = self.ERR_SUITE_STOPPING
ctx.ret_code = self.RET_CODE_SUITE_STOPPING
self._run_command_exit(ctx, callback, callback_args)
else:
self.queuings.append([ctx, callback, callback_args])
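# Callback sketch (illustrative): queue a command and receive its context,
# including .ret_code/.out/.err, when it exits:
#
#     def _on_done(ctx, suite):
#         if ctx.ret_code:          # non-zero exit status
#             LOG.warning("%s failed for %s", ctx.cmd_key, suite)
#
#     pool.put_command(
#         SubProcContext('my-cmd', ['true']), _on_done, [suite])
#
# where SubProcContext comes from cylc.subprocctx as described above.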
@classmethod
def run_command(cls, ctx):
"""Execute command in ctx and capture its output and exit status.
Arguments:
ctx (cylc.subprocctx.SubProcContext):
A context object containing the command to run and its status.
"""
proc = cls._run_command_init(ctx)
if proc:
ctx.out, ctx.err = (f.decode() for f in proc.communicate())
ctx.ret_code = proc.wait()
cls._run_command_exit(ctx)
def set_stopping(self):
"""Stop job submission."""
with self.stopping_lock:
self.stopping = True
def terminate(self):
"""Drain queue, and kill and process remaining child processes."""
self.close()
# Drain queue
while self.queuings:
ctx = self.queuings.popleft()[0]
ctx.err = self.ERR_SUITE_STOPPING
ctx.ret_code = self.RET_CODE_SUITE_STOPPING
self._run_command_exit(ctx)
# Kill remaining processes
for value in self.runnings:
proc = value[0]
if proc:
os.killpg(proc.pid, SIGKILL)
# Wait for child processes
self.process()
def _poll_proc_pipes(self, proc, ctx):
"""Poll STDOUT/ERR of proc and read some data if possible.
This helps to unblock the command by unblocking its pipes.
"""
if self.pipepoller is None:
return # select.poll not supported on this OS
for handle in [proc.stdout, proc.stderr]:
if not handle.closed:
self.pipepoller.register(handle.fileno(), self.POLLREAD)
while True:
fileno_list = [
fileno
for fileno, event in self.pipepoller.poll(0.0)
if event & self.POLLREAD]
if not fileno_list:
# Nothing readable
break
for fileno in fileno_list:
# If a file handle is readable, read something from it, add
# results into the command context object's `.out` or `.err`,
# whichever is relevant. To avoid blocking:
# 1. Use `os.read` here instead of `file.read` to avoid any
# buffering that may cause the file handle to block.
# 2. Call os.read only once after a poll. Poll again before
# another read - otherwise the os.read call may block.
try:
data = os.read(fileno, 65536).decode() # 64K
except OSError:
continue
if fileno == proc.stdout.fileno():
if ctx.out is None:
ctx.out = ''
ctx.out += data
elif fileno == proc.stderr.fileno():
if ctx.err is None:
ctx.err = ''
ctx.err += data
self.pipepoller.unregister(proc.stdout.fileno())
self.pipepoller.unregister(proc.stderr.fileno())
@classmethod
def _run_command_init(cls, ctx, callback=None, callback_args=None):
"""Prepare and launch shell command in ctx."""
try:
if ctx.cmd_kwargs.get('stdin_files'):
if len(ctx.cmd_kwargs['stdin_files']) > 1:
stdin_file = cls.get_temporary_file()
for file_ in ctx.cmd_kwargs['stdin_files']:
if hasattr(file_, 'read'):
stdin_file.write(file_.read())
else:
stdin_file.write(open(file_, 'rb').read())
stdin_file.seek(0)
elif hasattr(ctx.cmd_kwargs['stdin_files'][0], 'read'):
stdin_file = ctx.cmd_kwargs['stdin_files'][0]
else:
stdin_file = open(
ctx.cmd_kwargs['stdin_files'][0], 'rb')
elif ctx.cmd_kwargs.get('stdin_str'):
stdin_file = cls.get_temporary_file()
stdin_file.write(ctx.cmd_kwargs.get('stdin_str').encode())
stdin_file.seek(0)
else:
stdin_file = open(os.devnull)
proc = procopen(
ctx.cmd, stdin=stdin_file, stdoutpipe=True, stderrpipe=True,
# Execute command as a process group leader,
# so we can use "os.killpg" to kill the whole group.
preexec_fn=os.setpgrp,
env=ctx.cmd_kwargs.get('env'),
usesh=ctx.cmd_kwargs.get('shell'))
# calls to open a shell are aggregated in cylc_subproc.procopen()
# with logging for what is calling it and the commands given
except (IOError, OSError) as exc:
if exc.filename is None:
exc.filename = ctx.cmd[0]
LOG.exception(exc)
ctx.ret_code = 1
ctx.err = str(exc)
cls._run_command_exit(ctx, callback, callback_args)
return None
else:
LOG.debug(ctx.cmd)
return proc
@classmethod
def _run_command_exit(cls, ctx, callback=None, callback_args=None):
"""Process command completion."""
ctx.timestamp = get_current_time_string()
if callable(callback):
if not callback_args:
callback_args = []
callback(ctx, *callback_args)
| gpl-3.0 | 1,397,070,487,950,449,200 | 39.526027 | 79 | 0.591333 | false |
onurozuduru/graph-of-likes | generate_graph.py | 1 | 7131 | ##################################################################################
#File: generate_graph.py
#Author: Onur Ozuduru
# e-mail: onur.ozuduru { at } gmail.com
# github: github.com/onurozuduru
# twitter: twitter.com/OnurOzuduru
#
#License: The MIT License (MIT)
#
# Copyright (c) 2016 Onur Ozuduru
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#NOTE: Instagram API has its own license and platform policy,
# for more information about it please visit:
# https://www.instagram.com/developer/.
##################################################################################
from instagram.client import InstagramAPI
from pattern.graph import Graph
import lxml.html as L
access_token = "YOUR ACCESS TOKEN" ## Replace with token.
client_secret = "YOUR CLIENT SECRET" ## Replace with secret.
# Name/Path of the folder that keeps output files.
output_path = "MyGraph"
# Class name of the user nodes in the graph. It can be seen on HTML output.
css_user = "node-user"
# Class name of the image nodes in the graph. It can be seen on HTML output.
css_image = "node-photo"
# Distance of the graph.
distance = 20
# Force constant of the graph.
k = 3.0
# Force radius of the graph.
repulsion = 15
# Size of the canvas that includes graph.
width = 1200
height = 600
# JavaScript code that converts URLs of the image nodes to <img> tags; in other words,
# it replaces addresses with real images.
js = """
<script type="text/javascript">
window.onload = function() {
nodeList = document.getElementsByClassName("%(image)s");
for(var i = 0; i < nodeList.length; i++) {
var url = (nodeList[i].innerText || nodeList[i].textContent);
nodeList[i].innerHTML = '<img src="'+url+'" width="75px" height="75px" style="position:absolute; left:-37px; top:-37px; z-index:-1;" />';
}
userList = document.getElementsByClassName("%(user)s");
for(var i = 0; i < userList.length; i++) {
var username = userList[i].innerHTML;
userList[i].innerHTML = '<img src="https://openclipart.org/image/36px/svg_to_png/145537/Simple-Red-Heart.png" style="position:absolute; left:-18px; top:-18px; z-index:-1;" />' + username;
}
images = document.getElementsByTagName('img');
for(var i = 0; i < images.length; i++) {
images[i].ondragstart = function() { return false; };
}
};
</script>
""" % {"image": css_image, "user": css_user}
# Create new Instagram API.
api = InstagramAPI(access_token=access_token, client_secret=client_secret)
# Create new Graph.
graph = Graph(distance=distance)
# It is for finding the user-id of a user.
# It takes only one username (string) as an argument and
# returns a User object and its user-id (as a string).
# !! The exact username must be given as the argument, otherwise this function may return the wrong user!
def find_user(username):
if not username:
print "Name is empty!"
return None, None
res = api.user_search(q="@"+username, count=1)
if not res:
print "{user} cannot be found!".format(user=username)
return None, None
ret_user = res[0]
return ret_user, ret_user.id
# It is for getting only the necessary parts of Image objects (which are the URLs
# and the names of the users who liked each image).
# It takes a user-id (string) and a count (integer) giving the number of images to process and
# returns a list of dictionaries in the following format:
# {"url": URLofIMAGE, "liked_usernames":[ListofUsernames]}
def recent_media_likes(userid, count):
if not userid or not count:
return []
media_list, _ = api.user_recent_media(user_id=userid, count=count)
ret = []
for media in media_list:
media_dict = {"url":"", "liked_usernames":[]}
media_dict["url"] = media.images["thumbnail"].url.split('?')[0]
media_dict["liked_usernames"] = [u.username for u in api.media_likes(media.id)]
ret.append(media_dict)
return ret
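# Example of the returned structure (illustrative values):
#
#     [{"url": "http://example.com/thumb.jpg",
#       "liked_usernames": ["alice", "bob"]}]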
# It is for creating new nodes and edges between them.
# Example path: [User (who owns images)] -> [Image0] -> [User (who likes Image0)]
# where brackets ([]) show Nodes and arrows (->) show Edges.
def create_nodes(username, media_list):
css = {username: "owner"}
graph.add_node(username, fill=(0,0,0,1))
for media in media_list:
image = media["url"]
likes = media["liked_usernames"]
graph.add_node(image)
graph.add_edge(username, image, weight=0.0, type='shared-it')
css[image] = css_image
for liked_by in likes:
graph.add_node(liked_by)
graph.add_edge(image, liked_by, weight=0.0, type='is-liked-by')
css[liked_by] = css_user
return graph, css
# It exports the graph for visualization and modifies the HTML code for a nicer rendering.
def create_output(css):
graph.export(path=output_path, directed=False, width=width, height=height, css=css, k=k, repulsion=repulsion)
with open(output_path+"/index.html", "r") as f:
html_data = f.read()
page = L.fromstring(html_data)
page.body.insert(page.body.index(page.body.find(".//div[@id='graph']"))+1, L.fragment_fromstring(js))
with open(output_path+"/index.html", "w") as f:
f.write(L.tostring(page))
def sort_users_by_likes():
nodes = graph.nodes
nodes = filter(lambda n: False if n.id[0:7] == "http://" or n.id[0:8] == "https://" else True, nodes)
for node in sorted(nodes, key=lambda n: n.weight, reverse=True):
print '%.2f' % node.weight, node.id
def run(username, userid, count):
_, css = create_nodes(username, recent_media_likes(userid, count))
create_output(css)
sort_users_by_likes()
if __name__ == "__main__":
# Uncomment below line to create a graph for a different user than yourself.
# user, userid = find_user("nasa")
username = "Me" # user.username
userid = "self" # Comment this line if you are creating a graph for a different user than yourself.
count = 3
run(username, userid, count)
| mit | 3,990,361,394,095,999,000 | 43.018519 | 200 | 0.653765 | false |
jianghuaw/nova | nova/tests/unit/api/openstack/compute/test_instance_usage_audit_log.py | 9 | 8866 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_utils import fixture as utils_fixture
from nova.api.openstack.compute import instance_usage_audit_log as v21_ial
from nova import context
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_service
service_base = test_service.fake_service
TEST_COMPUTE_SERVICES = [dict(service_base, host='foo', topic='compute'),
dict(service_base, host='bar', topic='compute'),
dict(service_base, host='baz', topic='compute'),
dict(service_base, host='plonk', topic='compute'),
dict(service_base, host='wibble', topic='bogus'),
]
begin1 = datetime.datetime(2012, 7, 4, 6, 0, 0)
begin2 = end1 = datetime.datetime(2012, 7, 5, 6, 0, 0)
begin3 = end2 = datetime.datetime(2012, 7, 6, 6, 0, 0)
end3 = datetime.datetime(2012, 7, 7, 6, 0, 0)
# test data
TEST_LOGS1 = [
# all services done, no errors.
dict(host="plonk", period_beginning=begin1, period_ending=end1,
state="DONE", errors=0, task_items=23, message="test1"),
dict(host="baz", period_beginning=begin1, period_ending=end1,
state="DONE", errors=0, task_items=17, message="test2"),
dict(host="bar", period_beginning=begin1, period_ending=end1,
state="DONE", errors=0, task_items=10, message="test3"),
dict(host="foo", period_beginning=begin1, period_ending=end1,
state="DONE", errors=0, task_items=7, message="test4"),
]
TEST_LOGS2 = [
# some still running...
dict(host="plonk", period_beginning=begin2, period_ending=end2,
state="DONE", errors=0, task_items=23, message="test5"),
dict(host="baz", period_beginning=begin2, period_ending=end2,
state="DONE", errors=0, task_items=17, message="test6"),
dict(host="bar", period_beginning=begin2, period_ending=end2,
state="RUNNING", errors=0, task_items=10, message="test7"),
dict(host="foo", period_beginning=begin2, period_ending=end2,
state="DONE", errors=0, task_items=7, message="test8"),
]
TEST_LOGS3 = [
# some errors..
dict(host="plonk", period_beginning=begin3, period_ending=end3,
state="DONE", errors=0, task_items=23, message="test9"),
dict(host="baz", period_beginning=begin3, period_ending=end3,
state="DONE", errors=2, task_items=17, message="test10"),
dict(host="bar", period_beginning=begin3, period_ending=end3,
state="DONE", errors=0, task_items=10, message="test11"),
dict(host="foo", period_beginning=begin3, period_ending=end3,
state="DONE", errors=1, task_items=7, message="test12"),
]
def fake_task_log_get_all(context, task_name, begin, end,
host=None, state=None):
assert task_name == "instance_usage_audit"
if begin == begin1 and end == end1:
return TEST_LOGS1
if begin == begin2 and end == end2:
return TEST_LOGS2
if begin == begin3 and end == end3:
return TEST_LOGS3
raise AssertionError("Invalid date %s to %s" % (begin, end))
def fake_last_completed_audit_period(unit=None, before=None):
audit_periods = [(begin3, end3),
(begin2, end2),
(begin1, end1)]
if before is not None:
for begin, end in audit_periods:
if before > end:
return begin, end
raise AssertionError("Invalid before date %s" % (before))
return begin1, end1
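# Worked example of the helper above (illustrative): with before = 2012-07-05
# 10:00:00, neither end3 (07-07 06:00) nor end2 (07-06 06:00) is earlier than
# 'before', but end1 (07-05 06:00) is, so (begin1, end1) is returned and the
# tests exercising that date pick up TEST_LOGS1.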
class InstanceUsageAuditLogTestV21(test.NoDBTestCase):
def setUp(self):
super(InstanceUsageAuditLogTestV21, self).setUp()
self.context = context.get_admin_context()
self.useFixture(
utils_fixture.TimeFixture(datetime.datetime(2012, 7, 5, 10, 0, 0)))
self._set_up_controller()
self.host_api = self.controller.host_api
def fake_service_get_all(context, disabled):
self.assertIsNone(disabled)
return TEST_COMPUTE_SERVICES
self.stub_out('nova.utils.last_completed_audit_period',
fake_last_completed_audit_period)
self.stub_out('nova.db.service_get_all', fake_service_get_all)
self.stub_out('nova.db.task_log_get_all', fake_task_log_get_all)
self.req = fakes.HTTPRequest.blank('')
def _set_up_controller(self):
self.controller = v21_ial.InstanceUsageAuditLogController()
def test_index(self):
result = self.controller.index(self.req)
self.assertIn('instance_usage_audit_logs', result)
logs = result['instance_usage_audit_logs']
self.assertEqual(57, logs['total_instances'])
self.assertEqual(0, logs['total_errors'])
self.assertEqual(4, len(logs['log']))
self.assertEqual(4, logs['num_hosts'])
self.assertEqual(4, logs['num_hosts_done'])
self.assertEqual(0, logs['num_hosts_running'])
self.assertEqual(0, logs['num_hosts_not_run'])
self.assertEqual("ALL hosts done. 0 errors.", logs['overall_status'])
def test_show(self):
result = self.controller.show(self.req, '2012-07-05 10:00:00')
self.assertIn('instance_usage_audit_log', result)
logs = result['instance_usage_audit_log']
self.assertEqual(57, logs['total_instances'])
self.assertEqual(0, logs['total_errors'])
self.assertEqual(4, len(logs['log']))
self.assertEqual(4, logs['num_hosts'])
self.assertEqual(4, logs['num_hosts_done'])
self.assertEqual(0, logs['num_hosts_running'])
self.assertEqual(0, logs['num_hosts_not_run'])
self.assertEqual("ALL hosts done. 0 errors.", logs['overall_status'])
def test_show_with_running(self):
result = self.controller.show(self.req, '2012-07-06 10:00:00')
self.assertIn('instance_usage_audit_log', result)
logs = result['instance_usage_audit_log']
self.assertEqual(57, logs['total_instances'])
self.assertEqual(0, logs['total_errors'])
self.assertEqual(4, len(logs['log']))
self.assertEqual(4, logs['num_hosts'])
self.assertEqual(3, logs['num_hosts_done'])
self.assertEqual(1, logs['num_hosts_running'])
self.assertEqual(0, logs['num_hosts_not_run'])
self.assertEqual("3 of 4 hosts done. 0 errors.",
logs['overall_status'])
def test_show_with_errors(self):
result = self.controller.show(self.req, '2012-07-07 10:00:00')
self.assertIn('instance_usage_audit_log', result)
logs = result['instance_usage_audit_log']
self.assertEqual(57, logs['total_instances'])
self.assertEqual(3, logs['total_errors'])
self.assertEqual(4, len(logs['log']))
self.assertEqual(4, logs['num_hosts'])
self.assertEqual(4, logs['num_hosts_done'])
self.assertEqual(0, logs['num_hosts_running'])
self.assertEqual(0, logs['num_hosts_not_run'])
self.assertEqual("ALL hosts done. 3 errors.",
logs['overall_status'])
class InstanceUsageAuditPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(InstanceUsageAuditPolicyEnforcementV21, self).setUp()
self.controller = v21_ial.InstanceUsageAuditLogController()
self.req = fakes.HTTPRequest.blank('')
def test_index_policy_failed(self):
rule_name = "os_compute_api:os-instance-usage-audit-log"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_show_policy_failed(self):
rule_name = "os_compute_api:os-instance-usage-audit-log"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.show, self.req, '2012-07-05 10:00:00')
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
| apache-2.0 | 8,930,285,025,171,244,000 | 40.624413 | 79 | 0.634897 | false |
Lujeni/ansible | test/units/modules/source_control/gitlab/test_gitlab_deploy_key.py | 13 | 4137 | # -*- coding: utf-8 -*-
# Copyright: (c) 2019, Guillaume Martinez ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import
import pytest
from ansible.modules.source_control.gitlab.gitlab_deploy_key import GitLabDeployKey
def _dummy(x):
"""Dummy function. Only used as a placeholder for toplevel definitions when the test is going
to be skipped anyway"""
return x
pytestmark = []
try:
from .gitlab import (GitlabModuleTestCase,
python_version_match_requirement,
resp_get_project, resp_find_project_deploy_key,
resp_create_project_deploy_key, resp_delete_project_deploy_key)
# GitLab module requirements
if python_version_match_requirement():
from gitlab.v4.objects import ProjectKey
except ImportError:
pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
# Need to set these to something so that we don't fail when parsing
GitlabModuleTestCase = object
resp_get_project = _dummy
resp_find_project_deploy_key = _dummy
resp_create_project_deploy_key = _dummy
resp_delete_project_deploy_key = _dummy
# Unit tests requirements
try:
from httmock import with_httmock # noqa
except ImportError:
pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
with_httmock = _dummy
class TestGitlabDeployKey(GitlabModuleTestCase):
def setUp(self):
super(TestGitlabDeployKey, self).setUp()
self.moduleUtil = GitLabDeployKey(module=self.mock_module, gitlab_instance=self.gitlab_instance)
@with_httmock(resp_get_project)
@with_httmock(resp_find_project_deploy_key)
def test_deploy_key_exist(self):
project = self.gitlab_instance.projects.get(1)
rvalue = self.moduleUtil.existsDeployKey(project, "Public key")
self.assertEqual(rvalue, True)
rvalue = self.moduleUtil.existsDeployKey(project, "Private key")
self.assertEqual(rvalue, False)
@with_httmock(resp_get_project)
@with_httmock(resp_create_project_deploy_key)
def test_create_deploy_key(self):
project = self.gitlab_instance.projects.get(1)
deploy_key = self.moduleUtil.createDeployKey(project, {"title": "Public key",
"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM"
"4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc"
"KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfD"
"zpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0="})
self.assertEqual(type(deploy_key), ProjectKey)
self.assertEqual(deploy_key.title, "Public key")
@with_httmock(resp_get_project)
@with_httmock(resp_find_project_deploy_key)
@with_httmock(resp_create_project_deploy_key)
def test_update_deploy_key(self):
project = self.gitlab_instance.projects.get(1)
deployKey = self.moduleUtil.findDeployKey(project, "Public key")
changed, newDeploy_key = self.moduleUtil.updateDeployKey(deployKey, {"title": "Private key"})
self.assertEqual(changed, True)
self.assertEqual(type(newDeploy_key), ProjectKey)
self.assertEqual(newDeploy_key.title, "Private key")
changed, newDeploy_key = self.moduleUtil.updateDeployKey(deployKey, {"title": "Private key"})
self.assertEqual(changed, False)
self.assertEqual(newDeploy_key.title, "Private key")
@with_httmock(resp_get_project)
@with_httmock(resp_find_project_deploy_key)
@with_httmock(resp_delete_project_deploy_key)
def test_delete_deploy_key(self):
project = self.gitlab_instance.projects.get(1)
self.moduleUtil.existsDeployKey(project, "Public key")
rvalue = self.moduleUtil.deleteDeployKey()
self.assertEqual(rvalue, None)
| gpl-3.0 | -8,097,522,561,245,702,000 | 37.663551 | 122 | 0.665216 | false |
alikins/virt-who | event.py | 2 | 12960 | """
Loop for reading events from libvirt, based on example from libvirt-python.
"""
#################################################################################
# Start off by implementing a general purpose event loop for anyones use
#################################################################################
import sys
import os
import libvirt
import select
import time
import threading
import signal
# Type of virtualization
virtType = None
# This general purpose event loop will support waiting for file handle
# I/O and errors events, as well as scheduling repeatable timers with
# a fixed interval.
#
# It is a pure python implementation based around the poll() API
#
class virEventLoopPure:
# This class contains the data we need to track for a
# single file handle
class virEventLoopPureHandle:
def __init__(self, handle, fd, events, cb, opaque):
self.handle = handle
self.fd = fd
self.events = events
self.cb = cb
self.opaque = opaque
def get_id(self):
return self.handle
def get_fd(self):
return self.fd
def get_events(self):
return self.events
def set_events(self, events):
self.events = events
def dispatch(self, events):
self.cb(self.handle,
self.fd,
events,
self.opaque[0],
self.opaque[1])
# This class contains the data we need to track for a
# single periodic timer
class virEventLoopPureTimer:
def __init__(self, timer, interval, cb, opaque):
self.timer = timer
self.interval = interval
self.cb = cb
self.opaque = opaque
self.lastfired = 0
def get_id(self):
return self.timer
def get_interval(self):
return self.interval
def set_interval(self, interval):
self.interval = interval
def get_last_fired(self):
return self.lastfired
def set_last_fired(self, now):
self.lastfired = now
def dispatch(self):
self.cb(self.timer,
self.opaque[0],
self.opaque[1])
def __init__(self):
self.poll = select.poll()
self.pipetrick = os.pipe()
self.nextHandleID = 1
self.nextTimerID = 1
self.handles = []
self.timers = []
self.quit = False
# The event loop can be used from multiple threads at once.
# Specifically while the main thread is sleeping in poll()
# waiting for events to occur, another thread may come along
# and add/update/remove a file handle, or timer. When this
# happens we need to interrupt the poll() sleep in the other
# thread, so that it'll see the file handle / timer changes.
#
# Using OS level signals for this is very unreliable and
# hard to implement correctly. Thus we use the real classic
# "self pipe" trick. A anonymous pipe, with one end registered
# with the event loop for input events. When we need to force
# the main thread out of a poll() sleep, we simple write a
# single byte of data to the other end of the pipe.
self.poll.register(self.pipetrick[0], select.POLLIN)
    # Calculate when the next timeout is due to occur, returning
# the absolute timestamp for the next timeout, or 0 if there is
# no timeout due
def next_timeout(self):
next = 0
for t in self.timers:
last = t.get_last_fired()
interval = t.get_interval()
if interval < 0:
continue
if next == 0 or (last + interval) < next:
next = last + interval
return next
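    # Worked example (illustrative): with two timers that both last fired at
    # t=1000ms, one with a 500ms interval and one with a 2000ms interval, this
    # returns 1500 -- the earliest absolute expiry time among the enabled timers.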
# Lookup a virEventLoopPureHandle object based on file descriptor
def get_handle_by_fd(self, fd):
for h in self.handles:
if h.get_fd() == fd:
return h
return None
# Lookup a virEventLoopPureHandle object based on its event loop ID
def get_handle_by_id(self, handleID):
for h in self.handles:
if h.get_id() == handleID:
return h
return None
# This is the heart of the event loop, performing one single
# iteration. It asks when the next timeout is due, and then
    # calculates the maximum amount of time it is able to sleep
# for in poll() pending file handle events.
#
# It then goes into the poll() sleep.
#
    # When poll() returns, there will be zero or more file handle
# events which need to be dispatched to registered callbacks
# It may also be time to fire some periodic timers.
#
    # Due to the coarse granularity of scheduler timeslices, if
    # we ask for a sleep of 500ms in order to satisfy a timer, we
    # may return up to 1 scheduler timeslice early. So even though
# our sleep timeout was reached, the registered timer may not
# technically be at its expiry point. This leads to us going
# back around the loop with a crazy 5ms sleep. So when checking
# if timeouts are due, we allow a margin of 20ms, to avoid
# these pointless repeated tiny sleeps.
def run_once(self):
sleep = -1
next = self.next_timeout()
if next > 0:
now = int(time.time() * 1000)
if now >= next:
sleep = 0
else:
sleep = (next - now) / 1000.0
events = self.poll.poll(sleep)
# Dispatch any file handle events that occurred
for (fd, revents) in events:
            # See if the event was from the self-pipe
            # telling us to wake up. If so, then discard
            # the data and just continue
if fd == self.pipetrick[0]:
data = os.read(fd, 1)
continue
h = self.get_handle_by_fd(fd)
if h:
h.dispatch(self.events_from_poll(revents))
now = int(time.time() * 1000)
for t in self.timers:
interval = t.get_interval()
if interval < 0:
continue
want = t.get_last_fired() + interval
            # Deduct 20ms, since the scheduler timeslice
# means we could be ever so slightly early
if now >= (want-20):
t.set_last_fired(now)
t.dispatch()
    # Actually run the event loop forever
def run_loop(self):
self.quit = False
while not self.quit:
self.run_once()
def interrupt(self):
os.write(self.pipetrick[1], 'c')
# Registers a new file handle 'fd', monitoring for 'events' (libvirt
# event constants), firing the callback cb() when an event occurs.
    # Returns a unique integer identifier for this handle, which should be
# used to later update/remove it
def add_handle(self, fd, events, cb, opaque):
handleID = self.nextHandleID + 1
self.nextHandleID = self.nextHandleID + 1
h = self.virEventLoopPureHandle(handleID, fd, events, cb, opaque)
self.handles.append(h)
self.poll.register(fd, self.events_to_poll(events))
self.interrupt()
return handleID
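    # Illustrative call (comment only; names are hypothetical, 'loop' is a
    # virEventLoopPure instance): watch a socket for readable events.
    #   watch_id = loop.add_handle(sock.fileno(),
    #                              libvirt.VIR_EVENT_HANDLE_READABLE,
    #                              my_callback, (conn, None))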
# Registers a new timer with periodic expiry at 'interval' ms,
# firing cb() each time the timer expires. If 'interval' is -1,
# then the timer is registered, but not enabled
    # Returns a unique integer identifier for this timer, which should be
# used to later update/remove it
def add_timer(self, interval, cb, opaque):
timerID = self.nextTimerID + 1
self.nextTimerID = self.nextTimerID + 1
h = self.virEventLoopPureTimer(timerID, interval, cb, opaque)
self.timers.append(h)
self.interrupt()
return timerID
# Change the set of events to be monitored on the file handle
def update_handle(self, handleID, events):
h = self.get_handle_by_id(handleID)
if h:
h.set_events(events)
self.poll.unregister(h.get_fd())
self.poll.register(h.get_fd(), self.events_to_poll(events))
self.interrupt()
# Change the periodic frequency of the timer
def update_timer(self, timerID, interval):
for h in self.timers:
if h.get_id() == timerID:
h.set_interval(interval);
self.interrupt()
break
# Stop monitoring for events on the file handle
def remove_handle(self, handleID):
handles = []
for h in self.handles:
if h.get_id() == handleID:
self.poll.unregister(h.get_fd())
else:
handles.append(h)
self.handles = handles
self.interrupt()
        # !!! This is NOT present in the original example from libvirt.
        # A handle removal happens when libvirtd dies, so we restart ourselves.
        # Only done for Xen; the other virt types work fine without it.
if virtType is not None and virtType == "Xen":
os.kill(os.getpid(), signal.SIGHUP)
# Stop firing the periodic timer
def remove_timer(self, timerID):
timers = []
for h in self.timers:
if h.get_id() != timerID:
timers.append(h)
self.timers = timers
self.interrupt()
# Convert from libvirt event constants, to poll() events constants
def events_to_poll(self, events):
ret = 0
if events & libvirt.VIR_EVENT_HANDLE_READABLE:
ret |= select.POLLIN
if events & libvirt.VIR_EVENT_HANDLE_WRITABLE:
ret |= select.POLLOUT
if events & libvirt.VIR_EVENT_HANDLE_ERROR:
ret |= select.POLLERR;
if events & libvirt.VIR_EVENT_HANDLE_HANGUP:
ret |= select.POLLHUP;
return ret
# Convert from poll() event constants, to libvirt events constants
def events_from_poll(self, events):
ret = 0;
if events & select.POLLIN:
ret |= libvirt.VIR_EVENT_HANDLE_READABLE;
if events & select.POLLOUT:
ret |= libvirt.VIR_EVENT_HANDLE_WRITABLE;
if events & select.POLLNVAL:
ret |= libvirt.VIR_EVENT_HANDLE_ERROR;
if events & select.POLLERR:
ret |= libvirt.VIR_EVENT_HANDLE_ERROR;
if events & select.POLLHUP:
ret |= libvirt.VIR_EVENT_HANDLE_HANGUP;
return ret;
###########################################################################
# Now glue an instance of the general event loop into libvirt's event loop
###########################################################################
# This single global instance of the event loop will be used for
# monitoring libvirt events
eventLoop = None
# This keeps track of what thread is running the event loop,
# (if it is run in a background thread)
eventLoopThread = None
# These next 6 methods are the glue between the official
# libvirt events API, and our particular impl of the event loop
#
# There is no reason why the 'virEventLoopPure' has to be used.
# An application could just as easily make these 6 glue methods hook into
# another event loop such as GLib's, or something like the python
# Twisted event framework.
def virEventAddHandleImpl(fd, events, cb, opaque):
global eventLoop
return eventLoop.add_handle(fd, events, cb, opaque)
def virEventUpdateHandleImpl(handleID, events):
global eventLoop
return eventLoop.update_handle(handleID, events)
def virEventRemoveHandleImpl(handleID):
global eventLoop
return eventLoop.remove_handle(handleID)
def virEventAddTimerImpl(interval, cb, opaque):
global eventLoop
return eventLoop.add_timer(interval, cb, opaque)
def virEventUpdateTimerImpl(timerID, interval):
global eventLoop
return eventLoop.update_timer(timerID, interval)
def virEventRemoveTimerImpl(timerID):
global eventLoop
return eventLoop.remove_timer(timerID)
# This tells libvirt what event loop implementation it
# should use
def virEventLoopPureRegister():
libvirt.virEventRegisterImpl(virEventAddHandleImpl,
virEventUpdateHandleImpl,
virEventRemoveHandleImpl,
virEventAddTimerImpl,
virEventUpdateTimerImpl,
virEventRemoveTimerImpl)
# Directly run the event loop in the current thread
def virEventLoopPureRun():
global eventLoop
eventLoop.run_loop()
# Spawn a background thread to run the event loop
def virEventLoopPureStart():
global eventLoopThread
global eventLoop
eventLoop = virEventLoopPure()
virEventLoopPureRegister()
eventLoopThread = threading.Thread(target=virEventLoopPureRun, name="libvirtEventLoop")
eventLoopThread.setDaemon(True)
eventLoopThread.start()
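# Illustrative usage sketch (not part of the original file; the callback name is
# hypothetical and a libvirt build with domain-event support is assumed):
#
#   virEventLoopPureStart()
#   conn = libvirt.openReadOnly(None)
#   conn.domainEventRegister(my_domain_event_callback, None)
#   while True:
#       time.sleep(1)   # events are dispatched on the background loop thread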
| gpl-2.0 | 130,258,314,230,258,900 | 32.83812 | 91 | 0.59892 | false |
Changaco/oh-mainline | vendor/packages/gdata/tests/gdata_tests/gauth_test.py | 16 | 30751 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = '[email protected] (Jeff Scudder)'
import unittest
import gdata.gauth
import atom.http_core
import gdata.test_config as conf
PRIVATE_TEST_KEY = """
-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
Lw03eHTNQghS0A==
-----END PRIVATE KEY-----"""
class AuthSubTest(unittest.TestCase):
def test_generate_request_url(self):
url = gdata.gauth.generate_auth_sub_url('http://example.com',
['http://example.net/scope1'])
self.assert_(isinstance(url, atom.http_core.Uri))
self.assertEqual(url.query['secure'], '0')
self.assertEqual(url.query['session'], '1')
self.assertEqual(url.query['scope'], 'http://example.net/scope1')
self.assertEqual(atom.http_core.Uri.parse_uri(
url.query['next']).query['auth_sub_scopes'],
'http://example.net/scope1')
self.assertEqual(atom.http_core.Uri.parse_uri(url.query['next']).path,
'/')
self.assertEqual(atom.http_core.Uri.parse_uri(url.query['next']).host,
'example.com')
def test_from_url(self):
token_str = gdata.gauth.auth_sub_string_from_url(
'http://example.com/?token=123abc')[0]
self.assertEqual(token_str, '123abc')
def test_from_http_body(self):
token_str = gdata.gauth.auth_sub_string_from_body('Something\n'
'Token=DQAA...7DCTN\n'
'Expiration=20061004T123456Z\n')
self.assertEqual(token_str, 'DQAA...7DCTN')
def test_modify_request(self):
token = gdata.gauth.AuthSubToken('tval')
request = atom.http_core.HttpRequest()
token.modify_request(request)
self.assertEqual(request.headers['Authorization'], 'AuthSub token=tval')
def test_create_and_upgrade_tokens(self):
token = gdata.gauth.AuthSubToken.from_url(
'http://example.com/?token=123abc')
self.assert_(isinstance(token, gdata.gauth.AuthSubToken))
self.assertEqual(token.token_string, '123abc')
self.assertEqual(token.scopes, [])
token._upgrade_token('Token=456def')
self.assertEqual(token.token_string, '456def')
self.assertEqual(token.scopes, [])
class SecureAuthSubTest(unittest.TestCase):
def test_build_data(self):
request = atom.http_core.HttpRequest(method='PUT')
request.uri = atom.http_core.Uri.parse_uri('http://example.com/foo?a=1')
data = gdata.gauth.build_auth_sub_data(request, 1234567890, 'mynonce')
self.assertEqual(data,
'PUT http://example.com/foo?a=1 1234567890 mynonce')
def test_generate_signature(self):
request = atom.http_core.HttpRequest(
method='GET', uri=atom.http_core.Uri(host='example.com', path='/foo',
query={'a': '1'}))
data = gdata.gauth.build_auth_sub_data(request, 1134567890, 'p234908')
self.assertEqual(data,
'GET http://example.com/foo?a=1 1134567890 p234908')
self.assertEqual(
gdata.gauth.generate_signature(data, PRIVATE_TEST_KEY),
'GeBfeIDnT41dvLquPgDB4U5D4hfxqaHk/5LX1kccNBnL4BjsHWU1djbEp7xp3BL9ab'
'QtLrK7oa/aHEHtGRUZGg87O+ND8iDPR76WFXAruuN8O8GCMqCDdPduNPY++LYO4MdJ'
'BZNY974Nn0m6Hc0/T4M1ElqvPhl61fkXMm+ElSM=')
class TokensToAndFromBlobsTest(unittest.TestCase):
def test_client_login_conversion(self):
token = gdata.gauth.ClientLoginToken('test|key')
copy = gdata.gauth.token_from_blob(gdata.gauth.token_to_blob(token))
self.assertEqual(token.token_string, copy.token_string)
self.assert_(isinstance(copy, gdata.gauth.ClientLoginToken))
def test_authsub_conversion(self):
token = gdata.gauth.AuthSubToken('test|key')
copy = gdata.gauth.token_from_blob(gdata.gauth.token_to_blob(token))
self.assertEqual(token.token_string, copy.token_string)
self.assert_(isinstance(copy, gdata.gauth.AuthSubToken))
scopes = ['http://example.com', 'http://other||test', 'thir|d']
token = gdata.gauth.AuthSubToken('key-=', scopes)
copy = gdata.gauth.token_from_blob(gdata.gauth.token_to_blob(token))
self.assertEqual(token.token_string, copy.token_string)
self.assert_(isinstance(copy, gdata.gauth.AuthSubToken))
self.assertEqual(token.scopes, scopes)
def test_join_and_split(self):
token_string = gdata.gauth._join_token_parts('1x', 'test|string', '%x%',
'', None)
self.assertEqual(token_string, '1x|test%7Cstring|%25x%25||')
token_type, a, b, c, d = gdata.gauth._split_token_parts(token_string)
self.assertEqual(token_type, '1x')
self.assertEqual(a, 'test|string')
self.assertEqual(b, '%x%')
self.assert_(c is None)
self.assert_(d is None)
def test_secure_authsub_conversion(self):
token = gdata.gauth.SecureAuthSubToken(
'%^%', 'myRsaKey', ['http://example.com', 'http://example.org'])
copy = gdata.gauth.token_from_blob(gdata.gauth.token_to_blob(token))
self.assertEqual(copy.token_string, '%^%')
self.assertEqual(copy.rsa_private_key, 'myRsaKey')
self.assertEqual(copy.scopes,
['http://example.com', 'http://example.org'])
token = gdata.gauth.SecureAuthSubToken(rsa_private_key='f',
token_string='b')
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(blob, '1s|b|f')
copy = gdata.gauth.token_from_blob(blob)
self.assertEqual(copy.token_string, 'b')
self.assertEqual(copy.rsa_private_key, 'f')
self.assertEqual(copy.scopes, [])
token = gdata.gauth.SecureAuthSubToken(None, '')
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(blob, '1s||')
copy = gdata.gauth.token_from_blob(blob)
self.assertEqual(copy.token_string, None)
self.assertEqual(copy.rsa_private_key, None)
self.assertEqual(copy.scopes, [])
token = gdata.gauth.SecureAuthSubToken('', None)
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(blob, '1s||')
copy = gdata.gauth.token_from_blob(blob)
self.assertEqual(copy.token_string, None)
self.assertEqual(copy.rsa_private_key, None)
self.assertEqual(copy.scopes, [])
token = gdata.gauth.SecureAuthSubToken(
None, None, ['http://example.net', 'http://google.com'])
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(
blob, '1s|||http%3A%2F%2Fexample.net|http%3A%2F%2Fgoogle.com')
copy = gdata.gauth.token_from_blob(blob)
self.assert_(copy.token_string is None)
self.assert_(copy.rsa_private_key is None)
self.assertEqual(copy.scopes, ['http://example.net', 'http://google.com'])
def test_oauth_rsa_conversion(self):
token = gdata.gauth.OAuthRsaToken(
'consumerKey', 'myRsa', 't', 'secret',
gdata.gauth.AUTHORIZED_REQUEST_TOKEN, 'http://example.com/next',
'verifier')
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(
blob, '1r|consumerKey|myRsa|t|secret|2|http%3A%2F%2Fexample.com'
'%2Fnext|verifier')
copy = gdata.gauth.token_from_blob(blob)
self.assert_(isinstance(copy, gdata.gauth.OAuthRsaToken))
self.assertEqual(copy.consumer_key, token.consumer_key)
self.assertEqual(copy.rsa_private_key, token.rsa_private_key)
self.assertEqual(copy.token, token.token)
self.assertEqual(copy.token_secret, token.token_secret)
self.assertEqual(copy.auth_state, token.auth_state)
self.assertEqual(copy.next, token.next)
self.assertEqual(copy.verifier, token.verifier)
token = gdata.gauth.OAuthRsaToken(
'', 'myRsa', 't', 'secret', 0)
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(blob, '1r||myRsa|t|secret|0||')
copy = gdata.gauth.token_from_blob(blob)
self.assert_(isinstance(copy, gdata.gauth.OAuthRsaToken))
self.assert_(copy.consumer_key != token.consumer_key)
self.assert_(copy.consumer_key is None)
self.assertEqual(copy.rsa_private_key, token.rsa_private_key)
self.assertEqual(copy.token, token.token)
self.assertEqual(copy.token_secret, token.token_secret)
self.assertEqual(copy.auth_state, token.auth_state)
self.assertEqual(copy.next, token.next)
self.assert_(copy.next is None)
self.assertEqual(copy.verifier, token.verifier)
self.assert_(copy.verifier is None)
token = gdata.gauth.OAuthRsaToken(
rsa_private_key='myRsa', token='t', token_secret='secret',
auth_state=gdata.gauth.ACCESS_TOKEN, verifier='v', consumer_key=None)
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(blob, '1r||myRsa|t|secret|3||v')
copy = gdata.gauth.token_from_blob(blob)
self.assertEqual(copy.consumer_key, token.consumer_key)
self.assert_(copy.consumer_key is None)
self.assertEqual(copy.rsa_private_key, token.rsa_private_key)
self.assertEqual(copy.token, token.token)
self.assertEqual(copy.token_secret, token.token_secret)
self.assertEqual(copy.auth_state, token.auth_state)
self.assertEqual(copy.next, token.next)
self.assert_(copy.next is None)
self.assertEqual(copy.verifier, token.verifier)
def test_oauth_hmac_conversion(self):
token = gdata.gauth.OAuthHmacToken(
'consumerKey', 'consumerSecret', 't', 'secret',
gdata.gauth.REQUEST_TOKEN, 'http://example.com/next', 'verifier')
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(
blob, '1h|consumerKey|consumerSecret|t|secret|1|http%3A%2F%2F'
'example.com%2Fnext|verifier')
copy = gdata.gauth.token_from_blob(blob)
self.assert_(isinstance(copy, gdata.gauth.OAuthHmacToken))
self.assertEqual(copy.consumer_key, token.consumer_key)
self.assertEqual(copy.consumer_secret, token.consumer_secret)
self.assertEqual(copy.token, token.token)
self.assertEqual(copy.token_secret, token.token_secret)
self.assertEqual(copy.auth_state, token.auth_state)
self.assertEqual(copy.next, token.next)
self.assertEqual(copy.verifier, token.verifier)
token = gdata.gauth.OAuthHmacToken(
consumer_secret='c,s', token='t', token_secret='secret',
auth_state=7, verifier='v', consumer_key=None)
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(blob, '1h||c%2Cs|t|secret|7||v')
copy = gdata.gauth.token_from_blob(blob)
self.assert_(isinstance(copy, gdata.gauth.OAuthHmacToken))
self.assertEqual(copy.consumer_key, token.consumer_key)
self.assert_(copy.consumer_key is None)
self.assertEqual(copy.consumer_secret, token.consumer_secret)
self.assertEqual(copy.token, token.token)
self.assertEqual(copy.token_secret, token.token_secret)
self.assertEqual(copy.auth_state, token.auth_state)
self.assertEqual(copy.next, token.next)
self.assert_(copy.next is None)
self.assertEqual(copy.verifier, token.verifier)
def test_oauth2_conversion(self):
token = gdata.gauth.OAuth2Token(
'clientId', 'clientSecret', 'https://www.google.com/calendar/feeds',
'userAgent', 'https://accounts.google.com/o/oauth2/auth',
'https://accounts.google.com/o/oauth2/token',
'accessToken', 'refreshToken')
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(
blob, '2o|clientId|clientSecret|https%3A%2F%2Fwww.google.com%2F'
'calendar%2Ffeeds|userAgent|https%3A%2F%2Faccounts.google.com%2F'
'o%2Foauth2%2Fauth|https%3A%2F%2Faccounts.google.com%2Fo%2Foauth2'
'%2Ftoken|accessToken|refreshToken')
copy = gdata.gauth.token_from_blob(blob)
self.assert_(isinstance(copy, gdata.gauth.OAuth2Token))
self.assertEqual(copy.client_id, token.client_id)
self.assertEqual(copy.client_secret, token.client_secret)
self.assertEqual(copy.scope, token.scope)
self.assertEqual(copy.user_agent, token.user_agent)
self.assertEqual(copy.auth_uri, token.auth_uri)
self.assertEqual(copy.token_uri, token.token_uri)
self.assertEqual(copy.access_token, token.access_token)
self.assertEqual(copy.refresh_token, token.refresh_token)
token = gdata.gauth.OAuth2Token(
'clientId', 'clientSecret', 'https://www.google.com/calendar/feeds',
'', 'https://accounts.google.com/o/oauth2/auth',
'https://accounts.google.com/o/oauth2/token',
'', '')
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(
blob, '2o|clientId|clientSecret|https%3A%2F%2Fwww.google.com%2F'
'calendar%2Ffeeds||https%3A%2F%2Faccounts.google.com%2F'
'o%2Foauth2%2Fauth|https%3A%2F%2Faccounts.google.com%2Fo%2Foauth2'
'%2Ftoken||')
copy = gdata.gauth.token_from_blob(blob)
self.assert_(isinstance(copy, gdata.gauth.OAuth2Token))
self.assertEqual(copy.client_id, token.client_id)
self.assertEqual(copy.client_secret, token.client_secret)
self.assertEqual(copy.scope, token.scope)
self.assert_(copy.user_agent is None)
self.assertEqual(copy.auth_uri, token.auth_uri)
self.assertEqual(copy.token_uri, token.token_uri)
self.assert_(copy.access_token is None)
self.assert_(copy.refresh_token is None)
token = gdata.gauth.OAuth2Token(
'clientId', 'clientSecret', 'https://www.google.com/calendar/feeds',
None, 'https://accounts.google.com/o/oauth2/auth',
'https://accounts.google.com/o/oauth2/token')
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(
blob, '2o|clientId|clientSecret|https%3A%2F%2Fwww.google.com%2F'
'calendar%2Ffeeds||https%3A%2F%2Faccounts.google.com%2F'
'o%2Foauth2%2Fauth|https%3A%2F%2Faccounts.google.com%2Fo%2Foauth2'
'%2Ftoken||')
copy = gdata.gauth.token_from_blob(blob)
self.assert_(isinstance(copy, gdata.gauth.OAuth2Token))
self.assertEqual(copy.client_id, token.client_id)
self.assertEqual(copy.client_secret, token.client_secret)
self.assertEqual(copy.scope, token.scope)
self.assert_(copy.user_agent is None)
self.assertEqual(copy.auth_uri, token.auth_uri)
self.assertEqual(copy.token_uri, token.token_uri)
self.assert_(copy.access_token is None)
self.assert_(copy.refresh_token is None)
def test_illegal_token_types(self):
class MyToken(object):
pass
token = MyToken()
self.assertRaises(gdata.gauth.UnsupportedTokenType,
gdata.gauth.token_to_blob, token)
blob = '~~z'
self.assertRaises(gdata.gauth.UnsupportedTokenType,
gdata.gauth.token_from_blob, blob)
class OAuthHmacTokenTests(unittest.TestCase):
def test_build_base_string(self):
request = atom.http_core.HttpRequest('http://example.com/', 'GET')
base_string = gdata.gauth.build_oauth_base_string(
request, 'example.org', '12345', gdata.gauth.HMAC_SHA1, 1246301653,
'1.0')
self.assertEqual(
base_string, 'GET&http%3A%2F%2Fexample.com%2F&oauth_callback%3Doob%2'
'6oauth_consumer_key%3Dexample.org%26oauth_nonce%3D12345%26oauth_sig'
'nature_method%3DHMAC-SHA1%26oauth_timestamp%3D1246301653%26oauth_ve'
'rsion%3D1.0')
# Test using example from documentation.
request = atom.http_core.HttpRequest(
'http://www.google.com/calendar/feeds/default/allcalendars/full'
'?orderby=starttime', 'GET')
base_string = gdata.gauth.build_oauth_base_string(
request, 'example.com', '4572616e48616d6d65724c61686176',
gdata.gauth.RSA_SHA1, 137131200, '1.0', token='1%2Fab3cd9j4ks73hf7g',
next='http://googlecodesamples.com/oauth_playground/index.php')
self.assertEqual(
base_string, 'GET&http%3A%2F%2Fwww.google.com%2Fcalendar%2Ffeeds%2Fd'
'efault%2Fallcalendars%2Ffull&oauth_callback%3Dhttp%253A%252F%252Fgo'
'oglecodesamples.com%252Foauth_playground%252Findex.php%26oauth_cons'
'umer_key%3Dexample.com%26oauth_nonce%3D4572616e48616d6d65724c616861'
'76%26oauth_signature_method%3DRSA-SHA1%26oauth_timestamp%3D13713120'
'0%26oauth_token%3D1%25252Fab3cd9j4ks73hf7g%26oauth_version%3D1.0%26'
'orderby%3Dstarttime')
# Test various defaults.
request = atom.http_core.HttpRequest('http://eXample.COM', 'get')
base_string = gdata.gauth.build_oauth_base_string(
request, 'example.org', '12345', gdata.gauth.HMAC_SHA1, 1246301653,
'1.0')
self.assertEqual(
base_string, 'GET&http%3A%2F%2Fexample.com%2F&oauth_callback%3Doob%2'
'6oauth_consumer_key%3Dexample.org%26oauth_nonce%3D12345%26oauth_sig'
'nature_method%3DHMAC-SHA1%26oauth_timestamp%3D1246301653%26oauth_ve'
'rsion%3D1.0')
request = atom.http_core.HttpRequest('https://eXample.COM:443', 'get')
base_string = gdata.gauth.build_oauth_base_string(
request, 'example.org', '12345', gdata.gauth.HMAC_SHA1, 1246301653,
'1.0', 'http://googlecodesamples.com/oauth_playground/index.php')
self.assertEqual(
base_string, 'GET&https%3A%2F%2Fexample.com%2F&oauth_callback%3Dhttp'
'%253A%252F%252Fgooglecodesamples.com%252Foauth_playground%252Findex'
'.php%26oauth_consumer_key%3Dexample.org%26oauth_nonce%3D12345%26oau'
'th_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1246301653%26oa'
'uth_version%3D1.0')
request = atom.http_core.HttpRequest('http://eXample.COM:443', 'get')
base_string = gdata.gauth.build_oauth_base_string(
request, 'example.org', '12345', gdata.gauth.HMAC_SHA1, 1246301653,
'1.0')
self.assertEqual(
base_string, 'GET&http%3A%2F%2Fexample.com%3A443%2F&oauth_callback%3'
'Doob%26oauth_consumer_key%3De'
'xample.org%26oauth_nonce%3D12345%26oauth_signature_method%3DHMAC-SH'
'A1%26oauth_timestamp%3D1246301653%26oauth_version%3D1.0')
request = atom.http_core.HttpRequest(
atom.http_core.Uri(host='eXample.COM'), 'GET')
base_string = gdata.gauth.build_oauth_base_string(
request, 'example.org', '12345', gdata.gauth.HMAC_SHA1, 1246301653,
'1.0', next='oob')
self.assertEqual(
base_string, 'GET&http%3A%2F%2Fexample.com%2F&oauth_callback%3Doob%2'
'6oauth_consumer_key%3Dexample.org%26oauth_nonce%3D12345%26oauth_sig'
'nature_method%3DHMAC-SHA1%26oauth_timestamp%3D1246301653%26oauth_ve'
'rsion%3D1.0')
request = atom.http_core.HttpRequest(
'https://www.google.com/accounts/OAuthGetRequestToken', 'GET')
request.uri.query['scope'] = ('https://docs.google.com/feeds/'
' http://docs.google.com/feeds/')
base_string = gdata.gauth.build_oauth_base_string(
request, 'anonymous', '48522759', gdata.gauth.HMAC_SHA1, 1246489532,
'1.0', 'http://googlecodesamples.com/oauth_playground/index.php')
self.assertEqual(
base_string, 'GET&https%3A%2F%2Fwww.google.com%2Faccounts%2FOAuthGet'
'RequestToken&oauth_callback%3Dhttp%253A%252F%252Fgooglecodesamples.'
'com%252Foauth_playground%252Findex.php%26oauth_consumer_key%3Danony'
'mous%26oauth_nonce%3D4852275'
'9%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D12464895'
'32%26oauth_version%3D1.0%26scope%3Dhttps%253A%252F%252Fdocs.google.'
'com%252Ffeeds%252F%2520http%253A%252F%252Fdocs.google.com%252Ffeeds'
'%252F')
def test_generate_hmac_signature(self):
# Use the example from the OAuth playground:
# http://googlecodesamples.com/oauth_playground/
request = atom.http_core.HttpRequest(
'https://www.google.com/accounts/OAuthGetRequestToken?'
'scope=http%3A%2F%2Fwww.blogger.com%2Ffeeds%2F', 'GET')
signature = gdata.gauth.generate_hmac_signature(
request, 'anonymous', 'anonymous', '1246491360',
'c0155b3f28697c029e7a62efff44bd46', '1.0',
next='http://googlecodesamples.com/oauth_playground/index.php')
self.assertEqual(signature, '5a2GPdtAY3LWYv8IdiT3wp1Coeg=')
    # Try the same request but with a non-escaped Uri object.
request = atom.http_core.HttpRequest(
'https://www.google.com/accounts/OAuthGetRequestToken', 'GET')
request.uri.query['scope'] = 'http://www.blogger.com/feeds/'
signature = gdata.gauth.generate_hmac_signature(
request, 'anonymous', 'anonymous', '1246491360',
'c0155b3f28697c029e7a62efff44bd46', '1.0',
'http://googlecodesamples.com/oauth_playground/index.php')
self.assertEqual(signature, '5a2GPdtAY3LWYv8IdiT3wp1Coeg=')
# A different request also checked against the OAuth playground.
request = atom.http_core.HttpRequest(
'https://www.google.com/accounts/OAuthGetRequestToken', 'GET')
request.uri.query['scope'] = ('https://www.google.com/analytics/feeds/ '
'http://www.google.com/base/feeds/ '
'http://www.google.com/calendar/feeds/')
signature = gdata.gauth.generate_hmac_signature(
request, 'anonymous', 'anonymous', 1246491797,
'33209c4d7a09be4eb1d6ff18e00f8548', '1.0',
next='http://googlecodesamples.com/oauth_playground/index.php')
self.assertEqual(signature, 'kFAgTTFDIWz4/xAabIlrcZZMTq8=')
class OAuthRsaTokenTests(unittest.TestCase):
def test_generate_rsa_signature(self):
request = atom.http_core.HttpRequest(
'https://www.google.com/accounts/OAuthGetRequestToken?'
'scope=http%3A%2F%2Fwww.blogger.com%2Ffeeds%2F', 'GET')
signature = gdata.gauth.generate_rsa_signature(
request, 'anonymous', PRIVATE_TEST_KEY, '1246491360',
'c0155b3f28697c029e7a62efff44bd46', '1.0',
next='http://googlecodesamples.com/oauth_playground/index.php')
self.assertEqual(
signature,
'bfMantdttKaTrwoxU87JiXmMeXhAiXPiq79a5XmLlOYwwlX06Pu7CafMp7hW1fPeZtL'
'4o9Sz3NvPI8GECCaZk7n5vi1EJ5/wfIQbddrC8j45joBG6gFSf4tRJct82dSyn6bd71'
'knwPZH1sKK46Y0ePJvEIDI3JDd7pRZuMM2sN8=')
class OAuth2TokenTests(unittest.TestCase):
def test_generate_authorize_url(self):
token = gdata.gauth.OAuth2Token('clientId', 'clientSecret',
'https://www.google.com/calendar/feeds',
'userAgent')
url = token.generate_authorize_url()
self.assertEqual(url,
'https://accounts.google.com/o/oauth2/auth?scope=https%3A%2F%2Fwww.google'
'.com%2Fcalendar%2Ffeeds&redirect_uri=oob&response_type=code&client_id='
'clientId')
url = token.generate_authorize_url('https://www.example.com/redirect', 'token')
self.assertEqual(url,
'https://accounts.google.com/o/oauth2/auth?scope=https%3A%2F%2Fwww.google'
'.com%2Fcalendar%2Ffeeds&redirect_uri=https%3A%2F%2Fwww.example.com%2F'
'redirect&response_type=token&client_id=clientId')
def test_modify_request(self):
token = gdata.gauth.OAuth2Token('clientId', 'clientSecret',
'https://www.google.com/calendar/feeds',
'userAgent', access_token='accessToken')
request = atom.http_core.HttpRequest()
token.modify_request(request)
self.assertEqual(request.headers['Authorization'], 'OAuth accessToken')
class OAuthHeaderTest(unittest.TestCase):
def test_generate_auth_header(self):
header = gdata.gauth.generate_auth_header(
'consumerkey', 1234567890, 'mynonce', 'unknown_sig_type', 'sig')
self.assert_(header.startswith('OAuth'))
self.assert_(header.find('oauth_nonce="mynonce"') > -1)
self.assert_(header.find('oauth_timestamp="1234567890"') > -1)
self.assert_(header.find('oauth_consumer_key="consumerkey"') > -1)
self.assert_(
header.find('oauth_signature_method="unknown_sig_type"') > -1)
self.assert_(header.find('oauth_version="1.0"') > -1)
self.assert_(header.find('oauth_signature="sig"') > -1)
header = gdata.gauth.generate_auth_header(
'consumer/key', 1234567890, 'ab%&33', '', 'ab/+-_=')
self.assert_(header.find('oauth_nonce="ab%25%2633"') > -1)
self.assert_(header.find('oauth_consumer_key="consumer%2Fkey"') > -1)
self.assert_(header.find('oauth_signature_method=""') > -1)
self.assert_(header.find('oauth_signature="ab%2F%2B-_%3D"') > -1)
class OAuthGetRequestToken(unittest.TestCase):
def test_request_hmac_request_token(self):
request = gdata.gauth.generate_request_for_request_token(
'anonymous', gdata.gauth.HMAC_SHA1,
['http://www.blogger.com/feeds/',
'http://www.google.com/calendar/feeds/'],
consumer_secret='anonymous')
request_uri = str(request.uri)
self.assert_('http%3A%2F%2Fwww.blogger.com%2Ffeeds%2F' in request_uri)
self.assert_(
'http%3A%2F%2Fwww.google.com%2Fcalendar%2Ffeeds%2F' in request_uri)
auth_header = request.headers['Authorization']
self.assert_('oauth_consumer_key="anonymous"' in auth_header)
self.assert_('oauth_signature_method="HMAC-SHA1"' in auth_header)
self.assert_('oauth_version="1.0"' in auth_header)
self.assert_('oauth_signature="' in auth_header)
self.assert_('oauth_nonce="' in auth_header)
self.assert_('oauth_timestamp="' in auth_header)
def test_request_rsa_request_token(self):
request = gdata.gauth.generate_request_for_request_token(
'anonymous', gdata.gauth.RSA_SHA1,
['http://www.blogger.com/feeds/',
'http://www.google.com/calendar/feeds/'],
rsa_key=PRIVATE_TEST_KEY)
request_uri = str(request.uri)
self.assert_('http%3A%2F%2Fwww.blogger.com%2Ffeeds%2F' in request_uri)
self.assert_(
'http%3A%2F%2Fwww.google.com%2Fcalendar%2Ffeeds%2F' in request_uri)
auth_header = request.headers['Authorization']
self.assert_('oauth_consumer_key="anonymous"' in auth_header)
self.assert_('oauth_signature_method="RSA-SHA1"' in auth_header)
self.assert_('oauth_version="1.0"' in auth_header)
self.assert_('oauth_signature="' in auth_header)
self.assert_('oauth_nonce="' in auth_header)
self.assert_('oauth_timestamp="' in auth_header)
def test_extract_token_from_body(self):
body = ('oauth_token=4%2F5bNFM_efIu3yN-E9RrF1KfZzOAZG&oauth_token_secret='
'%2B4O49V9WUOkjXgpOobAtgYzy&oauth_callback_confirmed=true')
token, secret = gdata.gauth.oauth_token_info_from_body(body)
self.assertEqual(token, '4/5bNFM_efIu3yN-E9RrF1KfZzOAZG')
self.assertEqual(secret, '+4O49V9WUOkjXgpOobAtgYzy')
def test_hmac_request_token_from_body(self):
body = ('oauth_token=4%2F5bNFM_efIu3yN-E9RrF1KfZzOAZG&oauth_token_secret='
'%2B4O49V9WUOkjXgpOobAtgYzy&oauth_callback_confirmed=true')
request_token = gdata.gauth.hmac_token_from_body(body, 'myKey',
'mySecret', True)
self.assertEqual(request_token.consumer_key, 'myKey')
self.assertEqual(request_token.consumer_secret, 'mySecret')
self.assertEqual(request_token.token, '4/5bNFM_efIu3yN-E9RrF1KfZzOAZG')
self.assertEqual(request_token.token_secret, '+4O49V9WUOkjXgpOobAtgYzy')
self.assertEqual(request_token.auth_state, gdata.gauth.REQUEST_TOKEN)
def test_rsa_request_token_from_body(self):
body = ('oauth_token=4%2F5bNFM_efIu3yN-E9RrF1KfZzOAZG&oauth_token_secret='
'%2B4O49V9WUOkjXgpOobAtgYzy&oauth_callback_confirmed=true')
request_token = gdata.gauth.rsa_token_from_body(body, 'myKey',
'rsaKey', True)
self.assertEqual(request_token.consumer_key, 'myKey')
self.assertEqual(request_token.rsa_private_key, 'rsaKey')
self.assertEqual(request_token.token, '4/5bNFM_efIu3yN-E9RrF1KfZzOAZG')
self.assertEqual(request_token.token_secret, '+4O49V9WUOkjXgpOobAtgYzy')
self.assertEqual(request_token.auth_state, gdata.gauth.REQUEST_TOKEN)
class OAuthAuthorizeToken(unittest.TestCase):
def test_generate_authorization_url(self):
url = gdata.gauth.generate_oauth_authorization_url('/+=aosdpikk')
self.assert_(str(url).startswith(
'https://www.google.com/accounts/OAuthAuthorizeToken'))
self.assert_('oauth_token=%2F%2B%3Daosdpikk' in str(url))
def test_extract_auth_token(self):
url = ('http://www.example.com/test?oauth_token='
'CKF50YzIHxCT85KMAg&oauth_verifier=123zzz')
token = gdata.gauth.oauth_token_info_from_url(url)
self.assertEqual(token[0], 'CKF50YzIHxCT85KMAg')
self.assertEqual(token[1], '123zzz')
class FindScopesForService(unittest.TestCase):
def test_find_all_scopes(self):
count = 0
for key, scopes in gdata.gauth.AUTH_SCOPES.iteritems():
count += len(scopes)
self.assertEqual(count, len(gdata.gauth.find_scopes_for_services()))
def test_single_service(self):
self.assertEqual(
gdata.gauth.FindScopesForServices(('codesearch',)),
['http://www.google.com/codesearch/feeds/'])
def test_multiple_services(self):
self.assertEqual(
set(gdata.gauth.find_scopes_for_services(('jotspot', 'wise'))),
set(['http://sites.google.com/feeds/',
'https://sites.google.com/feeds/',
'https://spreadsheets.google.com/feeds/']))
def suite():
return conf.build_suite([AuthSubTest, TokensToAndFromBlobsTest,
OAuthHmacTokenTests, OAuthRsaTokenTests,
OAuthHeaderTest, OAuthGetRequestToken,
OAuthAuthorizeToken, FindScopesForService])
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | -3,943,616,419,657,142,300 | 45.172673 | 83 | 0.687295 | false |
yasserglez/pytiger2c | packages/pytiger2c/ast/__init__.py | 1 | 4014 | # -*- coding: utf-8 -*-
"""
Definition of the nodes of the abstract syntax tree.
"""
from pytiger2c.ast.assignmentnode import AssignmentNode
from pytiger2c.ast.ifthenstatementnode import IfThenStatementNode
from pytiger2c.ast.whilestatementnode import WhileStatementNode
from pytiger2c.ast.forstatementnode import ForStatementNode
from pytiger2c.ast.breakstatementnode import BreakStatementNode
from pytiger2c.ast.typedeclarationgroupnode import TypeDeclarationGroupNode
from pytiger2c.ast.functiondeclarationgroupnode import FunctionDeclarationGroupNode
from pytiger2c.ast.functiondeclarationnode import FunctionDeclarationNode
from pytiger2c.ast.proceduredeclarationnode import ProcedureDeclarationNode
from pytiger2c.ast.inferredvariabledeclarationnode import InferredVariableDeclarationNode
from pytiger2c.ast.staticvariabledeclarationnode import StaticVariableDeclarationNode
from pytiger2c.ast.aliastypedeclarationnode import AliasTypeDeclarationNode
from pytiger2c.ast.recorddeclarationnode import RecordDeclarationNode
from pytiger2c.ast.arraydeclarationnode import ArrayDeclarationNode
from pytiger2c.ast.nilexpressionnode import NilExpressionNode
from pytiger2c.ast.integerliteralexpressionnode import IntegerLiteralExpressionNode
from pytiger2c.ast.stringliteralexpressionnode import StringLiteralExpressionNode
from pytiger2c.ast.arrayliteralexpressionnode import ArrayLiteralExpressionNode
from pytiger2c.ast.recordliteralexpressionnode import RecordLiteralExpressionNode
from pytiger2c.ast.ifthenelsestatementnode import IfThenElseStatementNode
from pytiger2c.ast.functioncallnode import FunctionCallNode
from pytiger2c.ast.letnode import LetNode
from pytiger2c.ast.expressionsequencenode import ExpressionSequenceNode
from pytiger2c.ast.variableaccessnode import VariableAccessNode
from pytiger2c.ast.recordaccessnode import RecordAccessNode
from pytiger2c.ast.arrayaccessnode import ArrayAccessNode
from pytiger2c.ast.unaryminusoperatornode import UnaryMinusOperatorNode
from pytiger2c.ast.plusoperatornode import PlusOperatorNode
from pytiger2c.ast.minusoperatornode import MinusOperatorNode
from pytiger2c.ast.timesoperatornode import TimesOperatorNode
from pytiger2c.ast.divideoperatornode import DivideOperatorNode
from pytiger2c.ast.equalsoperatornode import EqualsOperatorNode
from pytiger2c.ast.notequalsoperatornode import NotEqualsOperatorNode
from pytiger2c.ast.lessthanoperatornode import LessThanOperatorNode
from pytiger2c.ast.lessequalsthanoperatornode import LessEqualsThanOperatorNode
from pytiger2c.ast.greaterthanoperatornode import GreaterThanOperatorNode
from pytiger2c.ast.greaterequalsthanoperatornode import GreaterEqualsThanOperatorNode
from pytiger2c.ast.andoperatornode import AndOperatorNode
from pytiger2c.ast.oroperatornode import OrOperatorNode
# Members that should be imported when "from pytiger2c.ast import *" is used.
__all__ = [
'AssignmentNode',
'IfThenStatementNode',
'IfThenElseStatementNode',
'WhileStatementNode',
'BreakStatementNode',
'PlusOperatorNode',
'MinusOperatorNode',
'TimesOperatorNode',
'DivideOperatorNode',
'EqualsOperatorNode',
'NotEqualsOperatorNode',
'LessThanOperatorNode',
'LessEqualsThanOperatorNode',
'GreaterThanOperatorNode',
'GreaterEqualsThanOperatorNode',
'AndOperatorNode',
'OrOperatorNode',
'UnaryMinusOperatorNode',
'NilExpressionNode',
'IntegerLiteralExpressionNode',
'StringLiteralExpressionNode',
'ExpressionSequenceNode',
'ForStatementNode',
'AliasTypeDeclarationNode',
'InferredVariableDeclarationNode',
'StaticVariableDeclarationNode',
'VariableAccessNode',
'ArrayAccessNode',
'RecordAccessNode',
'TypeDeclarationGroupNode',
'FunctionDeclarationGroupNode',
'FunctionCallNode',
'ArrayDeclarationNode',
'LetNode',
'RecordDeclarationNode',
'RecordLiteralExpressionNode',
'ArrayLiteralExpressionNode',
'ProcedureDeclarationNode',
'FunctionDeclarationNode',
]
| mit | -3,014,361,298,434,665,500 | 44.067416 | 89 | 0.84717 | false |
Ultimaker/Cura | scripts/line_length_checker.py | 1 | 2386 | import re
import sys
def getValue(line: str, key: str, default = None):
"""Convenience function that finds the value in a line of g-code.
When requesting key = x from line "G1 X100" the value 100 is returned.
"""
    if key not in line or (';' in line and line.find(key) > line.find(';')):
return default
sub_part = line[line.find(key) + 1:]
m = re.search('^-?[0-9]+\.?[0-9]*', sub_part)
if m is None:
return default
try:
return int(m.group(0))
except ValueError: #Not an integer.
try:
return float(m.group(0))
except ValueError: #Not a number at all.
return default
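# Illustrative examples of getValue (behaviour follows the docstring above):
#   getValue("G1 X100.5 Y20", "X")     -> 100.5
#   getValue("G1 Y20", "X", default=0) -> 0   (key absent, so the default is returned)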
def analyse(gcode, distance_to_report, print_layers = False):
lines_found = 0
previous_x = 0
previous_y = 0
dist_squared = distance_to_report * distance_to_report
current_layer = 0
for line in gcode.split("\n"):
if not line.startswith("G1"):
if line.startswith(";LAYER:"):
previous_x = 0
previous_y = 0
current_layer += 1
continue
current_x = getValue(line, "X")
current_y = getValue(line, "Y")
if current_x is None or current_y is None:
continue
diff_x = current_x - previous_x
diff_y = current_y - previous_y
if diff_x * diff_x + diff_y * diff_y < dist_squared:
lines_found += 1
if print_layers:
print("[!] ", distance_to_report, " layer ", current_layer, " ", previous_x, previous_y)
previous_y = current_y
previous_x = current_x
return lines_found
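# Illustrative call (gcode_text is a hypothetical g-code string): count the
# segments shorter than 0.1 mm and print the layer number of each one found:
#   num_short = analyse(gcode_text, 0.1, print_layers=True)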
def loadAndPrettyPrint(file_name):
print(file_name.replace(".gcode",""))
with open(file_name) as f:
data = f.read()
print("| Line length | Num segments |")
print("| ------------- | ------------- |")
print("| 1 |", analyse(data, 1), "|")
print("| 0.5 |", analyse(data, 0.5), "|")
print("| 0.1 |", analyse(data, 0.1), "|")
print("| 0.05 |", analyse(data, 0.05), "|")
print("| 0.01 |", analyse(data, 0.01), "|")
print("| 0.005 |", analyse(data, 0.005), "|")
print("| 0.001 |", analyse(data, 0.001), "|")
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: <input g-code>")
        sys.exit(1)
    in_filename = sys.argv[1]
    loadAndPrettyPrint(in_filename)
| lgpl-3.0 | -3,099,304,207,897,882,000 | 33.085714 | 104 | 0.533529 | false |
lnielsen/invenio | invenio/legacy/goto/webinterface.py | 2 | 3517 | ## This file is part of Invenio.
## Copyright (C) 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Implements persistent URLs
"""
import inspect
from invenio.config import CFG_SITE_URL
from invenio.ext.legacy.handler import WebInterfaceDirectory
from invenio.ext.logging import register_exception
from invenio.legacy.webuser import collect_user_info
from invenio.modules.redirector.api import get_redirection_data
from invenio.modules.redirector.registry import get_redirect_method
from invenio.utils.apache import SERVER_RETURN, HTTP_NOT_FOUND
from invenio.utils.url import redirect_to_url
class WebInterfaceGotoPages(WebInterfaceDirectory):
def _lookup(self, component, path):
try:
redirection_data = get_redirection_data(component)
goto_plugin = get_redirect_method(redirection_data['plugin'])
args, dummy_varargs, dummy_varkw, defaults = inspect.getargspec(goto_plugin)
args = args and list(args) or []
args.reverse()
defaults = defaults and list(defaults) or []
defaults.reverse()
params_to_pass = {}
for arg, default in map(None, args, defaults):
params_to_pass[arg] = default
def goto_handler(req, form):
## Let's put what is in the GET query
for key, value in dict(form).items():
if key in params_to_pass:
params_to_pass[key] = str(value)
## Let's override the params_to_pass to the call with the
## arguments in the configuration
configuration_parameters = redirection_data['parameters'] or {}
params_to_pass.update(configuration_parameters)
## Let's add default parameters if the plugin expects them
if 'component' in params_to_pass:
params_to_pass['component'] = component
if 'path' in params_to_pass:
params_to_pass['path'] = path
if 'user_info' in params_to_pass:
params_to_pass['user_info'] = collect_user_info(req)
if 'req' in params_to_pass:
params_to_pass['req'] = req
try:
new_url = goto_plugin(**params_to_pass)
except Exception as err:
register_exception(req=req, alert_admin=True)
raise SERVER_RETURN(HTTP_NOT_FOUND)
if new_url:
if new_url.startswith('/'):
new_url = CFG_SITE_URL + new_url
redirect_to_url(req, new_url)
else:
raise SERVER_RETURN(HTTP_NOT_FOUND)
return goto_handler, []
except ValueError:
return None, []
| gpl-2.0 | 7,060,681,742,422,500,000 | 42.419753 | 88 | 0.610179 | false |
wutron/dlcoal | dlcoal/deps/rasmus/textdraw.py | 4 | 2080 | # python libs
import sys
# rasmus libs
from rasmus import util
class TextCanvas:
"""Draw ascii art on a automatically growing matrix"""
def __init__(self, default=' '):
self.mat = util.Dict(dim=2, default=default)
self.default = default
def set(self, x, y, char):
self.mat[int(y)][int(x)] = char
def line(self, x1, y1, x2, y2, char='*'):
# swap coords if needed
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
nsamples = int(max(x2 - x1, y2 - y1, 1))
dx = (x2 - x1) / float(nsamples)
dy = (y2 - y1) / float(nsamples)
for i in xrange(nsamples):
self.set(x1 + i*dx, y1 + i*dy, char)
def text(self, x, y, text, dir="horizontal", width=10000):
x2 = 0
y2 = 0
if dir == "horizontal":
for i in xrange(len(text)):
if text[i] == "\n":
x2 = 0
y2 += 1
elif x2 < width:
x2 += 1
self.set(x+x2, y+y2, text[i])
elif dir == "vertical":
for i in xrange(len(text)):
if text[i] == "\n" or x2 > width:
y2 = 0
x2 += 1
elif x2 < width:
y2 += 1
self.set(x+x2, y+y2, text[i])
else:
raise Exception("unknown text direction '%s'" % dir)
def display(self, out=sys.stdout):
ykeys = util.sort(self.mat.keys())
y = min(ykeys)
for ykey in ykeys:
while y < ykey:
y += 1
out.write("\n")
row = self.mat[ykey]
xkeys = util.sort(row.keys())
x = 0
for xkey in xkeys:
while x < xkey:
x += 1
out.write(self.default)
out.write(row[xkey])
x += 1
out.write("\n")
| gpl-2.0 | 1,567,965,880,281,946,000 | 25.329114 | 64 | 0.402885 | false |
hub-cap/lady-rainicorn | rainicorn/openstack/common/loopingcall.py | 1 | 4681 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from eventlet import event
from eventlet import greenthread
from rainicorn.openstack.common.gettextutils import _ # noqa
from rainicorn.openstack.common import log as logging
from rainicorn.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCallBase(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
self.done = None
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
class FixedIntervalLoopingCall(LoopingCallBase):
"""A fixed interval looping call."""
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
start = timeutils.utcnow()
self.f(*self.args, **self.kw)
end = timeutils.utcnow()
if not self._running:
break
delay = interval - timeutils.delta_seconds(start, end)
if delay <= 0:
LOG.warn(_('task run outlasted interval by %s sec') %
-delay)
greenthread.sleep(delay if delay > 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn_n(_inner)
return self.done
# TODO(mikal): this class name is deprecated in Havana and should be removed
# in the I release
LoopingCall = FixedIntervalLoopingCall
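# Illustrative usage sketch (assumed caller code, not part of this module):
# poll every 5 seconds until the work is done, then collect the return value.
#
#     def _poll():
#         if work_finished():            # hypothetical helpers
#             raise LoopingCallDone(retvalue='done')
#         do_work()
#
#     timer = FixedIntervalLoopingCall(_poll)
#     timer.start(interval=5.0, initial_delay=1.0)
#     result = timer.wait()              # blocks until LoopingCallDone is raised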
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
The function called should return how long to sleep for before being
called again.
"""
def start(self, initial_delay=None, periodic_interval_max=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
idle = self.f(*self.args, **self.kw)
if not self._running:
break
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug(_('Dynamic looping call sleeping for %.02f '
'seconds'), idle)
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in dynamic looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done | apache-2.0 | -6,199,461,647,888,024,000 | 30.85034 | 78 | 0.58919 | false |
jonasjberg/autonameow | tests/devscripts/test_find_todo_comments.py | 1 | 21275 | # -*- coding: utf-8 -*-
# Copyright(c) 2016-2020 Jonas Sjöberg <[email protected]>
# Source repository: https://github.com/jonasjberg/autonameow
#
# This file is part of autonameow.
#
# autonameow is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# autonameow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with autonameow. If not, see <http://www.gnu.org/licenses/>.
from unittest import TestCase
from devscripts.find_todo_comments import find_todo_comments_in_text
class TestFindTodoCommentsInText(TestCase):
def test_returns_empty_list_given_empty_text(self):
actual = find_todo_comments_in_text('')
self.assertFalse(actual)
def _assert_finds(self, expected, given_text):
actual = find_todo_comments_in_text(given_text)
self.assertEqual(expected, actual)
def test_single_line_without_todo(self):
actual = find_todo_comments_in_text('foo')
self.assertEqual(list(), actual)
def test_single_line_with_single_todo(self):
actual = find_todo_comments_in_text('# TODO: foo\n')
self.assertEqual(['# TODO: foo'], actual)
def test_multiple_lines_with_single_todo(self):
actual = find_todo_comments_in_text('\nfoobar\n\n# TODO: foo\n')
self.assertEqual(['# TODO: foo'], actual)
def test_hack_todo_single_line(self):
self.skipTest('TODO')
self._assert_finds(
expected=[
'# TODO: [hack] Fix failing regression test 9017 properly!',
],
given_text='''
# TODO: [hack] Fix failing regression test 9017 properly!
''')
def test_hack_todo_multiple_lines(self):
self._assert_finds(
expected=[
'# TODO: [hack] Sorting is reversed so that,\n'
'# ..lower values in the "year" field.\n'
'# This applies only when foo are bar.'
],
given_text='''
# TODO: [hack] Sorting is reversed so that,
# ..lower values in the "year" field.
# This applies only when foo are bar.
''')
def test_hack_todo_multiple_lines_and_other_todo(self):
self._assert_finds(
expected=[
'# TODO: [hack] Sorting is reversed so that,\n'
'# ..lower values in the "year" field.\n'
'# This applies only when whatever ..',
'# TODO: foo bar',
],
given_text='''
# TODO: [hack] Sorting is reversed so that,
# ..lower values in the "year" field.
# This applies only when whatever ..
# TODO: foo bar
''')
def test_temporary_whole_file(self):
text = '''#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright(c) 2016-2020 Jonas Sjöberg <[email protected]>
# Source repository: https://github.com/jonasjberg/autonameow
#
# This file is part of autonameow.
#
# autonameow is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# autonameow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with autonameow. If not, see <http://www.gnu.org/licenses/>.
import logging
import sys
import time
from core import config
from core import constants as C
from core import event
from core import exceptions
from core import FileObject
from core import interactive
from core import logs
from core import master_provider
from core import persistence
from core.context import FileContext
from core.datastore import repository
from core.namebuilder import FilenamePostprocessor
from core.renamer import FileRenamer
from util import disk
from util import encoding as enc
from util import process
log = logging.getLogger(__name__)
class Autonameow(object):
"""
Main class to manage a running "autonameow" instance.
"""
def __init__(self, opts, ui, file_renamer=FileRenamer):
"""
Main program entry point. Initializes a autonameow instance/session.
Args:
opts: Dict with parsed and validated options.
"""
assert isinstance(opts, dict)
self.opts = check_option_combinations(opts)
# Package in 'autonameow/core/view' or equivalent interface.
self.ui = ui
# For calculating the total runtime.
self.start_time = time.time()
self.config = None
self.postprocessor = None
self.renamer = file_renamer(
dry_run=self.opts.get('dry_run'),
timid=self.opts.get('mode_timid')
)
self._exit_code = C.EXIT_SUCCESS
def __enter__(self):
self._dispatch_event_on_startup()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._dispatch_event_on_shutdown()
def _dispatch_event_on_startup(self):
# Send "global" startup call to all registered listeners.
event.dispatcher.on_startup(autonameow_instance=self)
def _dispatch_event_on_shutdown(self):
# Send "global" shutdown call to all registered listeners.
event.dispatcher.on_shutdown(autonameow_instance=self)
def run(self):
if self.opts.get('quiet'):
self.ui.silence()
# Display various information depending on verbosity level.
if self.opts.get('verbose') or self.opts.get('debug'):
self.ui.print_start_info()
# Display startup banner with program version and exit.
if self.opts.get('show_version'):
self.ui.print_version_info(verbose=self.opts.get('verbose'))
self.exit_program(C.EXIT_SUCCESS)
# Check the configuration file.
# If a specific config path was not passed in with the options
# and no config file is found in the OS-specific default paths,
# write the default "example" config, tell the user and exit.
if self.opts.get('config_path'):
self._load_config_from_options_config_path()
else:
filepath_default_config = persistence.DefaultConfigFilePath
if persistence.has_config_file():
self._load_config_from_path(filepath_default_config)
else:
log.info('No configuration file was found.')
self._write_example_config_to_path(filepath_default_config)
self.exit_program(C.EXIT_SUCCESS)
if not self.config:
log.critical('Unable to load configuration --- Aborting ..')
self.exit_program(C.EXIT_ERROR)
# Dispatch configuration change event.
config.set_global_configuration(self.config)
if self.opts.get('dump_options'):
self._dump_options()
if self.opts.get('dump_config'):
# TODO: [TD0148] Fix '!!python/object' in '--dump-config' output.
self._dump_active_config_and_exit()
if self.opts.get('dump_meowuris'):
self._dump_registered_meowuris()
# Initialize a re-usable post-processor.
# TODO: Improve access of nested configuration values.
self.postprocessor = FilenamePostprocessor(
lowercase_filename=self.config.get(['POST_PROCESSING', 'lowercase_filename']),
uppercase_filename=self.config.get(['POST_PROCESSING', 'uppercase_filename']),
regex_replacements=self.config.get(['POST_PROCESSING', 'replacements']),
simplify_unicode=self.config.get(['POST_PROCESSING', 'simplify_unicode'])
)
# Abort if input paths are missing.
if not self.opts.get('input_paths'):
log.warning('No input files specified ..')
self.exit_program(C.EXIT_SUCCESS)
# Path name encoding boundary. Returns list of paths in internal format.
files_to_process = self._collect_paths_from_opts()
log.info('Got %s files to process', len(files_to_process))
# Handle any input paths/files.
self._handle_files(files_to_process)
stats = 'Processed {t} files. Renamed {r} Skipped {s} ' \
'Failed {f}'.format(t=len(files_to_process),
r=self.renamer.stats['renamed'],
s=self.renamer.stats['skipped'],
f=self.renamer.stats['failed'])
log.info(stats)
self.exit_program(self.exit_code)
def _collect_paths_from_opts(self):
path_collector = disk.PathCollector(
ignore_globs=self.config.options['FILESYSTEM']['ignore'],
recurse=self.opts.get('recurse_paths')
)
path_collector.collect_from(self.opts.get('input_paths'))
for error in path_collector.errors:
log.warning(str(error))
return list(path_collector.filepaths)
@logs.log_func_runtime(log)
def load_config(self, path):
try:
self.config = persistence.load_config_from_file(path)
except exceptions.ConfigError as e:
log.critical('Unable to load configuration --- %s', e)
def _dump_options(self):
filepath_config = persistence.get_config_persistence_path()
filepath_default_config = persistence.DefaultConfigFilePath
include_opts = {
'config_filepath': '"{!s}"'.format(
enc.displayable_path(filepath_default_config)
),
'cache_directory_path': '"{!s}"'.format(
enc.displayable_path(filepath_config)
)
}
self.ui.options.prettyprint_options(self.opts, include_opts)
def _dump_active_config_and_exit(self):
self.ui.msg('Active Configuration:', style='heading')
self.ui.msg(str(self.config))
self.exit_program(C.EXIT_SUCCESS)
def _dump_registered_meowuris(self):
if self.opts.get('verbose'):
cf_registered = self.ui.ColumnFormatter()
cf_excluded = self.ui.ColumnFormatter()
for uri, klass in sorted(master_provider.Registry.meowuri_sources.items()):
cf_registered.addrow(str(uri), str(klass.name()))
for klass in sorted(master_provider.Registry.excluded_providers):
cf_excluded.addrow(str(klass.name()))
str_registered_providers = str(cf_registered)
str_excluded_providers = str(cf_excluded)
self.ui.msg('Registered MeowURIs', style='heading')
self.ui.msg(str_registered_providers)
self.ui.msg('Providers Excluded (unmet dependencies)', style='heading')
self.ui.msg(str_excluded_providers)
else:
meowuris = sorted(master_provider.Registry.mapped_meowuris)
self.ui.msg('\n'.join(str(m) for m in meowuris))
self.ui.msg('\n')
def _load_config_from_path(self, filepath):
str_filepath = enc.displayable_path(filepath)
log.info('Using configuration: "%s"', str_filepath)
self.load_config(filepath)
def _load_config_from_options_config_path(self):
filepath = self.opts.get('config_path')
self._load_config_from_path(filepath)
def _write_example_config_to_path(self, filepath):
str_filepath = enc.displayable_path(filepath)
try:
persistence.write_default_config()
except exceptions.ConfigError as e:
log.critical('Unable to write template configuration file to path: '
'"%s" --- %s', str_filepath, e)
self.exit_program(C.EXIT_ERROR)
message = 'Wrote default configuration file to "{!s}"'.format(str_filepath)
self.ui.msg(message, style='info')
    def _handle_files(self, filepaths):
"""
Main loop. Iterate over input paths/files.
Assume all state is setup and completely reset for each loop iteration.
It is not currently possible to share "information" between runs.
"""
aggregate_repository_contents = list()
should_list_all = self.opts.get('list_all')
for filepath in filepaths:
str_filepath = enc.displayable_path(filepath)
log.info('Processing: "%s"', str_filepath)
try:
current_file = FileObject(filepath)
except (exceptions.InvalidFileArgumentError,
exceptions.FilesystemError) as e:
log.warning('{%s} --- SKIPPING: "%s"', e, str_filepath)
continue
if should_list_all:
log.debug('Calling provider.delegate_every_possible_meowuri()')
master_provider.delegate_every_possible_meowuri(current_file)
if self.opts.get('mode_postprocess_only'):
new_name = str(current_file)
else:
context = FileContext(
fileobject=current_file,
ui=self.ui,
autonameow_exit_code=self.exit_code,
options=self.opts,
active_config=self.config,
masterprovider=master_provider
)
try:
new_name = context.find_new_name()
except exceptions.AutonameowException as e:
log.critical('%s --- SKIPPING: "%s"', e, str_filepath)
self.exit_code = C.EXIT_WARNING
continue
# TODO: [TD0153] Detect and clean up incrementally numbered files.
# TODO: [TD0154] Add "incrementing counter" template placeholder.
# TODO: Check destination path exists at a somewhat high level in
# order to trigger routines to handle [TD0153] and [TD0154].
if new_name:
postprocessed_new_name = self._do_post_processing(new_name)
self.renamer.add_pending(
from_path=current_file.abspath,
new_basename=postprocessed_new_name,
)
for filename_delta in self.renamer.skipped:
message = (
'Skipped "{!s}" because the current name is the same as '
'the new name'.format(filename_delta.displayable_old)
)
self.ui.msg(message)
for filename_delta in self.renamer.needs_confirmation:
# TODO: [cleanup] The renamer is a mess. Why pass 'timid'?
log.debug('Timid mode enabled. Asking user to confirm ..')
if interactive.ask_confirm_rename(filename_delta.displayable_old,
filename_delta.displayable_new):
self.renamer.confirm(filename_delta)
else:
self.renamer.reject(filename_delta)
# TODO: Display renames as they actually happen?
for filename_delta in self.renamer.pending:
self.ui.msg_rename(
filename_delta.displayable_old,
filename_delta.displayable_new,
dry_run=self.opts.get('dry_run')
)
try:
self.renamer.do_renames()
except exceptions.FilesystemError as e:
log.error('Rename FAILED: %s', e)
# TODO: [TD0131] Hack!
aggregate_repository_contents.append(str(repository.SessionRepository))
# TODO: [TD0131] Limit repository size! Do not remove everything!
# TODO: [TD0131] Keep all but very bulky data like extracted text.
repository.SessionRepository.remove(current_file)
if should_list_all:
if not aggregate_repository_contents:
str_repo = 'The session repository does not contain any data ..\n'
else:
str_repo = '\n'.join(aggregate_repository_contents)
self.ui.msg('Session Repository Data', style='heading')
self.ui.msg(str_repo)
def _do_post_processing(self, filename):
before = str(filename)
postprocessed_filename = self.postprocessor(filename)
after = str(postprocessed_filename)
if before != after:
self.ui.msg_filename_replacement(before, after)
return postprocessed_filename
@property
def runtime_seconds(self):
return time.time() - self.start_time
def exit_program(self, exit_code_value):
"""
Main program exit point. Shuts down this autonameow instance/session.
Args:
exit_code_value: Integer exit code to pass to the parent process.
Indicate success with 0, failure non-zero.
"""
self.exit_code = exit_code_value
elapsed_time = self.runtime_seconds
if self.opts and self.opts.get('verbose'):
self.ui.print_exit_info(self.exit_code, elapsed_time)
logs.log_previously_logged_runtimes(log)
log.debug('Exiting with exit code: %s', self.exit_code)
log.debug('Total execution time: %.6f seconds', elapsed_time)
self._dispatch_event_on_shutdown()
sys.exit(self.exit_code)
@property
def exit_code(self):
"""
Returns:
The current exit code for this autonameow instance as an integer.
"""
return self._exit_code
@exit_code.setter
def exit_code(self, value):
"""
Updates the exit code value for this autonameow instance.
The exit code is only actually updated if the given value is greater
than the current value. This makes errors take precedence over warnings.
Args:
value: Optional new exit status as an integer, preferably one of
the values in 'constants.py' prefixed 'EXIT_'.
"""
if isinstance(value, int) and value > self._exit_code:
log.debug('Exit code updated: %s -> %s', self._exit_code, value)
self._exit_code = value
def __hash__(self):
return hash((process.current_process_id(), self.start_time))
def check_option_combinations(options):
opts = dict(options)
# TODO: [cleanup] This is pretty messy ..
# Check legality of option combinations.
if opts.get('mode_automagic') and opts.get('mode_interactive'):
log.warning('Operating mode must be either one of "automagic" or '
'"interactive", not both. Reverting to default: '
'[interactive mode].')
opts['mode_automagic'] = False
opts['mode_interactive'] = True
if opts.get('mode_batch'):
if opts.get('mode_interactive'):
log.warning('Operating mode "batch" can not be used with '
'"interactive". Disabling "interactive"..')
opts['mode_interactive'] = False
if opts.get('mode_interactive'):
if opts.get('mode_timid'):
log.warning('Operating mode "interactive" implies "timid". '
'Disabling "timid"..')
opts['mode_timid'] = False
if opts.get('mode_postprocess_only'):
# Do not figure out a new name; do "post-processing" on existing.
if opts.get('mode_automagic'):
log.warning('Operating mode "automagic" can not be used with '
'"post-process only". Disabling "automagic".')
opts['mode_automagic'] = False
if opts.get('mode_interactive'):
log.warning('Operating mode "interactive" can not be used with '
'"post-process only". Disabling "interactive".')
opts['mode_interactive'] = False
return opts
'''
self._assert_finds(
expected=[
"# TODO: [TD0148] Fix '!!python/object' in '--dump-config' output.",
'# TODO: Improve access of nested configuration values.',
'# TODO: [TD0153] Detect and clean up incrementally numbered files.',
'# TODO: [TD0154] Add "incrementing counter" template placeholder.',
'# TODO: Check destination path exists at a somewhat high level in\n'
'# order to trigger routines to handle [TD0153] and [TD0154].',
"# TODO: [cleanup] The renamer is a mess. Why pass 'timid'?",
'# TODO: Display renames as they actually happen?',
'# TODO: [TD0131] Hack!',
'# TODO: [TD0131] Limit repository size! Do not remove everything!',
'# TODO: [TD0131] Keep all but very bulky data like extracted text.',
'# TODO: [cleanup] This is pretty messy ..',
],
given_text=text
)
| gpl-2.0 | -4,907,480,109,364,437,000 | 37.678182 | 90 | 0.598505 | false |
LinuxChristian/home-assistant | homeassistant/components/fan/isy994.py | 2 | 4317 | """
Support for ISY994 fans.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/fan.isy994/
"""
import logging
from typing import Callable
from homeassistant.components.fan import (FanEntity, DOMAIN, SPEED_OFF,
SPEED_LOW, SPEED_MEDIUM,
SPEED_HIGH)
import homeassistant.components.isy994 as isy
from homeassistant.const import STATE_UNKNOWN, STATE_ON, STATE_OFF
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
# Define term used for medium speed. This must be set as the fan component uses
# 'medium' which the ISY does not understand
ISY_SPEED_MEDIUM = 'med'
VALUE_TO_STATE = {
0: SPEED_OFF,
63: SPEED_LOW,
64: SPEED_LOW,
190: SPEED_MEDIUM,
191: SPEED_MEDIUM,
255: SPEED_HIGH,
}
STATE_TO_VALUE = {}
for key in VALUE_TO_STATE:
STATE_TO_VALUE[VALUE_TO_STATE[key]] = key
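# The loop above leaves STATE_TO_VALUE with one representative ISY value per
# speed name, e.g. roughly {'off': 0, 'low': 64, 'medium': 191, 'high': 255};
# which of the duplicate values (63/64, 190/191) wins depends on dict order.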
STATES = [SPEED_OFF, SPEED_LOW, ISY_SPEED_MEDIUM, SPEED_HIGH]
# pylint: disable=unused-argument
def setup_platform(hass, config: ConfigType,
add_devices: Callable[[list], None], discovery_info=None):
"""Set up the ISY994 fan platform."""
if isy.ISY is None or not isy.ISY.connected:
_LOGGER.error("A connection has not been made to the ISY controller")
return False
devices = []
for node in isy.filter_nodes(isy.NODES, states=STATES):
devices.append(ISYFanDevice(node))
for program in isy.PROGRAMS.get(DOMAIN, []):
try:
status = program[isy.KEY_STATUS]
actions = program[isy.KEY_ACTIONS]
assert actions.dtype == 'program', 'Not a program'
except (KeyError, AssertionError):
pass
else:
devices.append(ISYFanProgram(program.name, status, actions))
add_devices(devices)
class ISYFanDevice(isy.ISYDevice, FanEntity):
"""Representation of an ISY994 fan device."""
def __init__(self, node) -> None:
"""Initialize the ISY994 fan device."""
isy.ISYDevice.__init__(self, node)
@property
def speed(self) -> str:
"""Return the current speed."""
return self.state
@property
def state(self) -> str:
"""Get the state of the ISY994 fan device."""
return VALUE_TO_STATE.get(self.value, STATE_UNKNOWN)
def set_speed(self, speed: str) -> None:
"""Send the set speed command to the ISY994 fan device."""
if not self._node.on(val=STATE_TO_VALUE.get(speed, 0)):
_LOGGER.debug("Unable to set fan speed")
else:
self.speed = self.state
def turn_on(self, speed: str=None, **kwargs) -> None:
"""Send the turn on command to the ISY994 fan device."""
self.set_speed(speed)
def turn_off(self, **kwargs) -> None:
"""Send the turn off command to the ISY994 fan device."""
if not self._node.off():
_LOGGER.debug("Unable to set fan speed")
else:
self.speed = self.state
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
class ISYFanProgram(ISYFanDevice):
"""Representation of an ISY994 fan program."""
def __init__(self, name: str, node, actions) -> None:
"""Initialize the ISY994 fan program."""
ISYFanDevice.__init__(self, node)
self._name = name
self._actions = actions
self.speed = STATE_ON if self.is_on else STATE_OFF
@property
def state(self) -> str:
"""Get the state of the ISY994 fan program."""
return STATE_ON if bool(self.value) else STATE_OFF
def turn_off(self, **kwargs) -> None:
"""Send the turn on command to ISY994 fan program."""
if not self._actions.runThen():
_LOGGER.error("Unable to turn off the fan")
else:
self.speed = STATE_ON if self.is_on else STATE_OFF
def turn_on(self, **kwargs) -> None:
"""Send the turn off command to ISY994 fan program."""
if not self._actions.runElse():
_LOGGER.error("Unable to turn on the fan")
else:
self.speed = STATE_ON if self.is_on else STATE_OFF
| apache-2.0 | 6,836,543,730,617,045,000 | 31.216418 | 79 | 0.615474 | false |
zmywahrheit/PPB | PPB_calculate.py | 2 | 1499 |
###############Plasma Protein Binding (PPB) Prediction ###################
#Author:Mengyuan Zhu, Binghe Wang
#Email: [email protected]
#Department of Chemistry
#Georgia State University
#Usage: python PPB_calculate.py filename
##########################################################################
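#Example: python PPB_calculate.py aspirin.sdf (file name illustrative)
#Output: a single number, the predicted plasma protein binding percentage,
#        clamped to 0-100 and rounded to two decimals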
import pybel
from ctypes import *
import sys
lib=CDLL("lib/ppblib.so")
inputfile=pybel.readfile(sys.argv[1].split(".")[-1],sys.argv[1])
value=()
for mol in inputfile:
descvalues=mol.calcdesc()
value= value+(descvalues.get('TPSA'),)
value= value+(descvalues.get('HBD'),)
value= value+(descvalues.get('logP'),)
value= value+(descvalues.get('MW'),)
value= value+(descvalues.get('tbonds'),)
value= value+(descvalues.get('nF'),)
value= value+(descvalues.get('bonds'),)
value= value+(descvalues.get('atoms'),)
value= value+(descvalues.get('HBA1'),)
value= value+(descvalues.get('HBA2'),)
value= value+(descvalues.get('sbonds'),)
value= value+(descvalues.get('dbonds'),)
value= value+(descvalues.get('MR'),)
value= value+(descvalues.get('abonds'),)
smarts = pybel.Smarts("[+]")
num=smarts.findall(mol)
value= value+(len(num),)
smarts = pybel.Smarts("[-]")
num=smarts.findall(mol)
value= value+(len(num),)
i=0
array_type=c_double*16
value_c=array_type()
while i<16:
value_c[i]=value[i]
i=i+1
function=lib.PPB_ann
function.restype=c_double
result= function(byref(value_c))
if result >100:
result=100
if result<0:
result=0
print round(result,2)
| gpl-2.0 | 636,421,285,610,724,700 | 22.793651 | 74 | 0.643763 | false |
mscherer/ansible | plugins/inventory/vagrant.py | 37 | 3660 | #!/usr/bin/env python
"""
Vagrant external inventory script. Automatically finds the IP of the booted vagrant vm(s), and
returns it under the host group 'vagrant'
Example Vagrant configuration using this script:
config.vm.provision :ansible do |ansible|
ansible.playbook = "./provision/your_playbook.yml"
ansible.inventory_file = "./provision/inventory/vagrant.py"
ansible.verbose = true
end
"""
# Copyright (C) 2013 Mark Mandel <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Thanks to the spacewalk.py inventory script for giving me the basic structure
# of this.
#
import sys
import subprocess
import re
import string
from optparse import OptionParser
try:
import json
except:
import simplejson as json
# Options
#------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of Vagrant servers for Ansible")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
(options, args) = parser.parse_args()
#
# helper functions
#
# get all the ssh configs for all boxes in an array of dictionaries.
def get_ssh_config():
configs = []
boxes = list_running_boxes()
for box in boxes:
config = get_a_ssh_config(box)
configs.append(config)
return configs
#list all the running boxes
def list_running_boxes():
output = subprocess.check_output(["vagrant", "status"]).split('\n')
boxes = []
for line in output:
matcher = re.search("([^\s]+)[\s]+running \(.+", line)
if matcher:
boxes.append(matcher.group(1))
return boxes
#get the ssh config for a single box
def get_a_ssh_config(box_name):
"""Gives back a map of all the machine's ssh configurations"""
output = subprocess.check_output(["vagrant", "ssh-config", box_name]).split('\n')
config = {}
for line in output:
if line.strip() != '':
matcher = re.search("( )?([a-zA-Z]+) (.*)", line)
config[matcher.group(2)] = matcher.group(3)
return config
# List out servers that vagrant has running
#------------------------------
if options.list:
ssh_config = get_ssh_config()
hosts = { 'vagrant': []}
for data in ssh_config:
hosts['vagrant'].append(data['HostName'])
print json.dumps(hosts)
sys.exit(0)
# Get out the host details
#------------------------------
elif options.host:
result = {}
ssh_config = get_ssh_config()
details = filter(lambda x: (x['HostName'] == options.host), ssh_config)
if len(details) > 0:
#pass through the port, in case it's non standard.
result = details[0]
result['ansible_ssh_port'] = result['Port']
print json.dumps(result)
sys.exit(0)
# Print out help
#------------------------------
else:
parser.print_help()
sys.exit(0)
| gpl-3.0 | -3,744,634,004,048,204,000 | 26.727273 | 94 | 0.644809 | false |
jonyroda97/redbot-amigosprovaveis | lib/numpy/lib/tests/test_mixins.py | 13 | 6856 | from __future__ import division, absolute_import, print_function
import numbers
import operator
import sys
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_raises)
PY2 = sys.version_info.major < 3
# NOTE: This class should be kept as an exact copy of the example from the
# docstring for NDArrayOperatorsMixin.
class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
def __init__(self, value):
self.value = np.asarray(value)
# One might also consider adding the built-in list type to this
# list, to support operations like np.add(array_like, list)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.get('out', ())
for x in inputs + out:
# Only support operations with instances of _HANDLED_TYPES.
# Use ArrayLike instead of type(self) for isinstance to
# allow subclasses that don't override __array_ufunc__ to
# handle ArrayLike objects.
if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
return NotImplemented
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(x.value if isinstance(x, ArrayLike) else x
for x in inputs)
if out:
kwargs['out'] = tuple(
x.value if isinstance(x, ArrayLike) else x
for x in out)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple:
# multiple return values
return tuple(type(self)(x) for x in result)
elif method == 'at':
# no return value
return None
else:
# one return value
return type(self)(result)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.value)
def wrap_array_like(result):
if type(result) is tuple:
return tuple(ArrayLike(r) for r in result)
else:
return ArrayLike(result)
def _assert_equal_type_and_value(result, expected, err_msg=None):
assert_equal(type(result), type(expected), err_msg=err_msg)
if isinstance(result, tuple):
assert_equal(len(result), len(expected), err_msg=err_msg)
for result_item, expected_item in zip(result, expected):
_assert_equal_type_and_value(result_item, expected_item, err_msg)
else:
assert_equal(result.value, expected.value, err_msg=err_msg)
assert_equal(getattr(result.value, 'dtype', None),
getattr(expected.value, 'dtype', None), err_msg=err_msg)
_ALL_BINARY_OPERATORS = [
operator.lt,
operator.le,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.floordiv,
# TODO: test div on Python 2, only
operator.mod,
divmod,
pow,
operator.lshift,
operator.rshift,
operator.and_,
operator.xor,
operator.or_,
]
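# Quick illustration of the wrapped behaviour exercised by the tests below
# (values are examples only):
#   ArrayLike(np.array([1, 2])) + 1      -> ArrayLike(array([2, 3]))
#   np.negative(ArrayLike(np.array(3)))  -> ArrayLike(array(-3))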
class TestNDArrayOperatorsMixin(TestCase):
def test_array_like_add(self):
def check(result):
_assert_equal_type_and_value(result, ArrayLike(0))
check(ArrayLike(0) + 0)
check(0 + ArrayLike(0))
check(ArrayLike(0) + np.array(0))
check(np.array(0) + ArrayLike(0))
check(ArrayLike(np.array(0)) + 0)
check(0 + ArrayLike(np.array(0)))
check(ArrayLike(np.array(0)) + np.array(0))
check(np.array(0) + ArrayLike(np.array(0)))
def test_inplace(self):
array_like = ArrayLike(np.array([0]))
array_like += 1
_assert_equal_type_and_value(array_like, ArrayLike(np.array([1])))
array = np.array([0])
array += ArrayLike(1)
_assert_equal_type_and_value(array, ArrayLike(np.array([1])))
def test_opt_out(self):
class OptOut(object):
"""Object that opts out of __array_ufunc__."""
__array_ufunc__ = None
def __add__(self, other):
return self
def __radd__(self, other):
return self
array_like = ArrayLike(1)
opt_out = OptOut()
# supported operations
assert_(array_like + opt_out is opt_out)
assert_(opt_out + array_like is opt_out)
# not supported
with assert_raises(TypeError):
# don't use the Python default, array_like = array_like + opt_out
array_like += opt_out
with assert_raises(TypeError):
array_like - opt_out
with assert_raises(TypeError):
opt_out - array_like
def test_subclass(self):
class SubArrayLike(ArrayLike):
"""Should take precedence over ArrayLike."""
x = ArrayLike(0)
y = SubArrayLike(1)
_assert_equal_type_and_value(x + y, y)
_assert_equal_type_and_value(y + x, y)
def test_object(self):
x = ArrayLike(0)
obj = object()
with assert_raises(TypeError):
x + obj
with assert_raises(TypeError):
obj + x
with assert_raises(TypeError):
x += obj
def test_unary_methods(self):
array = np.array([-1, 0, 1, 2])
array_like = ArrayLike(array)
for op in [operator.neg,
operator.pos,
abs,
operator.invert]:
_assert_equal_type_and_value(op(array_like), ArrayLike(op(array)))
def test_forward_binary_methods(self):
array = np.array([-1, 0, 1, 2])
array_like = ArrayLike(array)
for op in _ALL_BINARY_OPERATORS:
expected = wrap_array_like(op(array, 1))
actual = op(array_like, 1)
err_msg = 'failed for operator {}'.format(op)
_assert_equal_type_and_value(expected, actual, err_msg=err_msg)
def test_reflected_binary_methods(self):
for op in _ALL_BINARY_OPERATORS:
expected = wrap_array_like(op(2, 1))
actual = op(2, ArrayLike(1))
err_msg = 'failed for operator {}'.format(op)
_assert_equal_type_and_value(expected, actual, err_msg=err_msg)
def test_ufunc_at(self):
array = ArrayLike(np.array([1, 2, 3, 4]))
assert_(np.negative.at(array, np.array([0, 1])) is None)
_assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4]))
def test_ufunc_two_outputs(self):
mantissa, exponent = np.frexp(2 ** -3)
expected = (ArrayLike(mantissa), ArrayLike(exponent))
_assert_equal_type_and_value(
np.frexp(ArrayLike(2 ** -3)), expected)
_assert_equal_type_and_value(
np.frexp(ArrayLike(np.array(2 ** -3))), expected)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 | 4,439,336,615,723,455,500 | 30.449541 | 78 | 0.580951 | false |
mcoder/private-set-intersection | simulate_psi.py | 1 | 3466 | __author__ = 'Milinda Perera'
import random as pyrandom
from time import time
from modules.psi_3_round_paillier import PSI3RoundPaillier
from modules.psi_2_round_paillier import PSI2RoundPaillier
from modules.psi_2_round_elgamal import PSI2RoundElGamal
def timer(func, *pargs, **kargs):
"""
Measures the time required to run func with the given parameters.
Returns the time as well as the result of the computation.
"""
start = time()
ret = func(*pargs, **kargs)
elapsed = time() - start
return elapsed, ret
def run_psi_3_round_paillier(server_set, client_set):
"""
Simulates the 3-round PSI protocol based on Paillier encryption scheme
on the given server and client sets. Returns the final output of the client.
"""
psi = PSI3RoundPaillier(1024)
server_out_1, server_state = psi.server_to_client_1(server_set)
client_out_1 = psi.client_to_server(client_set, **server_out_1)
server_out_2 = psi.server_to_client_2(client_out_1, **server_state)
client_out_2 = psi.client_output(server_out_2)
return client_out_2
def run_psi_2_round_paillier(server_set, client_set):
"""
Simulates the 2-round PSI protocol based on Paillier encryption scheme
on the given server and client sets. Returns the final output of the client.
"""
psi = PSI2RoundPaillier(1024)
client_out_1, client_state = psi.client_to_server(client_set)
server_out = psi.server_to_client(server_set, **client_out_1)
client_out_2 = psi.client_output(server_out, **client_state)
return client_out_2
def run_psi_2_round_elgamal(server_set, client_set):
"""
Simulates the 2-round PSI protocol based on ElGamal encryption scheme
on the given server and client sets. Returns the final output of the client.
"""
psi = PSI2RoundElGamal()
client_out_1, client_state = psi.client_to_server(client_set)
server_out = psi.server_to_client(server_set, **client_out_1)
client_out_2 = psi.client_output(server_out, **client_state)
return client_out_2
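# Illustrative non-interactive use of the helpers above (tiny made-up sets):
#   shared = [3, 5]
#   run_psi_2_round_elgamal([1, 2] + shared, [8, 9] + shared)
#   # -> the intersection {3, 5}, up to ordering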
if __name__ == '__main__':
# Obtain the server and client set lengths as well as the intersection length from the user.
set_len = input('Input set length: ')
intersection_len = input('Input intersection length: ')
# Generate server and client sets with above parameters.
server_set = []
client_set = []
while not (len(client_set) == len(server_set) == set_len):
server_set = list(set([pyrandom.randint(1, set_len * 10) for i in range(set_len * 5)]))[:set_len]
client_set = list(set([pyrandom.randint(set_len * 10, set_len * 20) for i in range(set_len * 5)]))[:set_len - intersection_len] + server_set[:intersection_len]
# Print generated sets as well as their intersection for comparison purposes.
print
print('server set: {0}'.format(sorted(server_set)))
print('client set: {0}'.format(sorted(client_set)))
print('intersection: {0}'.format(sorted(set(server_set) & set(client_set))))
print
sims = [['PSI3RoundPaillier', run_psi_3_round_paillier],
['PSI2RoundPaillier', run_psi_2_round_paillier],
['PSI2RoundElGamal', run_psi_2_round_elgamal]]
# Simulate the protocols and report results.
for sim in sims:
time_taken, result = timer(sim[1], server_set, client_set)
print('{0} output: {1}'.format(sim[0], sorted(result)))
print('{0} time: {1}'.format(sim[0], time_taken))
print | mit | -263,777,740,500,042,850 | 38.850575 | 167 | 0.675995 | false |
JioCloud/puppet-contrail | contrail/files/enable_kernel_core.py | 4 | 1719 | import re
import tempfile
# 'local' is used below to run shell commands; in the contrail fab tooling it
# comes from Fabric 1.x, which is assumed here since nothing else defines it.
from fabric.api import local
class ExtList (list):
def findex(self, fun):
for i, x in enumerate(self):
if fun(x):
return i
raise LookupError('No matching element in list')
# end def findex
# end class ExtList
def enable_kernel_core():
'''
enable_kernel_core:
update grub file
install grub2
enable services
'''
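    # Net effect on /etc/default/grub (before/after values illustrative):
    #   GRUB_CMDLINE_LINUX="quiet splash"
    #     -> GRUB_CMDLINE_LINUX="quiet splash crashkernel=128M kvm-intel.nested=1"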
gcnf = ''
_temp_dir_name = tempfile.mkdtemp()
with open('/etc/default/grub', 'r') as f:
gcnf = f.read()
p = re.compile('\s*GRUB_CMDLINE_LINUX')
el = ExtList(gcnf.split('\n'))
try:
i = el.findex(p.match)
exec (el[i])
el[i] = 'GRUB_CMDLINE_LINUX="%s crashkernel=128M"' % (
' '.join(filter(lambda x: not x.startswith(
'crashkernel='),
GRUB_CMDLINE_LINUX.split())))
exec (el[i])
el[i] = 'GRUB_CMDLINE_LINUX="%s kvm-intel.nested=1"' % (
' '.join(filter(lambda x: not x.startswith(
'kvm-intel.nested='),
GRUB_CMDLINE_LINUX.split())))
with open('%s/grub' % _temp_dir_name, 'w') as f:
f.write('\n'.join(el))
f.flush()
local('mv %s/grub /etc/default/grub' % (_temp_dir_name))
local('/usr/sbin/grub2-mkconfig -o /boot/grub2/grub.cfg')
except LookupError:
print 'Improper grub file, kernel crash not enabled'
# end enable_kernel_core
if __name__ == "__main__":
import cgitb
cgitb.enable(format='text')
enable_kernel_core()
| apache-2.0 | -1,208,264,068,724,775,000 | 30.254545 | 69 | 0.484002 | false |
noroutine/ansible | lib/ansible/modules/network/iosxr/iosxr_system.py | 5 | 8085 | #!/usr/bin/python
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: iosxr_system
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the system attributes on Cisco IOS XR devices
description:
- This module provides declarative management of node system attributes
on Cisco IOS XR devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
extends_documentation_fragment: iosxr
notes:
- Tested against IOS XR 6.1.2
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configure the IP domain name
on the remote device to the provided value. Value
should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name.
domain_search:
description:
- Provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
lookup_source:
description:
- The C(lookup_source) argument provides one or more source
interfaces to use for performing DNS lookups. The interface
provided in C(lookup_source) must be a valid interface configured
on the device.
lookup_enabled:
description:
- Provides administrative control
for enabling or disabling DNS lookups. When this argument is
set to True, lookups are performed and when it is set to False,
lookups are not performed.
type: bool
name_servers:
description:
      - The C(name_servers) argument accepts a list of DNS name servers by
        way of either FQDN or IP address to use to perform name resolution
        lookups. See the examples below.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain-name
iosxr_system:
hostname: iosxr01
domain_name: test.example.com
    domain_search:
- ansible.com
- redhat.com
- cisco.com
- name: remove configuration
iosxr_system:
state: absent
- name: configure DNS lookup sources
iosxr_system:
lookup_source: MgmtEth0/0/CPU0/0
lookup_enabled: yes
- name: configure name servers
iosxr_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- hostname iosxr01
- ip domain-name test.example.com
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.iosxr.iosxr import get_config, load_config
from ansible.module_utils.network.iosxr.iosxr import iosxr_argument_spec
def diff_list(want, have):
adds = set(want).difference(have)
removes = set(have).difference(want)
return (adds, removes)
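# For example, diff_list(['a', 'b'], ['b', 'c']) returns ({'a'}, {'c'}):
# 'a' must be added and 'c' removed to converge on the desired configuration.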
def map_obj_to_commands(want, have, module):
commands = list()
state = module.params['state']
def needs_update(x):
return want.get(x) and (want.get(x) != have.get(x))
if state == 'absent':
if have['hostname'] != 'ios':
commands.append('no hostname')
if have['domain_name']:
commands.append('no domain name')
if have['lookup_source']:
commands.append('no domain lookup source-interface %s' % have['lookup_source'])
if not have['lookup_enabled']:
commands.append('no domain lookup disable')
for item in have['name_servers']:
commands.append('no domain name-server %s' % item)
for item in have['domain_search']:
commands.append('no domain list %s' % item)
elif state == 'present':
if needs_update('hostname'):
commands.append('hostname %s' % want['hostname'])
if needs_update('domain_name'):
commands.append('domain name %s' % want['domain_name'])
if needs_update('lookup_source'):
commands.append('domain lookup source-interface %s' % want['lookup_source'])
if needs_update('lookup_enabled'):
cmd = 'domain lookup disable'
if want['lookup_enabled']:
cmd = 'no %s' % cmd
commands.append(cmd)
if want['name_servers'] is not None:
adds, removes = diff_list(want['name_servers'], have['name_servers'])
for item in adds:
commands.append('domain name-server %s' % item)
for item in removes:
commands.append('no domain name-server %s' % item)
if want['domain_search'] is not None:
adds, removes = diff_list(want['domain_search'], have['domain_search'])
for item in adds:
commands.append('domain list %s' % item)
for item in removes:
commands.append('no domain list %s' % item)
return commands
def parse_hostname(config):
match = re.search(r'^hostname (\S+)', config, re.M)
return match.group(1)
def parse_domain_name(config):
match = re.search(r'^domain name (\S+)', config, re.M)
if match:
return match.group(1)
def parse_lookup_source(config):
match = re.search(r'^domain lookup source-interface (\S+)', config, re.M)
if match:
return match.group(1)
def map_config_to_obj(module):
config = get_config(module)
return {
'hostname': parse_hostname(config),
'domain_name': parse_domain_name(config),
'domain_search': re.findall(r'^domain list (\S+)', config, re.M),
'lookup_source': parse_lookup_source(config),
'lookup_enabled': 'domain lookup disable' not in config,
'name_servers': re.findall(r'^domain name-server (\S+)', config, re.M)
}
def map_params_to_obj(module):
return {
'hostname': module.params['hostname'],
'domain_name': module.params['domain_name'],
'domain_search': module.params['domain_search'],
'lookup_source': module.params['lookup_source'],
'lookup_enabled': module.params['lookup_enabled'],
'name_servers': module.params['name_servers']
}
def main():
""" Main entry point for Ansible module execution
"""
argument_spec = dict(
hostname=dict(),
domain_name=dict(),
domain_search=dict(type='list'),
name_servers=dict(type='list'),
lookup_source=dict(),
lookup_enabled=dict(type='bool'),
state=dict(choices=['present', 'absent'], default='present')
)
argument_spec.update(iosxr_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
result = {'changed': False, 'warnings': warnings}
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
commit = not module.check_mode
diff = load_config(module, commands, commit=commit)
if diff:
result['diff'] = dict(prepared=diff)
result['changed'] = True
module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 | 6,248,513,036,235,833,000 | 30.705882 | 92 | 0.634879 | false |
mohamedhagag/community-addons | document_sftp/document_sftp_server.py | 1 | 1792 | # -*- coding: utf-8 -*-
# © 2016 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from paramiko import AUTH_SUCCESSFUL, AUTH_FAILED,\
OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, OPEN_SUCCEEDED,\
RSAKey, ServerInterface
from paramiko.py3compat import decodebytes
from openerp.exceptions import AccessDenied
import logging
_logger = logging.getLogger(__name__)
class DocumentSFTPServer(ServerInterface):
def __init__(self, env):
self.env = env
super(DocumentSFTPServer, self).__init__()
def check_auth_password(self, username, password):
try:
user = self.env['res.users'].search([('login', '=', username)])
if not user:
return AUTH_FAILED
user.sudo(user.id).check_credentials(password)
return AUTH_SUCCESSFUL
except AccessDenied:
pass
return AUTH_FAILED
def check_auth_publickey(self, username, key):
user = self.env['res.users'].search([('login', '=', username)])
if not user:
return AUTH_FAILED
for line in (user.authorized_keys or '').split('\n'):
if not line or line.startswith('#'):
continue
key_type, key_data = line.split(' ', 2)[:2]
if key_type != 'ssh-rsa':
                _logger.info(
                    'Ignoring key of unknown type for line %s', line)
continue
if RSAKey(data=decodebytes(key_data)) == key:
return AUTH_SUCCESSFUL
return AUTH_FAILED
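    # Example of an authorized_keys line the loop above accepts (key material
    # is illustrative, not a real key):
    #   ssh-rsa AAAAB3NzaC1yc2EAAAADAQAB... user@example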
def get_allowed_auths(self, username):
return 'password,publickey'
def check_channel_request(self, kind, chanid):
if kind in ('session',):
return OPEN_SUCCEEDED
return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
| agpl-3.0 | -4,122,523,796,936,543,700 | 35.55102 | 75 | 0.594082 | false |
freedv/codec2 | octave/fskdemodgui.py | 1 | 5574 | #!/usr/bin/env python
#
# fsk_demod Statistics GUI
# Accepts the stats output from fsk_demod on stdin, and plots it.
#
# Mark Jessop 2016-03-13 <[email protected]>
#
# NOTE: This is intended to be run on a 'live' stream of samples, and hence expects
# updates at about 10Hz. Anything faster will fill up the input queue and be discarded.
#
# Call using:
# <producer>| ./fsk_demod 2X 8 923096 115387 - - S 2> >(python ~/Dev/codec2-dev/octave/fskdemodgui.py) | <consumer>
#
#
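# Each input line is expected to be a JSON object of demod statistics, e.g.
# (field values illustrative):
#   {"EbNodB": 12.3, "ppm": -1.5, "f1_est": 1200.0, "f2_est": 1400.0,
#    "eye_diagram": [[0.1, 0.5, 0.9], [0.2, 0.6, 0.8]]}
# "f3_est"/"f4_est" are optional (present when four tones are estimated).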
import sys, time, json, Queue, argparse
from threading import Thread
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
parser = argparse.ArgumentParser()
parser.add_argument("--wide", action="store_true", default=False, help="Alternate wide arrangement of widgets, for placement at bottom of 4:3 screen.")
args = parser.parse_args()
# Some settings...
update_rate = 10 # Hz
history_size = 10*10 # 10 seconds at 10Hz...
history_scale = np.linspace((-1*history_size+1)/10.0,0,history_size)
# Input queue
in_queue = Queue.Queue(history_size) # 1-element FIFO...
win = pg.GraphicsWindow()
win.setWindowTitle('FSK Demodulator Modem Statistics')
# Plot objects
ebno_plot = win.addPlot(title="Eb/No")
ppm_plot = win.addPlot(title="Sample Clock Offset")
if args.wide == False:
win.nextRow()
else:
win.resize(1024,200)
fest_plot = win.addPlot(title="Tone Frequency Estimation")
eye_plot = win.addPlot(title="Eye Diagram")
# Disable auto-ranging on eye plot and fix axes for a big speedup...
# Configure plot labels and scales.
ebno_plot.setLabel('left','Eb/No (dB)')
ebno_plot.setLabel('bottom','Time (seconds)')
ppm_plot.setLabel('left','Clock Offset (ppm)')
ppm_plot.setLabel('bottom','Time (seconds)')
fest_plot.setLabel('left','Frequency (Hz)')
fest_plot.setLabel('bottom','Time (seconds)')
eye_plot.disableAutoRange()
eye_plot.setYRange(0,1)
eye_plot.setXRange(0,15)
eye_xr = 15
# Data arrays...
ebno_data = np.zeros(history_size)*np.nan
ppm_data = np.zeros(history_size)*np.nan
fest_data = np.zeros((4,history_size))*np.nan
# Curve objects, so we can update them...
ebno_curve = ebno_plot.plot(x=history_scale,y=ebno_data)
ppm_curve = ppm_plot.plot(x=history_scale,y=ppm_data)
fest1_curve = fest_plot.plot(x=history_scale,y=fest_data[0,:],pen='r') # f1 = Red
fest2_curve = fest_plot.plot(x=history_scale,y=fest_data[1,:],pen='g') # f2 = Green
fest3_curve = fest_plot.plot(x=history_scale,y=fest_data[2,:],pen='b') # f3 = Blue
fest4_curve = fest_plot.plot(x=history_scale,y=fest_data[3,:],pen='m') # f4 = Magenta
# Plot update function. Reads from queue, processes and updates plots.
def update_plots():
global timeout,timeout_counter,eye_plot,ebno_curve, ppm_curve, fest1_curve, fest2_curve, ebno_data, ppm_data, fest_data, in_queue, eye_xr
try:
if in_queue.empty():
return
in_data = in_queue.get_nowait()
in_data = json.loads(in_data)
except Exception as e:
sys.stderr.write(str(e))
return
# Roll data arrays
ebno_data[:-1] = ebno_data[1:]
ppm_data[:-1] = ppm_data[1:]
fest_data = np.roll(fest_data,-1,axis=1)
# Try reading in the new data points from the dictionary.
try:
new_ebno = in_data['EbNodB']
new_ppm = in_data['ppm']
new_fest1 = in_data['f1_est']
new_fest2 = in_data['f2_est']
except Exception as e:
print("ERROR reading dict: %s" % e)
# Try reading in the other 2 tones.
try:
new_fest3 = in_data['f3_est']
new_fest4 = in_data['f4_est']
fest_data[2,-1] = new_fest3
fest_data[3,-1] = new_fest4
except:
# If we can't read these tones out of the dict, fill with NaN
fest_data[2,-1] = np.nan
fest_data[3,-1] = np.nan
# Add in new data points
ebno_data[-1] = new_ebno
ppm_data[-1] = new_ppm
fest_data[0,-1] = new_fest1
fest_data[1,-1] = new_fest2
# Update plots
ebno_curve.setData(x=history_scale,y=ebno_data)
ppm_curve.setData(x=history_scale,y=ppm_data)
fest1_curve.setData(x=history_scale,y=fest_data[0,:],pen='r') # f1 = Red
    fest2_curve.setData(x=history_scale,y=fest_data[1,:],pen='g') # f2 = Green
    fest3_curve.setData(x=history_scale,y=fest_data[2,:],pen='b') # f3 = Blue
fest4_curve.setData(x=history_scale,y=fest_data[3,:],pen='m') # f4 = Magenta
#Now try reading in and plotting the eye diagram
try:
eye_data = np.array(in_data['eye_diagram'])
#eye_plot.disableAutoRange()
eye_plot.clear()
col_index = 0
for line in eye_data:
eye_plot.plot(line,pen=(col_index,eye_data.shape[0]))
col_index += 1
#eye_plot.autoRange()
#Quick autoranging for x-axis to allow for differing P and Ts values
if eye_xr != len(eye_data[0]) - 1:
eye_xr = len(eye_data[0]) - 1
eye_plot.setXRange(0,len(eye_data[0])-1)
except Exception as e:
pass
timer = pg.QtCore.QTimer()
timer.timeout.connect(update_plots)
timer.start(80)
# Thread to read from stdin and push into a queue to be processed.
def read_input():
global in_queue
while True:
in_line = sys.stdin.readline()
# Only push actual data into the queue...
# This stops sending heaps of empty strings into the queue when fsk_demod closes.
if in_line == "":
time.sleep(0.1)
continue
if not in_queue.full():
in_queue.put_nowait(in_line)
read_thread = Thread(target=read_input)
read_thread.daemon = True # Set as daemon, so when all other threads die, this one gets killed too.
read_thread.start()
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
try:
QtGui.QApplication.instance().exec_()
except KeyboardInterrupt:
sys.exit(0)
| lgpl-2.1 | 9,091,397,421,565,328,000 | 29.79558 | 151 | 0.693936 | false |
handroissuazo/tensorflow | tensorflow/contrib/distributions/python/ops/gumbel.py | 2 | 7559 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Gumbel distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class _Gumbel(distribution.Distribution):
"""The scalar Gumbel distribution with location `loc` and `scale` parameters.
#### Mathematical details
The probability density function (pdf) of this distribution is,
```none
  pdf(x; mu, sigma) = exp(-(x - mu) / sigma - exp(-(x - mu) / sigma)) / sigma
```
where `loc = mu` and `scale = sigma`.
  The cumulative distribution function (cdf) of this distribution is,
```cdf(x; mu, sigma) = exp(-exp(-(x - mu) / sigma))```
The Gumbel distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Gumbel(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
# Define a single scalar Gumbel distribution.
dist = tf.contrib.distributions.Gumbel(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Gumbels.
# The first has mean 1 and scale 11, the second 2 and 22.
dist = tf.contrib.distributions.Gumbel(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Arguments are broadcast when possible.
```python
  # Define a batch of two scalar valued Gumbels.
# Both have mean 1, but different scales.
dist = tf.contrib.distributions.Gumbel(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Gumbel"):
"""Construct Gumbel distributions with location and scale `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor, the means of the distribution(s).
scale: Floating point tensor, the scales of the distribution(s).
scale must contain only positive values.
validate_args: Python `Boolean`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `Boolean`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: `String` name prefixed to Ops created by this class.
Raises:
TypeError: if loc and scale are different dtypes.
"""
parameters = locals()
with ops.name_scope(name, values=[loc, scale]) as ns:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
contrib_tensor_util.assert_same_float_dtype((self._loc, self._scale))
super(_Gumbel, self).__init__(
dtype=self._scale.dtype,
is_continuous=True,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=ns)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the location."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(), self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
shape = array_ops.concat(([n], array_ops.shape(self.mean())), 0)
np_dtype = self.dtype.as_numpy_dtype()
minval = np.nextafter(np_dtype(0), np_dtype(1))
uniform = random_ops.random_uniform(shape=shape,
minval=minval,
maxval=1,
dtype=self.dtype,
seed=seed)
sampled = -math_ops.log(-math_ops.log(uniform))
return sampled * self.scale + self.loc
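  # Sketch of the sampling math used in _sample_n above (added note, not part
  # of the original file): with U ~ Uniform(0, 1), inverting
  #   cdf(x) = exp(-exp(-(x - loc) / scale))
  # gives x = loc - scale * log(-log(U)); -log(-log(U)) is a standard Gumbel
  # sample, which is then scaled and shifted as in the return statement.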
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_cdf(self, x):
return -math_ops.exp(-self._z(x))
def _cdf(self, x):
return math_ops.exp(-math_ops.exp(-self._z(x)))
def _log_unnormalized_prob(self, x):
z = self._z(x)
return - z - math_ops.exp(-z)
def _log_normalization(self):
return math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast sigma.
scale = self.scale * array_ops.ones_like(self.loc)
return 1 + math_ops.log(scale) + np.euler_gamma
def _mean(self):
return self.loc + self.scale * np.euler_gamma
def _stddev(self):
return self.scale * array_ops.ones_like(self.loc) * math.pi / math.sqrt(6)
def _mode(self):
return self.loc * array_ops.ones_like(self.scale)
def _z(self, x):
"""Standardize input `x` to a unit logistic."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
| apache-2.0 | 7,956,107,652,797,558,000 | 33.20362 | 92 | 0.653129 | false |
geimer/easybuild-easyblocks | easybuild/easyblocks/t/tbb.py | 4 | 3733 | ##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for installing the Intel Threading Building Blocks (TBB) library, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import shutil
import glob
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.intelbase import IntelBase, ACTIVATION_NAME_2012, LICENSE_FILE_NAME_2012
class EB_tbb(IntelBase):
"""EasyBlock for tbb, threading building blocks"""
def install_step(self):
"""Custom install step, to add extra symlinks"""
silent_cfg_names_map = None
if LooseVersion(self.version) < LooseVersion('4.2'):
silent_cfg_names_map = {
'activation_name': ACTIVATION_NAME_2012,
'license_file_name': LICENSE_FILE_NAME_2012,
}
super(EB_tbb, self).install_step(silent_cfg_names_map=silent_cfg_names_map)
# save libdir
os.chdir(self.installdir)
if LooseVersion(self.version) < LooseVersion('4.1.0'):
libglob = 'tbb/lib/intel64/cc*libc*_kernel*'
else:
libglob = 'tbb/lib/intel64/gcc*'
libs = glob.glob(libglob)
if len(libs):
libdir = libs[-1] # take the last one, should be ordered by cc get_version.
# we're only interested in the last bit
libdir = libdir.split('/')[-1]
else:
self.log.error("No libs found using %s in %s" % (libglob, self.installdir))
self.libdir = libdir
self.libpath = "%s/tbb/libs/intel64/%s/" % (self.installdir, libdir)
self.log.debug("self.libpath: %s" % self.libpath)
# applications go looking into tbb/lib so we move what's in there to libs
# and symlink the right lib from /tbb/libs/intel64/... to lib
install_libpath = os.path.join(self.installdir, 'tbb', 'lib')
shutil.move(install_libpath, os.path.join(self.installdir, 'tbb', 'libs'))
os.symlink(self.libpath, install_libpath)
def sanity_check_step(self):
custom_paths = {
'files':[],
'dirs':["tbb/bin", "tbb/lib/", "tbb/libs/"]
}
super(EB_tbb, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Add correct path to lib to LD_LIBRARY_PATH. and intel license file"""
txt = super(EB_tbb, self).make_module_extra()
txt += "prepend-path\t%s\t\t%s\n" % ('LD_LIBRARY_PATH', self.libpath)
return txt
| gpl-2.0 | 4,316,174,890,114,890,000 | 37.484536 | 111 | 0.657112 | false |
efforia/django-feedly | feedly/urls.py | 1 | 1126 | #!/usr/bin/python
#
# This file is part of django-feedly project.
#
# Copyright (C) 2011-2020 William Oliveira de Lagos <[email protected]>
#
# Feedly is free software: you can redistribute it and/or modify
# it under the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Feedly is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Feedly. If not, see <http://www.gnu.org/licenses/>.
#
from django.conf.urls import url,include
from django.urls import path
from .views import *
urlpatterns = [
path('', BlocksView.as_view()),
# url(r'^mosaic', mosaic),
# url(r'^pages', page),
# url(r'^pageview', pageview),
# url(r'^pageedit', pageedit),
# url(r'^deadlines', deadlines),
]
| lgpl-3.0 | 2,359,775,990,932,228,000 | 32.121212 | 78 | 0.698046 | false |
ChromiumWebApps/chromium | tools/telemetry/examples/telemetry_perf_test.py | 4 | 1325 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import time
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from telemetry.core import browser_finder
from telemetry.core import browser_options
def Main(args):
options = browser_options.BrowserFinderOptions()
parser = options.CreateParser('telemetry_perf_test.py')
options, args = parser.parse_args(args)
browser_to_create = browser_finder.FindBrowser(options)
assert browser_to_create
with browser_to_create.Create() as b:
tab = b.tabs[0]
# Measure round-trip-time for evaluate
times = []
for i in range(1000):
start = time.time()
tab.EvaluateJavaScript('%i * 2' % i)
times.append(time.time() - start)
N = float(len(times))
avg = sum(times, 0.0) / N
squared_diffs = [(t - avg) * (t - avg) for t in times]
stdev = sum(squared_diffs, 0.0) / (N - 1)
times.sort()
percentile_75 = times[int(0.75 * N)]
print "%s: avg=%f; stdev=%f; min=%f; 75th percentile = %f" % (
"Round trip time (seconds)",
avg, stdev, min(times), percentile_75)
return 0
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause | -7,114,342,853,813,830,000 | 27.191489 | 72 | 0.653585 | false |
benrudolph/commcare-hq | custom/bihar/calculations/utils/calculations.py | 4 | 1986 | from casexml.apps.case.models import CommCareCase
def get_actions(case, action_filter=lambda a: True, reverse=False):
ordered = reversed if reverse else lambda x: x
for action in ordered(case.actions):
if action_filter(action):
yield action
def get_forms(case, action_filter=lambda a: True, form_filter=lambda f: True,
reverse=False, yield_action=False):
if not hasattr(case, '_forms_cache'):
case._forms_cache = {}
for action in get_actions(case, action_filter=action_filter,
reverse=reverse):
if action.xform_id not in case._forms_cache:
case._forms_cache[action.xform_id] = action.xform
xform = case._forms_cache[action.xform_id]
if xform and form_filter(xform):
if yield_action:
yield xform, action
else:
yield xform
def get_form(case, action_filter=lambda a: True, form_filter=lambda f: True, reverse=False):
"""
returns the first form that passes through both filter functions
"""
gf = get_forms(case, action_filter=action_filter, form_filter=form_filter, reverse=reverse)
try:
return gf.next()
except StopIteration:
return None
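# Illustrative usage of get_form (a sketch, not from the original module; the
# lambda only relies on the `xform_id` attribute already used above):
#   latest_form = get_form(case,
#                          action_filter=lambda a: a.xform_id is not None,
#                          reverse=True)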
def get_related_props(case, property):
"""
Gets the specified property for all child cases in which that property exists
"""
if not hasattr(case, '_subcase_cache'):
case._subcase_cache = {}
for index in case.reverse_indices:
subcase_id = index.referenced_id
if subcase_id not in case._subcase_cache:
case._subcase_cache[subcase_id] = CommCareCase.get(subcase_id)
subcase = case._subcase_cache[subcase_id]
subcase_property = getattr(subcase, property, None)
if subcase_property:
yield subcase_property
def get_related_prop(case, property):
for value in get_related_props(case, property):
return value
return None | bsd-3-clause | -5,385,246,741,038,561,000 | 33.258621 | 95 | 0.636959 | false |
stevenliuit/3vilTwinAttacker | Modules/deauth_func.py | 1 | 15164 | from PyQt4.QtGui import *
from PyQt4.QtCore import *
from subprocess import Popen,PIPE
from scapy.all import *
from Dns_Func import frm_dhcp_Attack
import threading
from os import popen,system,getuid,path,makedirs
from re import search,compile,match
from Core.Settings import frm_Settings
from Modules.fuc_airodump import airdump_start,get_network_scan
class frm_window(QMainWindow):
def __init__(self, parent=None):
super(frm_window, self).__init__(parent)
self.form_widget = frm_deauth(self)
self.setCentralWidget(self.form_widget)
self.setWindowTitle("Deauth Attack wireless Route")
self.setWindowIcon(QIcon('rsc/icon.ico'))
self.config = frm_Settings()
self.loadtheme(self.config.XmlThemeSelected())
def loadtheme(self,theme):
if theme != "theme2":
sshFile=("Core/%s.css"%(theme))
with open(sshFile,"r") as fh:
self.setStyleSheet(fh.read())
else:
sshFile=("Core/%s.css"%(theme))
with open(sshFile,"r") as fh:
self.setStyleSheet(fh.read())
def closeEvent(self, event):
        reply = QMessageBox.question(self, 'About Exit', "Are you sure you want to quit?", QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
if getuid() == 0:
system("airmon-ng stop mon0")
system("clear")
self.deleteLater()
else:
pass
else:
event.ignore()
class frm_deauth(QWidget):
def __init__(self, parent=None):
super(frm_deauth, self).__init__(parent)
self.Main = QVBoxLayout()
self.interface = "mon0"
self.xmlcheck = frm_Settings()
self.ap_list = []
self.pacote = []
self.control = None
self.data = {'Bssid':[], 'Essid':[], 'Channel':[]}
self.window_qt()
def select_target(self):
item = self.tables.selectedItems()
if item != []:
self.linetarget.setText(item[2].text())
else:
            QMessageBox.critical(self, "Error in row", "No row selected in the table, please scan the network again")
self.linetarget.clear()
def window_qt(self):
self.controlador = QLabel("")
self.attack_OFF()
self.form0 = QFormLayout()
self.form1 = QFormLayout()
self.form2 = QFormLayout()
self.list = QListWidget()
self.list.clicked.connect(self.list_clicked)
self.list.setFixedHeight(20)
self.tables = QTableWidget(5,3)
self.tables.setFixedWidth(350)
self.tables.setRowCount(100)
self.tables.setFixedHeight(200)
self.tables.setSelectionBehavior(QAbstractItemView.SelectRows)
self.tables.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.tables.clicked.connect(self.select_target)
self.tables.resizeColumnsToContents()
self.tables.resizeRowsToContents()
self.tables.horizontalHeader().resizeSection(1,120)
self.tables.horizontalHeader().resizeSection(0,60)
self.tables.horizontalHeader().resizeSection(2,158)
self.tables.verticalHeader().setVisible(False)
Headers = []
for n, key in enumerate(self.data.keys()):
Headers.append(key)
self.tables.setHorizontalHeaderLabels(Headers)
self.linetarget = QLineEdit()
self.input_client = QLineEdit(self)
self.input_client.setText("FF:FF:FF:FF:FF:FF")
self.btn_enviar = QPushButton("Send Attack", self)
self.btn_enviar.clicked.connect(self.attack_deauth)
self.btn_scan = QPushButton(" Network Scan ", self)
self.btn_scan.clicked.connect(self.exec_sniff)
self.btn_stop = QPushButton("Stop Attack ", self)
self.btn_stop.clicked.connect(self.kill_thread)
self.btn_enviar.setFixedWidth(170)
self.btn_stop.setFixedWidth(170)
#icons
self.btn_scan.setIcon(QIcon("rsc/network.png"))
self.btn_enviar.setIcon(QIcon("rsc/start.png"))
self.btn_stop.setIcon(QIcon("rsc/Stop.png"))
self.w_pacote = QComboBox(self)
self.w_pacote.addItem("1000 ")
self.w_pacote.addItem("2000 ")
self.w_pacote.addItem("3000 ")
self.w_pacote.addItem("4000 ")
self.w_pacote.addItem("5000 ")
self.w_pacote.addItem("10000 ")
self.w_pacote.addItem("infinite loop")
self.time_scan = QComboBox(self)
self.time_scan.addItem("10s")
self.time_scan.addItem("20s")
self.time_scan.addItem("30s")
self.get_placa = QComboBox(self)
Interfaces = frm_dhcp_Attack()
n = Interfaces.placa()
for i,j in enumerate(n):
if search("wlan", j):
self.get_placa.addItem(n[i])
self.form0.addRow("Network scan time:", self.time_scan)
self.form1.addRow(self.tables)
self.form1.addRow(self.get_placa, self.btn_scan)
self.form1.addRow("Target:", self.linetarget)
self.form1.addRow("Packet:",self.w_pacote)
self.form1.addRow("Client:", self.input_client)
self.form1.addRow("Status Attack:", self.controlador)
self.form2.addRow(self.btn_enviar, self.btn_stop)
self.Main.addLayout(self.form0)
self.Main.addLayout(self.form1)
self.Main.addLayout(self.form2)
self.setLayout(self.Main)
def scan_diveces_airodump(self):
dirpath = "Settings/Dump"
if not path.isdir(dirpath):
makedirs(dirpath)
self.data = {'Bssid':[], 'Essid':[], 'Channel':[]}
exit_air = airdump_start()
self.fix = False
if exit_air == None:
self.cap = get_network_scan()
if self.cap != None:
for i in self.cap:
i = i.split("||")
if self.check_is_mac(i[2]):
Headers = []
self.data['Channel'].append(i[0])
self.data['Essid'].append(i[1])
self.data['Bssid'].append(i[2])
for n, key in enumerate(self.data.keys()):
Headers.append(key)
for m, item in enumerate(self.data[key]):
item = QTableWidgetItem(item)
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
self.tables.setItem(m, n, item)
self.cap =[]
def kill_thread(self):
self.attack_OFF()
self.control = 1
dat = self.xmlcheck.xmlSettings("item1","deauth_mdk3",None,False)
if dat == "True":
popen("killall xterm")
def exec_sniff(self):
self.data = {'Bssid':[], 'Essid':[], 'Channel':[]}
dot =1
count = 0
self.options_scan = self.xmlcheck.xmlSettings("monitor0", "scan_scapy", None, False)
if self.get_placa.currentText() == "":
QMessageBox.information(self, "Network Adapter", 'Network Adapter Not found try again.')
else:
comando = "ifconfig"
proc = Popen(comando,stdout=PIPE, shell=False)
data = proc.communicate()[0]
if search("mon0", data):
dot = 0
c = "airmon-ng stop mon0".split()
Popen(c,stdout=PIPE, shell=False)
system("airmon-ng start %s" %(self.get_placa.currentText()))
else:
system("airmon-ng start %s" %(self.get_placa.currentText()))
if self.time_scan.currentText() == "10s":
count = 10
elif self.time_scan.currentText() == "20s":
count = 20
elif self.time_scan.currentText() == "30s":
count = 30
if self.options_scan == "True":
sniff(iface=self.interface, prn =self.Scanner_devices, timeout=count)
t = len(self.ap_list) -1
i = 0
items = []
cap = []
for i in range(t):
if len(self.ap_list[i]) < len(self.ap_list[i+1]):
if i != 0:
for index in xrange(self.list.count()):
items.append(self.list.item(index))
if self.ap_list[i] or self.ap_list[i+1] in items:
pass
else:
self.list.addItem(self.ap_list[i] + " " + self.ap_list[i+1])
if not (self.ap_list[i] + " " + self.ap_list[i+1]) in cap:
cap.append(self.ap_list[i] + " " + self.ap_list[i+1])
else:
self.list.addItem(self.ap_list[i] + " " + self.ap_list[i+1])
if not (self.ap_list[i] + " " + self.ap_list[i+1]) in cap:
cap.append(self.ap_list[i] + " " + self.ap_list[i+1])
else:
self.list.addItem(self.ap_list[i+1] + " " + self.ap_list[i])
if not (self.ap_list[i+1] + " " + self.ap_list[i]) in cap:
cap.append(self.ap_list[i+1] + " " + self.ap_list[i])
if self.ap_list[i] < i:
pass
break
else:
dot = 1
self.list.clear()
for i in cap:
dat = i.split()
if self.check_is_mac(dat[3]):
self.data['Channel'].append(dat[0])
self.data['Essid'].append(dat[2])
self.data['Bssid'].append(dat[3])
Headers = []
for n, key in enumerate(self.data.keys()):
Headers.append(key)
for m, item in enumerate(self.data[key]):
item = QTableWidgetItem(item)
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
self.tables.setItem(m, n, item)
cap = []
self.ap_list = []
else:
self.thread_airodump = threading.Thread(target=self.scan_diveces_airodump)
self.thread_airodump.daemon = True
self.thread_airodump.start()
def Scanner_devices(self,pkt):
if pkt.type == 0 and pkt.subtype == 8:
if pkt.addr2 not in self.ap_list:
self.ap_list.append(pkt.addr2)
self.ap_list.append(str(int(ord(pkt[Dot11Elt:3].info)))+" | " + pkt.info)
print "AP MAC: %s with SSID: %s CH %d"%(pkt.addr2, pkt.info, int(ord(pkt[Dot11Elt:3].info)))
def attack_deauth(self):
if self.linetarget.text() == "":
QMessageBox.information(self, "Target Error", "Please, first select Target for attack")
else:
self.ss = None
if self.w_pacote.currentText() == "infinite loop":
self.ss = 1
else:
self.ss = int(self.w_pacote.currentText())
self.bssid = str(self.linetarget.text())
self.deauth_check = self.xmlcheck.xmlSettings("item0", "deauth_scapy",None,False)
self.args = self.xmlcheck.xmlSettings("mdk3","arguments", None, False)
if self.deauth_check == "True":
self.controlador.setText("[ ON ]")
self.controlador.setStyleSheet("QLabel { color : green; }")
self.t = threading.Thread(target=self.deauth_attacker, args=(self.bssid,str(self.input_client.text()), self.ss))
self.t.daemon = True
self.t.start()
else:
self.controlador.setText("[ ON ]")
self.controlador.setStyleSheet("QLabel { color : green; }")
self.t = threading.Thread(target=self.mdk3_attacker, args=(self.bssid,self.args,))
self.t.daemon = True
self.t.start()
def attack_OFF(self):
self.controlador.setText("[ OFF ]")
self.controlador.setStyleSheet("QLabel { color : red; }")
system("clear")
def mdk3_attacker(self,bssid,args):
n = (popen("""sudo xterm -geometry 75x15-1+200 -T "mdk3 Target: %s" -e mdk3 mon0 %s %s & mdk3=$!"""%(bssid,args,bssid)).read()) + "exit"
while n != "dsa":
if n == "exit":
self.attack_OFF()
break
def deauth_attacker(self,bssid, client, count):
self.control = None
bot = 0
conf.verb = 0
conf.iface = self.interface
packet = RadioTap()/Dot11(type=0,subtype=12,addr1=client,addr2=bssid,addr3=bssid)/Dot11Deauth(reason=7)
deauth_ap = Dot11(addr1=bssid, addr2=bssid, addr3=bssid)/Dot11Deauth()
deauth_pkt2 = Dot11(addr1=bssid, addr2=client, addr3=client)/Dot11Deauth()
self.pacote.append(deauth_pkt2)
self.pacote.append(deauth_ap)
if count == 1:
while count != 0:
try:
sendp(packet)
print 'Deauth sent via: ' + conf.iface + ' to BSSID: ' + bssid + ' for Client: ' + client
if self.control == None:
pass
else:
self.attack_OFF()
count = 0
popen("clear")
except KeyboardInterrupt:
print "::"
sys.exit()
else:
for n in range(int(count)):
try:
sendp(packet)
print 'Deauth sent via: ' + conf.iface + ' to BSSID: ' + bssid + ' for Client: ' + client
if self.control == None:
pass
else:
self.attack_OFF()
popen("clear")
break
except KeyboardInterrupt:
print "::"
sys.exit()
self.attack_OFF()
def check_is_mac(self,value):
checked = re.compile(r"""(
^([0-9A-F]{2}[-]){5}([0-9A-F]{2})$
|^([0-9A-F]{2}[:]){5}([0-9A-F]{2})$
)""",
re.VERBOSE|re.IGNORECASE)
if checked.match(value) is None:
return False
else:
return True
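    # Examples accepted by check_is_mac per the regex above:
    #   "00:11:22:AA:BB:CC" or "00-11-22-AA-BB-CC"; anything else returns False.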
@pyqtSlot(QModelIndex)
def list_clicked(self, index):
itms = self.list.selectedIndexes()
for i in itms:
attack = str(i.data().toString()).split()
for i in attack:
if self.check_is_mac(i.replace(" ", "")):
self.linetarget.setText(str(i))
if self.linetarget.text() == "":
QMessageBox.information(self, "MacAddress", "Error check the Mac Target, please set the mac valid.")
| mit | -4,174,787,915,079,112,000 | 42.449857 | 145 | 0.512662 | false |
loads/molotov | molotov/util.py | 1 | 5526 | from io import StringIO
import traceback
import sys
import functools
import json
import os
import asyncio
import time
import threading
import platform
from aiohttp import ClientSession, __version__
# this lib works for CPython 3.7+
if platform.python_implementation() == "PyPy" or sys.version_info.minor < 7:
import multiprocessing # noqa
else:
import multiprocessing_on_dill as multiprocessing # noqa
_DNS_CACHE = {}
_STOP = False
_STOP_WHY = []
_TIMER = None
if __version__[0] == "2":
raise ImportError("Molotov only supports aiohttp 3.x going forward")
def get_timer():
return _TIMER
def set_timer(value=None):
global _TIMER
if value is None:
value = int(time.time())
_TIMER = value
def stop(why=None):
global _STOP
if why is not None:
_STOP_WHY.append(why)
_STOP = True
def stop_reason():
return _STOP_WHY
def is_stopped():
return _STOP
class OptionError(Exception):
pass
def _expand_args(args, options):
for key, val in options.items():
setattr(args, key, val)
def expand_options(config, scenario, args):
if not isinstance(config, str):
try:
config = json.loads(config.read())
except Exception:
raise OptionError("Can't parse %r" % config)
else:
if not os.path.exists(config):
raise OptionError("Can't find %r" % config)
with open(config) as f:
try:
config = json.loads(f.read())
except ValueError:
raise OptionError("Can't parse %r" % config)
if "molotov" not in config:
raise OptionError("Bad config -- no molotov key")
if "tests" not in config["molotov"]:
raise OptionError("Bad config -- no molotov/tests key")
if scenario not in config["molotov"]["tests"]:
raise OptionError("Can't find %r in the config" % scenario)
_expand_args(args, config["molotov"]["tests"][scenario])
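# Example of the config shape expected by expand_options (scenario and option
# names here are hypothetical):
#   {"molotov": {"tests": {"scenario_one": {"workers": 10, "duration": 60}}}}
# expand_options(config, "scenario_one", args) would then set args.workers
# and args.duration on the args object.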
def _run_in_fresh_loop(coro, timeout=30):
thres = []
thexc = []
def run():
loop = asyncio.new_event_loop()
try:
task = loop.create_task(coro())
thres.append(loop.run_until_complete(task))
except Exception as e:
thexc.append(e)
finally:
loop.close()
th = threading.Thread(target=run)
th.start()
th.join(timeout=timeout)
# re-raise a thread exception
if len(thexc) > 0:
raise thexc[0]
return thres[0]
async def _request(endpoint, verb="GET", session_options=None, json=False, **options):
if session_options is None:
session_options = {}
async with ClientSession(**session_options) as session:
meth = getattr(session, verb.lower())
result = {}
async with meth(endpoint, **options) as resp:
if json:
result["content"] = await resp.json()
else:
result["content"] = await resp.text()
result["status"] = resp.status
result["headers"] = resp.headers
return result
def request(endpoint, verb="GET", session_options=None, **options):
"""Performs a synchronous request.
Uses a dedicated event loop and aiohttp.ClientSession object.
Options:
- endpoint: the endpoint to call
- verb: the HTTP verb to use (defaults: GET)
- session_options: a dict containing options to initialize the session
(defaults: None)
- options: extra options for the request (defaults: None)
Returns a dict object with the following keys:
- content: the content of the response
- status: the status
- headers: a dict with all the response headers
"""
req = functools.partial(_request, endpoint, verb, session_options, **options)
return _run_in_fresh_loop(req)
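# Illustrative usage of request() (the endpoint is hypothetical; the call
# blocks until the response is received):
#   res = request("http://localhost:8080/api/info")
#   if res["status"] == 200:
#       print(res["content"], res["headers"])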
def json_request(endpoint, verb="GET", session_options=None, **options):
"""Like :func:`molotov.request` but extracts json from the response.
"""
req = functools.partial(
_request, endpoint, verb, session_options, json=True, **options
)
return _run_in_fresh_loop(req)
_VARS = {}
def set_var(name, value):
"""Sets a global variable.
Options:
- name: name of the variable
- value: object to set
"""
_VARS[name] = value
def get_var(name, factory=None):
"""Gets a global variable given its name.
If factory is not None and the variable is not set, factory
is a callable that will set the variable.
If not set, returns None.
"""
if name not in _VARS and factory is not None:
_VARS[name] = factory()
return _VARS.get(name)
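# Illustrative usage of the global-variable helpers above (names are made up):
#   set_var("counter", 0)
#   cache = get_var("cache", factory=dict)  # lazily creates and stores {}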
# taken from https://stackoverflow.com/a/37211337
def _make_sleep():
async def sleep(delay, result=None, *, loop=None):
coro = asyncio.sleep(delay, result=result, loop=loop)
task = asyncio.ensure_future(coro, loop=loop)
sleep.tasks.add(task)
try:
return await task
except asyncio.CancelledError:
return result
finally:
sleep.tasks.remove(task)
sleep.tasks = set()
sleep.cancel_all = lambda: sum(task.cancel() for task in sleep.tasks)
return sleep
cancellable_sleep = _make_sleep()
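# Illustrative usage (inside a coroutine):
#   await cancellable_sleep(5)       # behaves like asyncio.sleep(5)
#   cancellable_sleep.cancel_all()   # cancels every sleep still pending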
def printable_error(error, tb=None):
printable = [repr(error)]
if tb is None:
tb = sys.exc_info()[2]
printed = StringIO()
traceback.print_tb(tb, file=printed)
printed.seek(0)
for line in printed.readlines():
printable.append(line.rstrip("\n"))
return printable
| apache-2.0 | 4,590,278,566,154,839,600 | 23.669643 | 86 | 0.621245 | false |
hb9kns/PyBitmessage | dev/ssltest.py | 1 | 3263 | import os
import select
import socket
import ssl
import sys
import traceback
HOST = "127.0.0.1"
PORT = 8912
def sslProtocolVersion():
# sslProtocolVersion
if sys.version_info >= (2,7,13):
# this means TLSv1 or higher
# in the future change to
        # ssl.PROTOCOL_TLSv1_2
return ssl.PROTOCOL_TLS
elif sys.version_info >= (2,7,9):
# this means any SSL/TLS. SSLv2 and 3 are excluded with an option after context is created
return ssl.PROTOCOL_SSLv23
else:
# this means TLSv1, there is no way to set "TLSv1 or higher" or
# "TLSv1.2" in < 2.7.9
return ssl.PROTOCOL_TLSv1
def sslProtocolCiphers():
if ssl.OPENSSL_VERSION_NUMBER >= 0x10100000:
return "AECDH-AES256-SHA@SECLEVEL=0"
else:
return "AECDH-AES256-SHA"
def connect():
sock = socket.create_connection((HOST, PORT))
return sock
def listen():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((HOST, PORT))
sock.listen(0)
return sock
def sslHandshake(sock, server=False):
if sys.version_info >= (2,7,9):
context = ssl.SSLContext(sslProtocolVersion())
context.set_ciphers(sslProtocolCiphers())
context.set_ecdh_curve("secp256k1")
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
context.options = ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_SINGLE_ECDH_USE | ssl.OP_CIPHER_SERVER_PREFERENCE
sslSock = context.wrap_socket(sock, server_side = server, do_handshake_on_connect=False)
else:
sslSock = ssl.wrap_socket(sock, keyfile = os.path.join('src', 'sslkeys', 'key.pem'), certfile = os.path.join('src', 'sslkeys', 'cert.pem'), server_side = server, ssl_version=sslProtocolVersion(), do_handshake_on_connect=False, ciphers='AECDH-AES256-SHA')
while True:
try:
sslSock.do_handshake()
break
except ssl.SSLWantReadError:
print "Waiting for SSL socket handhake read"
select.select([sslSock], [], [], 10)
except ssl.SSLWantWriteError:
print "Waiting for SSL socket handhake write"
select.select([], [sslSock], [], 10)
except Exception:
print "SSL socket handhake failed, shutting down connection"
traceback.print_exc()
return
print "Success!"
return sslSock
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Usage: ssltest.py client|server"
sys.exit(0)
elif sys.argv[1] == "server":
serversock = listen()
while True:
print "Waiting for connection"
sock, addr = serversock.accept()
print "Got connection from %s:%i" % (addr[0], addr[1])
sslSock = sslHandshake(sock, True)
if sslSock:
sslSock.shutdown(socket.SHUT_RDWR)
sslSock.close()
elif sys.argv[1] == "client":
sock = connect()
sslSock = sslHandshake(sock, False)
if sslSock:
sslSock.shutdown(socket.SHUT_RDWR)
sslSock.close()
else:
print "Usage: ssltest.py client|server"
sys.exit(0)
| mit | 8,244,031,066,322,237,000 | 33.712766 | 262 | 0.611401 | false |
ilastikdev/ilastik | ilastik/plugins_default/vigra_objfeats_skeleton.py | 4 | 3155 | ###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
from ilastik.plugins import ObjectFeaturesPlugin
import ilastik.applets.objectExtraction.opObjectExtraction
#from ilastik.applets.objectExtraction.opObjectExtraction import make_bboxes, max_margin
import vigra
import numpy as np
from lazyflow.request import Request, RequestPool
import logging
logger = logging.getLogger(__name__)
def cleanup_value(val, nObjects):
"""ensure that the value is a numpy array with the correct shape."""
val = np.asarray(val)
if val.ndim == 1:
val = val.reshape(-1, 1)
assert val.shape[0] == nObjects
# remove background
val = val[1:]
return val
def cleanup(d, nObjects, features):
result = dict((k, cleanup_value(v, nObjects)) for k, v in d.iteritems())
newkeys = set(result.keys()) & set(features)
return dict((k, result[k]) for k in newkeys)
class VigraSkeletonObjFeats(ObjectFeaturesPlugin):
local_preffix = "Skeleton " #note the space at the end, it's important
ndim = None
def availableFeatures(self, image, labels):
try:
names = vigra.analysis.supportedSkeletonFeatures(labels)
logger.info('2D Skeleton Features: Supported Skeleton Features: done.')
except:
logger.error('2D Skeleton Features: Supported Skeleton Features: failed (Vigra commit must be f8e48031abb1158ea804ca3cbfe781ccc62d09a2 or newer).')
names = []
tooltips = {}
result = dict((n, {}) for n in names)
for f, v in result.iteritems():
v['tooltip'] = self.local_preffix + f
return result
def _do_4d(self, image, labels, features, axes):
result = vigra.analysis.extractSkeletonFeatures(labels.squeeze().astype(np.uint32))
# find the number of objects
nobj = result[features[0]].shape[0]
#NOTE: this removes the background object!!!
#The background object is always present (even if there is no 0 label) and is always removed here
return cleanup(result, nobj, features)
def compute_global(self, image, labels, features, axes):
return self._do_4d(image, labels, features.keys(), axes)
| gpl-3.0 | -2,984,747,932,094,494,000 | 36.559524 | 159 | 0.636767 | false |
chromium/chromium | third_party/protobuf/python/google/protobuf/internal/extension_dict.py | 20 | 8443 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains _ExtensionDict class to represent extensions.
"""
from google.protobuf.internal import type_checkers
from google.protobuf.descriptor import FieldDescriptor
def _VerifyExtensionHandle(message, extension_handle):
"""Verify that the given extension handle is valid."""
if not isinstance(extension_handle, FieldDescriptor):
raise KeyError('HasExtension() expects an extension handle, got: %s' %
extension_handle)
if not extension_handle.is_extension:
raise KeyError('"%s" is not an extension.' % extension_handle.full_name)
if not extension_handle.containing_type:
raise KeyError('"%s" is missing a containing_type.'
% extension_handle.full_name)
if extension_handle.containing_type is not message.DESCRIPTOR:
raise KeyError('Extension "%s" extends message type "%s", but this '
'message is of type "%s".' %
(extension_handle.full_name,
extension_handle.containing_type.full_name,
message.DESCRIPTOR.full_name))
# TODO(robinson): Unify error handling of "unknown extension" crap.
# TODO(robinson): Support iteritems()-style iteration over all
# extensions with the "has" bits turned on?
class _ExtensionDict(object):
"""Dict-like container for Extension fields on proto instances.
Note that in all cases we expect extension handles to be
FieldDescriptors.
"""
def __init__(self, extended_message):
"""
Args:
extended_message: Message instance for which we are the Extensions dict.
"""
self._extended_message = extended_message
def __getitem__(self, extension_handle):
"""Returns the current value of the given extension handle."""
_VerifyExtensionHandle(self._extended_message, extension_handle)
result = self._extended_message._fields.get(extension_handle)
if result is not None:
return result
if extension_handle.label == FieldDescriptor.LABEL_REPEATED:
result = extension_handle._default_constructor(self._extended_message)
elif extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
message_type = extension_handle.message_type
if not hasattr(message_type, '_concrete_class'):
# pylint: disable=protected-access
self._extended_message._FACTORY.GetPrototype(message_type)
assert getattr(extension_handle.message_type, '_concrete_class', None), (
'Uninitialized concrete class found for field %r (message type %r)'
% (extension_handle.full_name,
extension_handle.message_type.full_name))
result = extension_handle.message_type._concrete_class()
try:
result._SetListener(self._extended_message._listener_for_children)
except ReferenceError:
pass
else:
# Singular scalar -- just return the default without inserting into the
# dict.
return extension_handle.default_value
# Atomically check if another thread has preempted us and, if not, swap
# in the new object we just created. If someone has preempted us, we
# take that object and discard ours.
# WARNING: We are relying on setdefault() being atomic. This is true
# in CPython but we haven't investigated others. This warning appears
# in several other locations in this file.
result = self._extended_message._fields.setdefault(
extension_handle, result)
return result
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
my_fields = self._extended_message.ListFields()
other_fields = other._extended_message.ListFields()
# Get rid of non-extension fields.
my_fields = [field for field in my_fields if field.is_extension]
other_fields = [field for field in other_fields if field.is_extension]
return my_fields == other_fields
def __ne__(self, other):
return not self == other
def __len__(self):
fields = self._extended_message.ListFields()
# Get rid of non-extension fields.
extension_fields = [field for field in fields if field[0].is_extension]
return len(extension_fields)
def __hash__(self):
raise TypeError('unhashable object')
# Note that this is only meaningful for non-repeated, scalar extension
# fields. Note also that we may have to call _Modified() when we do
# successfully set a field this way, to set any necessary "has" bits in the
# ancestors of the extended message.
def __setitem__(self, extension_handle, value):
"""If extension_handle specifies a non-repeated, scalar extension
field, sets the value of that field.
"""
_VerifyExtensionHandle(self._extended_message, extension_handle)
if (extension_handle.label == FieldDescriptor.LABEL_REPEATED or
extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE):
raise TypeError(
'Cannot assign to extension "%s" because it is a repeated or '
'composite type.' % extension_handle.full_name)
# It's slightly wasteful to lookup the type checker each time,
# but we expect this to be a vanishingly uncommon case anyway.
type_checker = type_checkers.GetTypeChecker(extension_handle)
# pylint: disable=protected-access
self._extended_message._fields[extension_handle] = (
type_checker.CheckValue(value))
self._extended_message._Modified()
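  # Illustrative application-level usage (module and extension names are
  # hypothetical; `my_pb2` would be generated by protoc):
  #   msg = my_pb2.MyMessage()
  #   msg.Extensions[my_pb2.my_ext] = 42     # scalar extension, uses __setitem__
  #   value = msg.Extensions[my_pb2.my_ext]  # uses __getitem__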
def __delitem__(self, extension_handle):
self._extended_message.ClearExtension(extension_handle)
def _FindExtensionByName(self, name):
"""Tries to find a known extension with the specified name.
Args:
name: Extension full name.
Returns:
Extension field descriptor.
"""
return self._extended_message._extensions_by_name.get(name, None)
def _FindExtensionByNumber(self, number):
"""Tries to find a known extension with the field number.
Args:
number: Extension field number.
Returns:
Extension field descriptor.
"""
return self._extended_message._extensions_by_number.get(number, None)
def __iter__(self):
# Return a generator over the populated extension fields
return (f[0] for f in self._extended_message.ListFields()
if f[0].is_extension)
def __contains__(self, extension_handle):
_VerifyExtensionHandle(self._extended_message, extension_handle)
if extension_handle not in self._extended_message._fields:
return False
if extension_handle.label == FieldDescriptor.LABEL_REPEATED:
return bool(self._extended_message._fields.get(extension_handle))
if extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
value = self._extended_message._fields.get(extension_handle)
# pylint: disable=protected-access
return value is not None and value._is_present_in_parent
return True
| bsd-3-clause | 3,795,216,932,513,353,700 | 38.638498 | 79 | 0.710648 | false |
dymkowsk/mantid | scripts/Interface/reduction_gui/widgets/output.py | 3 | 1650 | from __future__ import (absolute_import, division, print_function)
from PyQt4 import QtGui
from reduction_gui.reduction.output_script import Output
from reduction_gui.widgets.base_widget import BaseWidget
import ui.ui_hfir_output
class OutputWidget(BaseWidget):
"""
    Widget that presents the reduction output log to the user
"""
_plot = None
## Widget name
name = "Output"
def __init__(self, parent=None, state=None, settings=None):
BaseWidget.__init__(self, parent=parent, state=state, settings=settings)
class OutputFrame(QtGui.QFrame, ui.ui_hfir_output.Ui_Frame):
def __init__(self, parent=None):
QtGui.QFrame.__init__(self, parent)
self.setupUi(self)
self._content = OutputFrame(self)
self._layout.addWidget(self._content)
self.initialize_content()
def initialize_content(self):
"""
Declare the validators and event connections for the
widgets loaded through the .ui file.
"""
# Clear data list
self._content.output_text_edit.clear()
# Clear rebin
self._content.rebin_groupbox.deleteLater()
self._content.n_q_bins_label.hide()
self._content.n_q_bins_edit.hide()
self._content.rebin_button.hide()
self._content.lin_binning_radio.hide()
self._content.log_binning_radio.hide()
def set_state(self, state):
self._content.output_text_edit.setText(state.log_text)
def get_state(self):
"""
Returns an object with the state of the interface
"""
return Output()
| gpl-3.0 | 1,884,327,385,509,955,600 | 30.132075 | 80 | 0.625455 | false |
TimeWz667/Kamanian | example/OOP/O2.2 SS, CTBN.py | 1 | 1302 | import complexism as cx
import epidag as dag
__author__ = 'TimeWz667'
# Create a new blueprint of CTBN
bp = cx.BlueprintCTBN('Test')
# Add microstates
bp.add_microstate('A', ['N', 'Y'])
bp.add_microstate('B', ['N', 'Y'])
# Name combinations of microstates as states
bp.add_state('A', A='Y')
bp.add_state('a', A='N')
bp.add_state('B', B='Y')
bp.add_state('b', B='N')
bp.add_state('ab', A='N', B='N')
bp.add_state('AB', A='Y', B='Y')
# Add transitions
bp.add_transition('TrA', 'A', 'exp(0.1)')
bp.add_transition('TrB', 'B')
# Link transitions to states
bp.link_state_transition('a', 'TrA')
bp.link_state_transition('b', 'TrB')
psc = """
PCore ABC{
beta ~ exp(0.5)
TrA ~ lnorm(beta, 1)
TrB ~ gamma(beta, 100)
}
"""
# Sample root nodes
pc = dag.quick_build_parameter_core(psc)
print('\nUse a parameter model to support samplers')
print(pc.Actors.keys())
# Use pc to generate a dynamic core
dc = bp.generate_model('TestCTBN', **pc.Actors)
print('\nCombining parameter model and dynamic model')
print(dc)
state_ab = dc['ab']
state_a = dc['a']
state_A = dc['A']
print('\nTest inclusions')
# print('ab have a:', state_ab.isa(state_a))
print('ab have a:', state_a in state_ab)
print('ab have A:', state_A in state_ab)
print('\nTransitions follows ab')
print(state_ab.next_transitions())
| mit | -1,086,134,380,534,202,600 | 21.842105 | 54 | 0.64977 | false |
virtualopensystems/neutron | neutron/plugins/bigswitch/plugin.py | 1 | 51020 | # Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mandeep Dhami, Big Switch Networks, Inc.
# @author: Sumit Naiksatam, [email protected], Big Switch Networks, Inc.
"""
Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers.
NeutronRestProxy provides a generic neutron plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps a persistent store for all neutron
state to allow for re-sync of the external controller(s), if required.
The local state on the plugin also allows for local response and fast-fail
semantics where the response can be determined from the local persistent store.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between neutron and the
network controller
- independent upgrade/development cycles between neutron and the controller
as it limits the proxy code upgrade requirement to neutron release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with neutron for independent recovery/reset
External REST API used by proxy is the same API as defined for neutron (JSON
subset) with some additional parameters (gateway on network-create and macaddr
on port-attach) on an additional PUT to do a bulk dump of all persistent data.
"""
import copy
import functools
import httplib
import re
import eventlet
from oslo.config import cfg
from sqlalchemy.orm import exc as sqlexc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api import extensions as neutron_extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context as qcontext
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.db import securitygroups_rpc_base as sg_rpc_base
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import external_net
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.bigswitch import config as pl_config
from neutron.plugins.bigswitch.db import porttracker_db
from neutron.plugins.bigswitch import extensions
from neutron.plugins.bigswitch import routerrule_db
from neutron.plugins.bigswitch import servermanager
from neutron.plugins.bigswitch import version
LOG = logging.getLogger(__name__)
SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin')
METADATA_SERVER_IP = '169.254.169.254'
class AgentNotifierApi(n_rpc.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_port_update = topics.get_topic_name(
topic, topics.PORT, topics.UPDATE)
def port_update(self, context, port):
self.fanout_cast(context,
self.make_msg('port_update',
port=port),
topic=self.topic_port_update)
class RestProxyCallbacks(n_rpc.RpcCallback,
sg_rpc_base.SecurityGroupServerRpcCallbackMixin,
dhcp_rpc_base.DhcpRpcCallbackMixin):
RPC_API_VERSION = '1.1'
def get_port_from_device(self, device):
port_id = re.sub(r"^tap", "", device)
port = self.get_port_and_sgs(port_id)
if port:
port['device'] = device
return port
def get_port_and_sgs(self, port_id):
"""Get port from database with security group info."""
LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id)
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
with session.begin(subtransactions=True):
query = session.query(
models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id
)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(port_id))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_, sg_id in port_and_sgs if sg_id]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
routerrule_db.RouterRule_db_mixin):
supported_extension_aliases = ["binding"]
servers = None
def _get_all_data(self, get_ports=True, get_floating_ips=True,
get_routers=True):
admin_context = qcontext.get_admin_context()
networks = []
# this method is used by the ML2 driver so it can't directly invoke
# the self.get_(ports|networks) methods
plugin = manager.NeutronManager.get_plugin()
all_networks = plugin.get_networks(admin_context) or []
for net in all_networks:
mapped_network = self._get_mapped_network_with_subnets(net)
flips_n_ports = mapped_network
if get_floating_ips:
flips_n_ports = self._get_network_with_floatingips(
mapped_network)
if get_ports:
ports = []
net_filter = {'network_id': [net.get('id')]}
net_ports = plugin.get_ports(admin_context,
filters=net_filter) or []
for port in net_ports:
mapped_port = self._map_state_and_status(port)
mapped_port['attachment'] = {
'id': port.get('device_id'),
'mac': port.get('mac_address'),
}
mapped_port = self._extend_port_dict_binding(admin_context,
mapped_port)
ports.append(mapped_port)
flips_n_ports['ports'] = ports
if flips_n_ports:
networks.append(flips_n_ports)
data = {'networks': networks}
if get_routers:
routers = []
all_routers = self.get_routers(admin_context) or []
for router in all_routers:
interfaces = []
mapped_router = self._map_state_and_status(router)
router_filter = {
'device_owner': [const.DEVICE_OWNER_ROUTER_INTF],
'device_id': [router.get('id')]
}
router_ports = self.get_ports(admin_context,
filters=router_filter) or []
for port in router_ports:
net_id = port.get('network_id')
subnet_id = port['fixed_ips'][0]['subnet_id']
intf_details = self._get_router_intf_details(admin_context,
net_id,
subnet_id)
interfaces.append(intf_details)
mapped_router['interfaces'] = interfaces
routers.append(mapped_router)
data.update({'routers': routers})
return data
def _send_all_data(self, send_ports=True, send_floating_ips=True,
send_routers=True, timeout=None,
triggered_by_tenant=None):
"""Pushes all data to network ctrl (networks/ports, ports/attachments).
        This gives the controller an option to re-sync its persistent store
with neutron's current view of that data.
"""
data = self._get_all_data(send_ports, send_floating_ips, send_routers)
data['triggered_by_tenant'] = triggered_by_tenant
errstr = _("Unable to update remote topology: %s")
return self.servers.rest_action('PUT', servermanager.TOPOLOGY_PATH,
data, errstr, timeout=timeout)
def _get_network_with_floatingips(self, network, context=None):
if context is None:
context = qcontext.get_admin_context()
net_id = network['id']
net_filter = {'floating_network_id': [net_id]}
fl_ips = self.get_floatingips(context,
filters=net_filter) or []
network['floatingips'] = fl_ips
return network
def _get_all_subnets_json_for_network(self, net_id, context=None):
if context is None:
context = qcontext.get_admin_context()
# start a sub-transaction to avoid breaking parent transactions
with context.session.begin(subtransactions=True):
subnets = self._get_subnets_by_network(context,
net_id)
subnets_details = []
if subnets:
for subnet in subnets:
subnet_dict = self._make_subnet_dict(subnet)
mapped_subnet = self._map_state_and_status(subnet_dict)
subnets_details.append(mapped_subnet)
return subnets_details
def _get_mapped_network_with_subnets(self, network, context=None):
# if context is not provided, admin context is used
if context is None:
context = qcontext.get_admin_context()
network = self._map_state_and_status(network)
subnets = self._get_all_subnets_json_for_network(network['id'],
context)
network['subnets'] = subnets
for subnet in (subnets or []):
if subnet['gateway_ip']:
# FIX: For backward compatibility with wire protocol
network['gateway'] = subnet['gateway_ip']
break
else:
network['gateway'] = ''
network[external_net.EXTERNAL] = self._network_is_external(
context, network['id'])
# include ML2 segmentation types
network['segmentation_types'] = getattr(self, "segmentation_types", "")
return network
def _send_create_network(self, network, context=None):
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
self.servers.rest_create_network(tenant_id, mapped_network)
def _send_update_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
net_fl_ips = self._get_network_with_floatingips(mapped_network,
context)
self.servers.rest_update_network(tenant_id, net_id, net_fl_ips)
def _send_delete_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
self.servers.rest_delete_network(tenant_id, net_id)
def _map_state_and_status(self, resource):
resource = copy.copy(resource)
resource['state'] = ('UP' if resource.pop('admin_state_up',
True) else 'DOWN')
resource.pop('status', None)
return resource
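    # Illustrative example (hypothetical resource) of the mapping above: an
    # input of {'name': 'net1', 'admin_state_up': False, 'status': 'ACTIVE'}
    # comes back as {'name': 'net1', 'state': 'DOWN'}; admin_state_up is
    # folded into 'state' and 'status' is dropped before the resource is
    # sent to the backend controller.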
def _warn_on_state_status(self, resource):
if resource.get('admin_state_up', True) is False:
LOG.warning(_("Setting admin_state_up=False is not supported "
"in this plugin version. Ignoring setting for "
"resource: %s"), resource)
if 'status' in resource:
if resource['status'] != const.NET_STATUS_ACTIVE:
LOG.warning(_("Operational status is internally set by the "
"plugin. Ignoring setting status=%s."),
resource['status'])
def _get_router_intf_details(self, context, intf_id, subnet_id):
# we will use the network id as interface's id
net_id = intf_id
network = self.get_network(context, net_id)
subnet = self.get_subnet(context, subnet_id)
mapped_network = self._get_mapped_network_with_subnets(network)
mapped_subnet = self._map_state_and_status(subnet)
data = {
'id': intf_id,
"network": mapped_network,
"subnet": mapped_subnet
}
return data
def _extend_port_dict_binding(self, context, port):
cfg_vif_type = cfg.CONF.NOVA.vif_type.lower()
if not cfg_vif_type in (portbindings.VIF_TYPE_OVS,
portbindings.VIF_TYPE_IVS):
LOG.warning(_("Unrecognized vif_type in configuration "
"[%s]. Defaulting to ovs."),
cfg_vif_type)
cfg_vif_type = portbindings.VIF_TYPE_OVS
# In ML2, the host_id is already populated
if portbindings.HOST_ID in port:
hostid = port[portbindings.HOST_ID]
else:
hostid = porttracker_db.get_port_hostid(context, port['id'])
if hostid:
port[portbindings.HOST_ID] = hostid
override = self._check_hostvif_override(hostid)
if override:
cfg_vif_type = override
port[portbindings.VIF_TYPE] = cfg_vif_type
port[portbindings.VIF_DETAILS] = {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases,
portbindings.OVS_HYBRID_PLUG: True
}
return port
def _check_hostvif_override(self, hostid):
for v in cfg.CONF.NOVA.vif_types:
if hostid in getattr(cfg.CONF.NOVA, "node_override_vif_" + v, []):
return v
return False
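    # Illustrative configuration (hypothetical host names) consumed by
    # _check_hostvif_override; the option group and option names come from the
    # lookups above, the values are made up:
    #
    #   [NOVA]
    #   vif_types = ovs,ivs
    #   node_override_vif_ivs = compute-1,compute-2
    #
    # With such a config, a port bound to host 'compute-1' would have its
    # vif_type overridden to 'ivs' regardless of the global NOVA.vif_type.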
def _get_port_net_tenantid(self, context, port):
net = super(NeutronRestProxyV2Base,
self).get_network(context, port["network_id"])
return net['tenant_id']
def async_port_create(self, tenant_id, net_id, port):
try:
self.servers.rest_create_port(tenant_id, net_id, port)
except servermanager.RemoteRestError as e:
# 404 should never be received on a port create unless
# there are inconsistencies between the data in neutron
# and the data in the backend.
# Run a sync to get it consistent.
if (cfg.CONF.RESTPROXY.auto_sync_on_failure and
e.status == httplib.NOT_FOUND and
servermanager.NXNETWORK in e.reason):
LOG.error(_("Iconsistency with backend controller "
"triggering full synchronization."))
# args depend on if we are operating in ML2 driver
# or as the full plugin
topoargs = self.servers.get_topo_function_args
self._send_all_data(
send_ports=topoargs['get_ports'],
send_floating_ips=topoargs['get_floating_ips'],
send_routers=topoargs['get_routers'],
triggered_by_tenant=tenant_id
)
# If the full sync worked, the port will be created
# on the controller so it can be safely marked as active
else:
# Any errors that don't result in a successful auto-sync
# require that the port be placed into the error state.
LOG.error(
_("NeutronRestProxyV2: Unable to create port: %s"), e)
try:
self._set_port_status(port['id'], const.PORT_STATUS_ERROR)
except exceptions.PortNotFound:
# If port is already gone from DB and there was an error
# creating on the backend, everything is already consistent
pass
return
new_status = (const.PORT_STATUS_ACTIVE if port['state'] == 'UP'
else const.PORT_STATUS_DOWN)
try:
self._set_port_status(port['id'], new_status)
except exceptions.PortNotFound:
# This port was deleted before the create made it to the controller
# so it now needs to be deleted since the normal delete request
            # would have deleted a non-existent port.
self.servers.rest_delete_port(tenant_id, net_id, port['id'])
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
def _set_port_status(self, port_id, status):
session = db.get_session()
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.flush()
except sqlexc.NoResultFound:
raise exceptions.PortNotFound(port_id=port_id)
def put_context_in_serverpool(f):
@functools.wraps(f)
def wrapper(self, context, *args, **kwargs):
self.servers.set_context(context)
return f(self, context, *args, **kwargs)
return wrapper
class NeutronRestProxyV2(NeutronRestProxyV2Base,
addr_pair_db.AllowedAddressPairsMixin,
extradhcpopt_db.ExtraDhcpOptMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
sg_rpc_base.SecurityGroupServerRpcMixin):
_supported_extension_aliases = ["external-net", "router", "binding",
"router_rules", "extra_dhcp_opt", "quotas",
"dhcp_agent_scheduler", "agent",
"security-group", "allowed-address-pairs"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
super(NeutronRestProxyV2, self).__init__()
LOG.info(_('NeutronRestProxy: Starting plugin. Version=%s'),
version.version_string_with_vcs())
pl_config.register_config()
self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)
# Include the Big Switch Extensions path in the api_extensions
neutron_extensions.append_api_extensions_path(extensions.__path__)
self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route
# init network ctrl connections
self.servers = servermanager.ServerPool()
self.servers.get_topo_function = self._get_all_data
self.servers.get_topo_function_args = {'get_ports': True,
'get_floating_ips': True,
'get_routers': True}
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
# setup rpc for security and DHCP agents
self._setup_rpc()
if cfg.CONF.RESTPROXY.sync_data:
self._send_all_data()
LOG.debug(_("NeutronRestProxyV2: initialization done"))
def _setup_rpc(self):
self.conn = n_rpc.create_connection(new=True)
self.topic = topics.PLUGIN
self.notifier = AgentNotifierApi(topics.AGENT)
# init dhcp agent support
self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
self._dhcp_agent_notifier
)
self.endpoints = [RestProxyCallbacks(),
agents_db.AgentExtRpcCallback()]
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
# Consume from all consumers in threads
self.conn.consume_in_threads()
@put_context_in_serverpool
def create_network(self, context, network):
"""Create a network.
Network represents an L2 network segment which can have a set of
subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can specify
a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: create_network() called"))
self._warn_on_state_status(network['network'])
with context.session.begin(subtransactions=True):
self._ensure_default_security_group(
context,
network['network']["tenant_id"]
)
# create network in DB
new_net = super(NeutronRestProxyV2, self).create_network(context,
network)
self._process_l3_create(context, new_net, network['network'])
# create network on the network controller
self._send_create_network(new_net, context)
# return created network
return new_net
@put_context_in_serverpool
def update_network(self, context, net_id, network):
"""Updates the properties of a particular Virtual Network.
:param context: neutron api request context
:param net_id: uuid of the network to update
:param network: dictionary describing the updates
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can
specify a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2.update_network() called"))
self._warn_on_state_status(network['network'])
session = context.session
with session.begin(subtransactions=True):
new_net = super(NeutronRestProxyV2, self).update_network(
context, net_id, network)
self._process_l3_update(context, new_net, network['network'])
# update network on network controller
self._send_update_network(new_net, context)
return new_net
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_network(self, context, net_id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
:returns: None
:raises: exceptions.NetworkInUse
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: delete_network() called"))
# Validate args
orig_net = super(NeutronRestProxyV2, self).get_network(context, net_id)
with context.session.begin(subtransactions=True):
self._process_l3_delete(context, net_id)
ret_val = super(NeutronRestProxyV2, self).delete_network(context,
net_id)
self._send_delete_network(orig_net, context)
return ret_val
@put_context_in_serverpool
def create_port(self, context, port):
"""Create a port, which is a connection point of a device
(e.g., a VM NIC) to attach an L2 Neutron network.
:param context: neutron api request context
:param port: dictionary describing the port
:returns:
{
"id": uuid representing the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": Sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet IDs and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.NetworkNotFound
:raises: exceptions.StateInvalid
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: create_port() called"))
# Update DB in new session so exceptions rollback changes
with context.session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
# non-router port status is set to pending. it is then updated
# after the async rest call completes. router ports are synchronous
if port['port']['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
port['port']['status'] = const.PORT_STATUS_ACTIVE
else:
port['port']['status'] = const.PORT_STATUS_BUILD
dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
new_port = super(NeutronRestProxyV2, self).create_port(context,
port)
self._process_port_create_security_group(context, new_port, sgids)
if (portbindings.HOST_ID in port['port']
and 'id' in new_port):
host_id = port['port'][portbindings.HOST_ID]
porttracker_db.put_port_hostid(context, new_port['id'],
host_id)
new_port[addr_pair.ADDRESS_PAIRS] = (
self._process_create_allowed_address_pairs(
context, new_port,
port['port'].get(addr_pair.ADDRESS_PAIRS)))
self._process_port_create_extra_dhcp_opts(context, new_port,
dhcp_opts)
new_port = self._extend_port_dict_binding(context, new_port)
net = super(NeutronRestProxyV2,
self).get_network(context, new_port["network_id"])
if self.add_meta_server_route:
if new_port['device_owner'] == const.DEVICE_OWNER_DHCP:
destination = METADATA_SERVER_IP + '/32'
self._add_host_route(context, destination, new_port)
# create on network ctrl
mapped_port = self._map_state_and_status(new_port)
# ports have to be created synchronously when creating a router
# port since adding router interfaces is a multi-call process
if mapped_port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
self.servers.rest_create_port(net["tenant_id"],
new_port["network_id"],
mapped_port)
else:
self.evpool.spawn_n(self.async_port_create, net["tenant_id"],
new_port["network_id"], mapped_port)
self.notify_security_groups_member_updated(context, new_port)
return new_port
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
port = super(NeutronRestProxyV2, self).get_port(context, id,
fields)
self._extend_port_dict_binding(context, port)
return self._fields(port, fields)
def get_ports(self, context, filters=None, fields=None):
with context.session.begin(subtransactions=True):
ports = super(NeutronRestProxyV2, self).get_ports(context, filters,
fields)
for port in ports:
self._extend_port_dict_binding(context, port)
return [self._fields(port, fields) for port in ports]
@put_context_in_serverpool
def update_port(self, context, port_id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
:returns: a mapping sequence with the following signature:
{
"id": uuid representing the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet IDs and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.StateInvalid
:raises: exceptions.PortNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: update_port() called"))
self._warn_on_state_status(port['port'])
# Validate Args
orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id)
with context.session.begin(subtransactions=True):
# Update DB
new_port = super(NeutronRestProxyV2,
self).update_port(context, port_id, port)
ctrl_update_required = False
if addr_pair.ADDRESS_PAIRS in port['port']:
ctrl_update_required |= (
self.update_address_pairs_on_port(context, port_id, port,
orig_port, new_port))
self._update_extra_dhcp_opts_on_port(context, port_id, port,
new_port)
old_host_id = porttracker_db.get_port_hostid(context,
orig_port['id'])
if (portbindings.HOST_ID in port['port']
and 'id' in new_port):
host_id = port['port'][portbindings.HOST_ID]
porttracker_db.put_port_hostid(context, new_port['id'],
host_id)
if old_host_id != host_id:
ctrl_update_required = True
if (new_port.get("device_id") != orig_port.get("device_id") and
orig_port.get("device_id")):
ctrl_update_required = True
if ctrl_update_required:
# tenant_id must come from network in case network is shared
net_tenant_id = self._get_port_net_tenantid(context, new_port)
new_port = self._extend_port_dict_binding(context, new_port)
mapped_port = self._map_state_and_status(new_port)
self.servers.rest_update_port(net_tenant_id,
new_port["network_id"],
mapped_port)
agent_update_required = self.update_security_group_on_port(
context, port_id, port, orig_port, new_port)
agent_update_required |= self.is_security_group_member_updated(
context, orig_port, new_port)
# return new_port
return new_port
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_port(self, context, port_id, l3_port_check=True):
"""Delete a port.
:param context: neutron api request context
:param id: UUID representing the port to delete.
:raises: exceptions.PortInUse
:raises: exceptions.PortNotFound
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: delete_port() called"))
# if needed, check to see if this is a port owned by
        # an l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, port_id)
with context.session.begin(subtransactions=True):
router_ids = self.disassociate_floatingips(
context, port_id, do_notify=False)
self._delete_port_security_group_bindings(context, port_id)
port = super(NeutronRestProxyV2, self).get_port(context, port_id)
# Tenant ID must come from network in case the network is shared
tenid = self._get_port_net_tenantid(context, port)
self._delete_port(context, port_id)
self.servers.rest_delete_port(tenid, port['network_id'], port_id)
# now that we've left db transaction, we are safe to notify
self.notify_routers_updated(context, router_ids)
@put_context_in_serverpool
def create_subnet(self, context, subnet):
LOG.debug(_("NeutronRestProxyV2: create_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# create subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).create_subnet(context, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
@put_context_in_serverpool
def update_subnet(self, context, id, subnet):
LOG.debug(_("NeutronRestProxyV2: update_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# update subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).update_subnet(context, id, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_subnet(self, context, id):
LOG.debug(_("NeutronRestProxyV2: delete_subnet() called"))
orig_subnet = super(NeutronRestProxyV2, self).get_subnet(context, id)
net_id = orig_subnet['network_id']
with context.session.begin(subtransactions=True):
# delete subnet in DB
super(NeutronRestProxyV2, self).delete_subnet(context, id)
orig_net = super(NeutronRestProxyV2, self).get_network(context,
net_id)
# update network on network controller - exception will rollback
self._send_update_network(orig_net, context)
def _get_tenant_default_router_rules(self, tenant):
rules = cfg.CONF.ROUTER.tenant_default_router_rule
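        # Hedged example of the expected rule format (values are made up);
        # each entry is a colon-separated string with an optional nexthop list:
        #   '*:any:any:permit'                        -> default for all tenants
        #   'tenant-1:10.0.0.0/24:any:deny'           -> tenant-specific, no nexthops
        #   'tenant-1:any:any:permit:1.2.3.4,1.2.3.5' -> with nexthops
        # Entries whose tenantid is '*' build the default set returned when no
        # tenant-specific rules are configured.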
defaultset = []
tenantset = []
for rule in rules:
items = rule.split(':')
if len(items) == 5:
(tenantid, source, destination, action, nexthops) = items
elif len(items) == 4:
(tenantid, source, destination, action) = items
nexthops = ''
else:
continue
parsedrule = {'source': source,
'destination': destination, 'action': action,
'nexthops': nexthops.split(',')}
if parsedrule['nexthops'][0] == '':
parsedrule['nexthops'] = []
if tenantid == '*':
defaultset.append(parsedrule)
if tenantid == tenant:
tenantset.append(parsedrule)
if tenantset:
return tenantset
return defaultset
@put_context_in_serverpool
def create_router(self, context, router):
LOG.debug(_("NeutronRestProxyV2: create_router() called"))
self._warn_on_state_status(router['router'])
tenant_id = self._get_tenant_id_for_create(context, router["router"])
# set default router rules
rules = self._get_tenant_default_router_rules(tenant_id)
router['router']['router_rules'] = rules
with context.session.begin(subtransactions=True):
# create router in DB
new_router = super(NeutronRestProxyV2, self).create_router(context,
router)
mapped_router = self._map_state_and_status(new_router)
self.servers.rest_create_router(tenant_id, mapped_router)
# return created router
return new_router
@put_context_in_serverpool
def update_router(self, context, router_id, router):
LOG.debug(_("NeutronRestProxyV2.update_router() called"))
self._warn_on_state_status(router['router'])
orig_router = super(NeutronRestProxyV2, self).get_router(context,
router_id)
tenant_id = orig_router["tenant_id"]
with context.session.begin(subtransactions=True):
new_router = super(NeutronRestProxyV2,
self).update_router(context, router_id, router)
router = self._map_state_and_status(new_router)
# update router on network controller
self.servers.rest_update_router(tenant_id, router, router_id)
# return updated router
return new_router
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock.
# delete_router ends up calling _delete_port instead of delete_port.
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_router(self, context, router_id):
LOG.debug(_("NeutronRestProxyV2: delete_router() called"))
with context.session.begin(subtransactions=True):
orig_router = self._get_router(context, router_id)
tenant_id = orig_router["tenant_id"]
# Ensure that the router is not used
router_filter = {'router_id': [router_id]}
fips = self.get_floatingips_count(context.elevated(),
filters=router_filter)
if fips:
raise l3.RouterInUse(router_id=router_id)
device_owner = l3_db.DEVICE_OWNER_ROUTER_INTF
device_filter = {'device_id': [router_id],
'device_owner': [device_owner]}
ports = self.get_ports_count(context.elevated(),
filters=device_filter)
if ports:
raise l3.RouterInUse(router_id=router_id)
ret_val = super(NeutronRestProxyV2,
self).delete_router(context, router_id)
# delete from network ctrl
self.servers.rest_delete_router(tenant_id, router_id)
return ret_val
@put_context_in_serverpool
def add_router_interface(self, context, router_id, interface_info):
LOG.debug(_("NeutronRestProxyV2: add_router_interface() called"))
# Validate args
router = self._get_router(context, router_id)
tenant_id = router['tenant_id']
with context.session.begin(subtransactions=True):
# create interface in DB
new_intf_info = super(NeutronRestProxyV2,
self).add_router_interface(context,
router_id,
interface_info)
port = self._get_port(context, new_intf_info['port_id'])
net_id = port['network_id']
subnet_id = new_intf_info['subnet_id']
# we will use the port's network id as interface's id
interface_id = net_id
intf_details = self._get_router_intf_details(context,
interface_id,
subnet_id)
# create interface on the network controller
self.servers.rest_add_router_interface(tenant_id, router_id,
intf_details)
return new_intf_info
@put_context_in_serverpool
def remove_router_interface(self, context, router_id, interface_info):
LOG.debug(_("NeutronRestProxyV2: remove_router_interface() called"))
# Validate args
router = self._get_router(context, router_id)
tenant_id = router['tenant_id']
# we will first get the interface identifier before deleting in the DB
if not interface_info:
msg = _("Either subnet_id or port_id must be specified")
raise exceptions.BadRequest(resource='router', msg=msg)
if 'port_id' in interface_info:
port = self._get_port(context, interface_info['port_id'])
interface_id = port['network_id']
elif 'subnet_id' in interface_info:
subnet = self._get_subnet(context, interface_info['subnet_id'])
interface_id = subnet['network_id']
else:
msg = _("Either subnet_id or port_id must be specified")
raise exceptions.BadRequest(resource='router', msg=msg)
with context.session.begin(subtransactions=True):
            # remove router interface in DB
del_ret = super(NeutronRestProxyV2,
self).remove_router_interface(context,
router_id,
interface_info)
            # remove router interface on the network controller
self.servers.rest_remove_router_interface(tenant_id, router_id,
interface_id)
return del_ret
@put_context_in_serverpool
def create_floatingip(self, context, floatingip):
LOG.debug(_("NeutronRestProxyV2: create_floatingip() called"))
with context.session.begin(subtransactions=True):
# create floatingip in DB
new_fl_ip = super(NeutronRestProxyV2,
self).create_floatingip(context, floatingip)
# create floatingip on the network controller
try:
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_create_floatingip(
new_fl_ip['tenant_id'], new_fl_ip)
else:
self._send_floatingip_update(context)
except servermanager.RemoteRestError as e:
with excutils.save_and_reraise_exception():
LOG.error(
_("NeutronRestProxyV2: Unable to create remote "
"floating IP: %s"), e)
# return created floating IP
return new_fl_ip
@put_context_in_serverpool
def update_floatingip(self, context, id, floatingip):
LOG.debug(_("NeutronRestProxyV2: update_floatingip() called"))
with context.session.begin(subtransactions=True):
# update floatingip in DB
new_fl_ip = super(NeutronRestProxyV2,
self).update_floatingip(context, id, floatingip)
# update network on network controller
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_update_floatingip(new_fl_ip['tenant_id'],
new_fl_ip, id)
else:
self._send_floatingip_update(context)
return new_fl_ip
@put_context_in_serverpool
def delete_floatingip(self, context, id):
LOG.debug(_("NeutronRestProxyV2: delete_floatingip() called"))
with context.session.begin(subtransactions=True):
# delete floating IP in DB
old_fip = super(NeutronRestProxyV2, self).get_floatingip(context,
id)
super(NeutronRestProxyV2, self).delete_floatingip(context, id)
# update network on network controller
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_delete_floatingip(old_fip['tenant_id'], id)
else:
self._send_floatingip_update(context)
@put_context_in_serverpool
def disassociate_floatingips(self, context, port_id, do_notify=True):
LOG.debug(_("NeutronRestProxyV2: diassociate_floatingips() called"))
router_ids = super(NeutronRestProxyV2, self).disassociate_floatingips(
context, port_id, do_notify=do_notify)
self._send_floatingip_update(context)
return router_ids
# overriding method from l3_db as original method calls
# self.delete_floatingip() which in turn calls self.delete_port() which
# is locked with 'bsn-port-barrier'
@put_context_in_serverpool
def delete_disassociated_floatingips(self, context, network_id):
query = self._model_query(context, l3_db.FloatingIP)
query = query.filter_by(floating_network_id=network_id,
fixed_port_id=None,
router_id=None)
for fip in query:
context.session.delete(fip)
self._delete_port(context.elevated(), fip['floating_port_id'])
def _send_floatingip_update(self, context):
try:
ext_net_id = self.get_external_network_id(context)
if ext_net_id:
# Use the elevated state of the context for the ext_net query
admin_context = context.elevated()
ext_net = super(NeutronRestProxyV2,
self).get_network(admin_context, ext_net_id)
# update external network on network controller
self._send_update_network(ext_net, admin_context)
except exceptions.TooManyExternalNetworks:
# get_external_network can raise errors when multiple external
# networks are detected, which isn't supported by the Plugin
LOG.error(_("NeutronRestProxyV2: too many external networks"))
def _add_host_route(self, context, destination, port):
subnet = {}
for fixed_ip in port['fixed_ips']:
subnet_id = fixed_ip['subnet_id']
nexthop = fixed_ip['ip_address']
subnet['host_routes'] = [{'destination': destination,
'nexthop': nexthop}]
updated_subnet = self.update_subnet(context,
subnet_id,
{'subnet': subnet})
payload = {'subnet': updated_subnet}
self._dhcp_agent_notifier.notify(context, payload,
'subnet.update.end')
LOG.debug(_("Adding host route: "))
LOG.debug(_("Destination:%(dst)s nexthop:%(next)s"),
{'dst': destination, 'next': nexthop})
| apache-2.0 | -4,382,929,999,179,187,700 | 43.481255 | 79 | 0.575853 | false |
vmindru/ansible | lib/ansible/modules/network/aci/aci_switch_leaf_selector.py | 12 | 10381 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_switch_leaf_selector
short_description: Bind leaf selectors to switch policy leaf profiles (infra:LeafS, infra:NodeBlk, infra:RsAccNodePGrep)
description:
- Bind leaf selectors (with node block range and policy group) to switch policy leaf profiles on Cisco ACI fabrics.
notes:
- This module is to be used with M(aci_switch_policy_leaf_profile).
  One first creates a leaf profile (infra:NodeP) and then creates an associated selector (infra:LeafS).
seealso:
- module: aci_switch_policy_leaf_profile
- name: APIC Management Information Model reference
description: More information about the internal APIC classes B(infra:LeafS),
B(infra:NodeBlk) and B(infra:RsAccNodePGrp).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Bruno Calogero (@brunocalogero)
version_added: '2.5'
options:
description:
description:
- The description to assign to the C(leaf).
type: str
leaf_profile:
description:
- Name of the Leaf Profile to which we add a Selector.
type: str
aliases: [ leaf_profile_name ]
leaf:
description:
- Name of Leaf Selector.
type: str
aliases: [ name, leaf_name, leaf_profile_leaf_name, leaf_selector_name ]
leaf_node_blk:
description:
- Name of Node Block range to be added to Leaf Selector of given Leaf Profile.
type: str
aliases: [ leaf_node_blk_name, node_blk_name ]
leaf_node_blk_description:
description:
- The description to assign to the C(leaf_node_blk)
type: str
from:
description:
- Start of Node Block range.
type: int
aliases: [ node_blk_range_from, from_range, range_from ]
to:
description:
    - End of Node Block range.
type: int
aliases: [ node_blk_range_to, to_range, range_to ]
policy_group:
description:
- Name of the Policy Group to be added to Leaf Selector of given Leaf Profile.
type: str
aliases: [ name, policy_group_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: adding a switch policy leaf profile selector associated Node Block range (w/ policy group)
aci_switch_leaf_selector:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
leaf: leaf_selector_name
leaf_node_blk: node_blk_name
from: 1011
to: 1011
policy_group: somepolicygroupname
state: present
delegate_to: localhost
- name: adding a switch policy leaf profile selector associated Node Block range (w/o policy group)
aci_switch_leaf_selector:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
leaf: leaf_selector_name
leaf_node_blk: node_blk_name
from: 1011
to: 1011
state: present
delegate_to: localhost
- name: Removing a switch policy leaf profile selector
aci_switch_leaf_selector:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
leaf: leaf_selector_name
state: absent
delegate_to: localhost
- name: Querying a switch policy leaf profile selector
aci_switch_leaf_selector:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
leaf: leaf_selector_name
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update({
'description': dict(type='str'),
'leaf_profile': dict(type='str', aliases=['leaf_profile_name']), # Not required for querying all objects
'leaf': dict(type='str', aliases=['name', 'leaf_name', 'leaf_profile_leaf_name', 'leaf_selector_name']), # Not required for querying all objects
'leaf_node_blk': dict(type='str', aliases=['leaf_node_blk_name', 'node_blk_name']),
'leaf_node_blk_description': dict(type='str'),
# NOTE: Keyword 'from' is a reserved word in python, so we need it as a string
'from': dict(type='int', aliases=['node_blk_range_from', 'from_range', 'range_from']),
'to': dict(type='int', aliases=['node_blk_range_to', 'to_range', 'range_to']),
'policy_group': dict(type='str', aliases=['policy_group_name']),
'state': dict(type='str', default='present', choices=['absent', 'present', 'query']),
})
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['leaf_profile', 'leaf']],
['state', 'present', ['leaf_profile', 'leaf', 'leaf_node_blk', 'from', 'to']]
]
)
description = module.params['description']
leaf_profile = module.params['leaf_profile']
leaf = module.params['leaf']
leaf_node_blk = module.params['leaf_node_blk']
leaf_node_blk_description = module.params['leaf_node_blk_description']
from_ = module.params['from']
to_ = module.params['to']
policy_group = module.params['policy_group']
state = module.params['state']
# Build child_configs dynamically
child_configs = [
dict(
infraNodeBlk=dict(
attributes=dict(
descr=leaf_node_blk_description,
name=leaf_node_blk,
from_=from_,
to_=to_,
),
),
),
]
# Add infraRsAccNodePGrp only when policy_group was defined
if policy_group is not None:
child_configs.append(dict(
infraRsAccNodePGrp=dict(
attributes=dict(
tDn='uni/infra/funcprof/accnodepgrp-{0}'.format(policy_group),
),
),
))
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='infraNodeP',
aci_rn='infra/nprof-{0}'.format(leaf_profile),
module_object=leaf_profile,
target_filter={'name': leaf_profile},
),
subclass_1=dict(
aci_class='infraLeafS',
# NOTE: normal rn: leaves-{name}-typ-{type}, hence here hardcoded to range for purposes of module
aci_rn='leaves-{0}-typ-range'.format(leaf),
module_object=leaf,
target_filter={'name': leaf},
),
# NOTE: infraNodeBlk is not made into a subclass because there is a 1-1 mapping between node block and leaf selector name
child_classes=['infraNodeBlk', 'infraRsAccNodePGrp'],
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='infraLeafS',
class_config=dict(
descr=description,
name=leaf,
),
child_configs=child_configs,
)
aci.get_diff(aci_class='infraLeafS')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 | -2,787,632,725,632,114,000 | 29.442815 | 153 | 0.610731 | false |
taynaud/sparkit-learn | splearn/preprocessing/tests/test_data.py | 2 | 1915 | # -*- coding: utf-8 -*-
"""
"""
from numpy.testing import assert_array_almost_equal
from sklearn.preprocessing import StandardScaler
from splearn.preprocessing.data import SparkStandardScaler
from splearn.utils.testing import SplearnTestCase
class TestSparkStandardScaler(SplearnTestCase):
def test_same_fit_transform(self):
X, X_rdd = self.make_dense_rdd()
local = StandardScaler()
dist = SparkStandardScaler()
X_trans = local.fit_transform(X)
X_rdd_trans = dist.fit_transform(X_rdd).toarray()
X_converted = dist.to_scikit().transform(X)
assert_array_almost_equal(X_trans, X_rdd_trans)
assert_array_almost_equal(X_trans, X_converted)
local = StandardScaler(with_mean=False)
dist = SparkStandardScaler(with_mean=False)
X_trans = local.fit_transform(X)
X_rdd_trans = dist.fit_transform(X_rdd).toarray()
X_converted = dist.to_scikit().transform(X)
assert_array_almost_equal(X_trans, X_rdd_trans)
assert_array_almost_equal(X_trans, X_converted)
local = StandardScaler(with_std=False)
dist = SparkStandardScaler(with_std=False)
X_trans = local.fit_transform(X)
X_rdd_trans = dist.fit_transform(X_rdd).toarray()
X_converted = dist.to_scikit().transform(X)
assert_array_almost_equal(X_trans, X_rdd_trans)
assert_array_almost_equal(X_trans, X_converted)
def test_same_fit_transform_sparse(self):
X, X_rdd = self.make_sparse_rdd()
local = StandardScaler(with_mean=False)
dist = SparkStandardScaler(with_mean=False)
X_trans = local.fit_transform(X).toarray()
X_rdd_trans = dist.fit_transform(X_rdd).toarray()
X_converted = dist.to_scikit().transform(X).toarray()
assert_array_almost_equal(X_trans, X_rdd_trans)
assert_array_almost_equal(X_trans, X_converted)
| apache-2.0 | 4,894,530,815,103,312,000 | 32.596491 | 61 | 0.661619 | false |
alexgorban/models | research/delf/delf/python/examples/extract_boxes.py | 1 | 8602 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extracts bounding boxes from a list of images, saving them to files.
The images must be in JPG format. The program checks if boxes already
exist, and skips computation for those.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import app
from delf import box_io
from delf import detector
cmd_args = None
# Extension/suffix of produced files.
_BOX_EXT = '.boxes'
_VIZ_SUFFIX = '_viz.jpg'
# Used for plotting boxes.
_BOX_EDGE_COLORS = ['r', 'y', 'b', 'm', 'k', 'g', 'c', 'w']
# Pace to report extraction log.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
"""Helper function to read image paths.
Args:
list_path: Path to list of images, one image path per line.
Returns:
image_paths: List of image paths.
"""
with tf.gfile.GFile(list_path, 'r') as f:
image_paths = f.readlines()
image_paths = [entry.rstrip() for entry in image_paths]
return image_paths
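# Illustrative contents of the image list file read above (hypothetical
# paths), one JPEG path per line:
#   /data/images/img0001.jpg
#   /data/images/img0002.jpg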
def _FilterBoxesByScore(boxes, scores, class_indices, score_threshold):
"""Filter boxes based on detection scores.
Boxes with detection score >= score_threshold are returned.
Args:
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
score_threshold: Float detection score threshold to use.
Returns:
selected_boxes: selected `boxes`.
selected_scores: selected `scores`.
selected_class_indices: selected `class_indices`.
"""
selected_boxes = []
selected_scores = []
selected_class_indices = []
for i, box in enumerate(boxes):
if scores[i] >= score_threshold:
selected_boxes.append(box)
selected_scores.append(scores[i])
selected_class_indices.append(class_indices[i])
return np.array(selected_boxes), np.array(selected_scores), np.array(
selected_class_indices)
def _PlotBoxesAndSaveImage(image, boxes, output_path):
"""Plot boxes on image and save to output path.
Args:
image: Numpy array containing image.
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
output_path: String containing output path.
"""
height = image.shape[0]
width = image.shape[1]
fig, ax = plt.subplots(1)
ax.imshow(image)
for i, box in enumerate(boxes):
scaled_box = [
box[0] * height, box[1] * width, box[2] * height, box[3] * width
]
rect = patches.Rectangle([scaled_box[1], scaled_box[0]],
scaled_box[3] - scaled_box[1],
scaled_box[2] - scaled_box[0],
linewidth=3,
edgecolor=_BOX_EDGE_COLORS[i %
len(_BOX_EDGE_COLORS)],
facecolor='none')
ax.add_patch(rect)
ax.axis('off')
plt.savefig(output_path, bbox_inches='tight')
plt.close(fig)
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
tf.logging.set_verbosity(tf.logging.INFO)
# Read list of images.
tf.logging.info('Reading list of images...')
image_paths = _ReadImageList(cmd_args.list_images_path)
num_images = len(image_paths)
tf.logging.info('done! Found %d images', num_images)
# Create output directories if necessary.
if not tf.gfile.Exists(cmd_args.output_dir):
tf.gfile.MakeDirs(cmd_args.output_dir)
if cmd_args.output_viz_dir and not tf.gfile.Exists(cmd_args.output_viz_dir):
tf.gfile.MakeDirs(cmd_args.output_viz_dir)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
# Reading list of images.
filename_queue = tf.train.string_input_producer(image_paths, shuffle=False)
reader = tf.WholeFileReader()
_, value = reader.read(filename_queue)
image_tf = tf.image.decode_jpeg(value, channels=3)
image_tf = tf.expand_dims(image_tf, 0)
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
detector_fn = detector.MakeDetector(sess, cmd_args.detector_path)
# Start input enqueue threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
start = time.clock()
for i, image_path in enumerate(image_paths):
# Write to log-info once in a while.
if i == 0:
tf.logging.info('Starting to detect objects in images...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.clock() - start)
tf.logging.info(
'Processing image %d out of %d, last %d '
'images took %f seconds', i, num_images, _STATUS_CHECK_ITERATIONS,
elapsed)
start = time.clock()
        # Get next image.
im = sess.run(image_tf)
        # If boxes file already exists, skip its computation.
base_boxes_filename, _ = os.path.splitext(os.path.basename(image_path))
out_boxes_filename = base_boxes_filename + _BOX_EXT
out_boxes_fullpath = os.path.join(cmd_args.output_dir,
out_boxes_filename)
if tf.gfile.Exists(out_boxes_fullpath):
tf.logging.info('Skipping %s', image_path)
continue
# Extract and save boxes.
(boxes_out, scores_out, class_indices_out) = detector_fn(im)
(selected_boxes, selected_scores,
selected_class_indices) = _FilterBoxesByScore(boxes_out[0],
scores_out[0],
class_indices_out[0],
cmd_args.detector_thresh)
box_io.WriteToFile(out_boxes_fullpath, selected_boxes, selected_scores,
selected_class_indices)
if cmd_args.output_viz_dir:
out_viz_filename = base_boxes_filename + _VIZ_SUFFIX
out_viz_fullpath = os.path.join(cmd_args.output_viz_dir,
out_viz_filename)
_PlotBoxesAndSaveImage(im[0], selected_boxes, out_viz_fullpath)
# Finalize enqueue threads.
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--detector_path',
type=str,
default='/tmp/d2r_frcnn_20190411/',
help="""
Path to exported detector model.
""")
parser.add_argument(
'--detector_thresh',
type=float,
default=.0,
help="""
Detector threshold. Any box with confidence score lower than this is not
returned.
""")
parser.add_argument(
'--list_images_path',
type=str,
default='list_images.txt',
help="""
Path to list of images to undergo object detection.
""")
parser.add_argument(
'--output_dir',
type=str,
default='test_boxes',
help="""
Directory where bounding boxes will be written to. Each image's boxes
will be written to a file with same name, and extension replaced by
.boxes.
""")
parser.add_argument(
'--output_viz_dir',
type=str,
default='',
help="""
Optional. If set, a visualization of the detected boxes overlaid on the
image is produced, and saved to this directory. Each image is saved with
_viz.jpg suffix.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | 4,637,331,231,244,665,000 | 32.733333 | 80 | 0.622646 | false |
pierreberthet/local-scripts | anova2.py | 1 | 7624 | import numpy as np
import json
import matplotlib
#matplotlib.use('Agg')
import pylab as pl
import sys
import pprint as pp
import difflib
#from difflib_data import *
# Plot the different figures for the merged spikes and voltages recordings.
# This file, like MergeSpikefiles.py, should be one level up from Test/..., the output of a simulation.
import glob
import scipy.stats as stats
def get_weights(folder):
fparam = folder+'Test/Parameters/simulation_parameters.json'
f = open(fparam, 'r')
params = json.load(f)
params['multi_n']+=1
source_rew= folder+params['rewards_multi_fn']+'_'
rewards = np.zeros((params['multi_n'], params['n_iterations']))
for i in xrange(params['multi_n']):
rewards[i] = np.loadtxt(source_rew+str(i))
#for i in xrange(lend1):
# for j in xrange(params['n_actions']):
# wd1_m[i,j] = np.mean(wd1[:,i,j])
# wd1_std[i,j] = np.std(wd1[:,i,j])
# wd2_m[i,j] = np.mean(wd2[:,i,j])
# wd2_std[i,j] = np.std(wd2[:,i,j])
#for i in xrange(lenrp):
# for j in xrange(params['n_actions']*params['n_states']):
# wrp_m[i,j] = np.mean(wrp[:,i,j])
# wrp_std[i,j] = np.std(wrp[:,i,j])
return rewards
def stars(p):
if p < 0.0001:
return "****"
elif (p < 0.001):
return "***"
elif (p < 0.01):
return "**"
elif (p < 0.05):
return "*"
else:
return "-"
######################################
######################################
if len(sys.argv) < 2:
    print "Need 2 folders for comparison"
    sys.exit(1)
fname ={}
for i in xrange(1,len(sys.argv)):
fname[i-1] = sys.argv[i]+'/'
params={}
for i in xrange(len(fname)):
params[i]=json.load(open(fname[i]+'Test/Parameters/simulation_parameters.json'))
#print 'Do the simulations match? ', params[:]['n_recordings']==params[:]['n_recordings']
#diff = difflib.ndiff(open(fparam1,'r').readlines(), open(fparam2,'r').readlines())
#print ''.join(diff)
rew = {}
for i in xrange(len(fname)):
rew[i] = get_weights(fname[i])
start = 4
startpd = 11
#shift = start*params[1]['block_len']*params[1]['t_iteration']/params[1]['resolution']
#shift_rew = start*params[1]['block_len']
#shiftpd = startpd*params[1]['block_len']*params[1]['t_iteration']/params[1]['resolution']
#shiftpd_rew = startpd*params[1]['block_len']
p = len(fname)-1
perf = {}
for i in xrange(len(fname)):
#perf[i] = np.zeros(params[1]['multi_n'], dtype=float)
perf[i] = []
j=0
#for i in xrange(shift_rew, params1['n_iterations']):
# r1[j]=sum(rewa[:,i])
# r2[j]=sum(rewb[:,i])
# j+=1
#for i in xrange(start, params1['n_blocks']):
for f in xrange(len(fname)-1):
j=0
for i in xrange(params[f]['multi_n']):
for q in xrange(start, params[f]['n_blocks']):
perf[f]= np.append(perf[f],rew[f][i,q*params[f]['block_len']-6:q*params[f]['block_len']-1])
j+=1
j=0
for i in xrange(params[p]['multi_n']):
for q in xrange(startpd, params[p]['n_blocks']):
perf[p] = np.append(perf[p], rew[p][i,q*params[p]['block_len']-6:q*params[p]['block_len']-1])
j+=1
#for f in xrange(len(fname)-1):
# perf[f] = perf[f]/((params[f]['n_blocks']-start)*5.)
#
#perf[p] = perf[p]/((params[p]['n_blocks']-startpd)*5.)
fig = pl.figure()
ax = fig.add_subplot(111)
ax.hlines(1./3., 0, 8, colors='gray', linestyles='dotted', label='chance')
print 'PERF'
for i in xrange(len(fname)):
print fname[i], 'mean= ', np.mean(perf[i]), 'SD=', np.std(perf[i])
for j in xrange(len(fname)):
print fname[j], 'mean ', np.mean(perf[j]), 'SD=', np.std(perf[j])
print 'T-TEST: ', stats.ttest_ind(perf[i],perf[j])
print 'F-TEST: ', stats.f_oneway(perf[i], perf[j])
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print '\n'
print 'F-TEST: ', stats.f_oneway(perf[i], [.33333])
print '+++++++++++++++++++++++++++++++'
print '\n'
print '\n'
bp = ax.boxplot([perf[v] for v in perf.iterkeys()])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.tick_params(axis='x', direction='out')
ax.tick_params(axis='y', length=0)
ax.grid(axis='y', color="0.9", linestyle='-', linewidth=1)
ax.set_axisbelow(True)
# colors, as before
import brewer2mpl
bmap = brewer2mpl.get_map('Set2', 'qualitative', len(fname))
colors = bmap.mpl_colors
for i in range(0, len(bp['boxes'])):
bp['boxes'][i].set_color(colors[i])
# we have two whiskers!
bp['whiskers'][i*2].set_color(colors[i])
bp['whiskers'][i*2 + 1].set_color(colors[i])
bp['whiskers'][i*2].set_linewidth(2)
bp['whiskers'][i*2 + 1].set_linewidth(2)
# top and bottom fliers
# (set allows us to set many parameters at once)
bp['fliers'][i * 2].set(markerfacecolor=colors[i],
marker='o', alpha=0.75, markersize=6,
markeredgecolor='none')
bp['fliers'][i * 2 + 1].set(markerfacecolor=colors[i],
marker='o', alpha=0.75, markersize=6,
markeredgecolor='none')
bp['medians'][i].set_color('black')
bp['medians'][i].set_linewidth(3)
# and 4 caps to remove
for c in bp['caps']:
c.set_linewidth(0)
for i in range(len(bp['boxes'])):
box = bp['boxes'][i]
box.set_linewidth(0)
boxX = []
boxY = []
for j in range(5):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
boxCoords = zip(boxX,boxY)
boxPolygon = pl.Polygon(boxCoords, facecolor = colors[i], linewidth=0)
ax.add_patch(boxPolygon)
#y_max = np.max(np.concatenate((low_mut_100, high_mut_100)))
#y_min = np.min(np.concatenate((low_mut_100, high_mut_100)))
#print y_max
y = 0.26
yA = 1.1
yB = 0.
ax.annotate("", xy=(3, .83), xycoords='data',
xytext=(2, .83), textcoords='data',
arrowprops=dict(arrowstyle="-", ec='#aaaaaa',
connectionstyle="bar,fraction=0.1"))
ax.text(2.5, .79, '$ns$',
horizontalalignment='center',
verticalalignment='center', fontsize='large')
ax.annotate("", xy=(5, y), xycoords='data',
xytext=(4, y), textcoords='data',
arrowprops=dict(arrowstyle="-", ec='#aaaaaa',
connectionstyle="bar,fraction=0.1"))
ax.text(4.5, .22, '$ns$',
horizontalalignment='center',
verticalalignment='center', fontsize='large')
ax.annotate("", xy=(8, .59), xycoords='data',
xytext=(7, .59), textcoords='data',
arrowprops=dict(arrowstyle="-", ec='#aaaaaa',
connectionstyle="bar,fraction=0.1"))
ax.text(7.5, .55, '$ns$',
horizontalalignment='center',
verticalalignment='center', fontsize='large')
si = 12
parms = {
'axes.labelsize': si,
'text.fontsize': si,
'legend.fontsize': si,
'xtick.labelsize': si,
'ytick.labelsize': si,
'text.usetex': False,
'figure.figsize': [6., 7.]
}
pl.rcParams.update(parms)
ax.set_ylim([0.,1.05])
ax.set_xticklabels(['Full','no\nD1', 'no\nD2', 'no\nRP', 'no\nEfference', 'no\nLI', 'no\nSF', 'PD'])
ax.set_xticklabels(['Full', 'no\nSF', 'no\nD1', 'no\nD2', 'no\nEfference', 'no\nLI', 'no\nRP', 'PD'])
ax.set_ylabel('Average success ratio for the last 5 trials of each block')
ax.set_ylabel('Average success ratio for the last 5 trials of the last 5 blocks')
pl.savefig('anovatest_.pdf', bbox_inches='tight', dpi=600)
pl.savefig('anovatest_.eps', bbox_inches='tight', dpi=600)
pl.savefig('anovatest_.tiff', bbox_inches='tight', dpi=600)
pl.savefig('anovatest_.png', bbox_inches='tight', dpi=600)
#pl.show()
| gpl-2.0 | -2,175,607,239,564,980,700 | 28.099237 | 104 | 0.586044 | false |
duduniao85/fundbi | PythonBasic/helper/meta_01.py | 1 | 1777 | #!/usr/bin/env python
# coding: utf-8
#http://python-3-patterns-idioms-test.readthedocs.org/en/latest/Metaprogramming.html
class Event(object):
events = [] # static
def __init__(self, action, time):
self.action = action
self.time = time
Event.events.append(self)
def __cmp__ (self, other):
"So sort() will compare only on time."
return cmp(self.time, other.time)
def run(self):
print("%.2f: %s" % (self.time, self.action))
@staticmethod
def run_events():
Event.events.sort();
for e in Event.events:
e.run()
def create_mc(description):
"Create subclass using the 'type' metaclass"
class_name = "".join(x.capitalize() for x in description.split())
def __init__(self, time):
Event.__init__(self, description + " [mc]", time)
globals()[class_name] = type(class_name, (Event,), dict(__init__ = __init__))
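# Note (illustrative only): the three-argument form of type() above builds a
# class at runtime; for the description "Light on" it is roughly equivalent to
# hand-writing:
#
#   class LightOn(Event):
#       def __init__(self, time):
#           Event.__init__(self, "Light on [mc]", time)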
def create_exec(description):
"Create subclass by exec-ing a string"
class_name = "".join(x.capitalize() for x in description.split())
klass = """
class %s(Event):
def __init__(self, time):
Event.__init__(self, "%s [exec]", time)""" % (class_name, description)
exec klass in globals()
if __name__ == "__main__":
descriptions = ["Light on", "Light off", "Water on", "Water off",
"Thermostat night", "Thermostat day", "Ring bell"]
initializations = "ThermostatNight(5.00); LightOff(2.00); \
WaterOn(3.30); WaterOff(4.45); LightOn(1.00); \
RingBell(7.00); ThermostatDay(6.00)"
[create_mc(dsc) for dsc in descriptions]
exec initializations in globals()
[create_exec(dsc) for dsc in descriptions]
exec initializations in globals()
Event.run_events() | gpl-3.0 | 6,692,018,167,835,117,000 | 32.54717 | 84 | 0.599887 | false |