Dataset schema (one row per source file; column: dtype, observed range or number of classes):

- repo_name: string, lengths 5 to 100
- path: string, lengths 4 to 299
- copies: string, 990 classes
- size: string, lengths 4 to 7
- content: string, lengths 666 to 1.03M
- license: string, 15 classes
- hash: int64, -9,223,351,895,964,839,000 to 9,223,297,778B
- line_mean: float64, 3.17 to 100
- line_max: int64, 7 to 1k
- alpha_frac: float64, 0.25 to 0.98
- autogenerated: bool, 1 class

repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
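The rows that follow are one-file-per-row records using the columns above. A minimal sketch of iterating such records is shown below; it assumes the dump is also available as JSON Lines with these column names, and the file name `code_rows.jsonl` and the helper `iter_rows` are purely illustrative, not part of the dataset.

```python
# Minimal sketch: iterating records of a code dataset with this schema.
# Assumes a JSON Lines export with the column names listed above.
import json

def iter_rows(path="code_rows.jsonl"):
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

if __name__ == "__main__":
    for row in iter_rows():
        # 'content' holds the full source text; the remaining columns are metadata.
        print(row["repo_name"], row["path"], row["license"], len(row["content"]))
```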
aquemy/HCBR | data/process_audiology.py | 1 | 2309 |
import csv
import os
import sys


def feature_to_index(cases, column_nb, offset=0):
    feature_set = set()
    for case in cases:
        feature_set.add(case[column_nb])
    l = list(feature_set)
    index = {}
    i = 0
    for f in l:
        if f != '':
            index[f] = i + offset
            i += 1
    return index


def filter_cases(cases, except_features):
    features_index = []
    features_mapping = {}
    offset = 0
    for j, f in enumerate([i for i in range(0, len(cases[0])) if i not in except_features]):
        feat = feature_to_index(cases, f, offset)
        features_index.append(feat)
        features_mapping[f] = j
        offset += len(feat)

    final_cases = []
    i = 0
    for case in cases:
        final_cases.append([])
        for j, f in enumerate(case):
            if j not in except_features:
                if case[j] is None or case[j] not in ['']:
                    translation = features_index[features_mapping[j]][case[j]]
                    final_cases[i].append(translation)
        i += 1
    return final_cases


def read_cases(path):
    cases = []
    headers = []
    with open(path, 'rb') as csvfile:
        reader = csvfile.readlines()
        n = len(reader[0].split(','))
        for i, row in enumerate(reader):
            if len(row.split(',')) == n:
                cases.append(row.split(','))
    return cases


def main():
    path = sys.argv[1]
    file_name = path.split('/')[-1].split('.')[0]
    base_name = file_name.split('.')[0]
    cases = read_cases(path)

    outcome_row = -1
    except_features_no_outcomes = [-1]
    final_cases = filter_cases(cases, except_features_no_outcomes)

    casebase_output = '{}_casebase.txt'.format(base_name)
    outcomes_output = '{}_outcomes.txt'.format(base_name)
    try:
        os.remove(casebase_output)
        os.remove(outcomes_output)
    except:
        pass

    with open(casebase_output, 'a') as file:
        for case in final_cases:
            for e in case:
                file.write('{} '.format(e))
            file.write('\n')

    with open(outcomes_output, 'a') as file:
        for case in cases:
            file.write('{}\n'.format('0' if case[outcome_row].strip() in ['normal_ear', 'cochlear_unknown'] else '1'))


if __name__ == '__main__':
    main()
| mit | 4,552,662,785,093,896,000 | 26.176471 | 118 | 0.543525 | false |
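To make the translation performed by `feature_to_index`/`filter_cases` above concrete, here is a small self-contained sketch of the same idea on invented rows. The helper `value_index` is a simplified, deterministic restatement written for illustration, not the original function, and the attribute values are made up.

```python
# Simplified restatement of the indexing idea above: every distinct non-empty
# value of every kept column gets a global integer id, and each case is
# rewritten as the list of ids of its values.
def value_index(cases, column, offset=0):
    values = sorted({case[column] for case in cases} - {''})
    return {v: i + offset for i, v in enumerate(values)}

cases = [
    ['mild', 'normal'],
    ['severe', 'absent'],
    ['mild', 'absent'],
]
index0 = value_index(cases, 0, offset=0)            # {'mild': 0, 'severe': 1}
index1 = value_index(cases, 1, offset=len(index0))  # {'absent': 2, 'normal': 3}
encoded = [[index0[c[0]], index1[c[1]]] for c in cases]
print(encoded)  # [[0, 3], [1, 2], [0, 2]]
```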
psiwczak/openstack | nova/tests/rpc/test_qpid.py | 1 | 13965 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using qpid
"""
import mox
from nova import context
from nova import flags
from nova import log as logging
from nova.rpc import amqp as rpc_amqp
from nova import test
try:
import qpid
from nova.rpc import impl_qpid
except ImportError:
qpid = None
impl_qpid = None
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class RpcQpidTestCase(test.TestCase):
"""
Exercise the public API of impl_qpid utilizing mox.
This set of tests utilizes mox to replace the Qpid objects and ensures
that the right operations happen on them when the various public rpc API
calls are exercised. The API calls tested here include:
nova.rpc.create_connection()
nova.rpc.common.Connection.create_consumer()
nova.rpc.common.Connection.close()
nova.rpc.cast()
nova.rpc.fanout_cast()
nova.rpc.call()
nova.rpc.multicall()
"""
def setUp(self):
super(RpcQpidTestCase, self).setUp()
self.mock_connection = None
self.mock_session = None
self.mock_sender = None
self.mock_receiver = None
if qpid:
impl_qpid.register_opts(FLAGS)
self.orig_connection = qpid.messaging.Connection
self.orig_session = qpid.messaging.Session
self.orig_sender = qpid.messaging.Sender
self.orig_receiver = qpid.messaging.Receiver
qpid.messaging.Connection = lambda *_x, **_y: self.mock_connection
qpid.messaging.Session = lambda *_x, **_y: self.mock_session
qpid.messaging.Sender = lambda *_x, **_y: self.mock_sender
qpid.messaging.Receiver = lambda *_x, **_y: self.mock_receiver
def tearDown(self):
if qpid:
qpid.messaging.Connection = self.orig_connection
qpid.messaging.Session = self.orig_session
qpid.messaging.Sender = self.orig_sender
qpid.messaging.Receiver = self.orig_receiver
if impl_qpid:
# Need to reset this in case we changed the connection_cls
# in self._setup_to_server_tests()
impl_qpid.Connection.pool.connection_cls = impl_qpid.Connection
super(RpcQpidTestCase, self).tearDown()
@test.skip_if(qpid is None, "Test requires qpid")
def test_create_connection(self):
self.mock_connection = self.mox.CreateMock(self.orig_connection)
self.mock_session = self.mox.CreateMock(self.orig_session)
self.mock_connection.opened().AndReturn(False)
self.mock_connection.open()
self.mock_connection.session().AndReturn(self.mock_session)
self.mock_connection.close()
self.mox.ReplayAll()
connection = impl_qpid.create_connection(FLAGS)
connection.close()
def _test_create_consumer(self, fanout):
self.mock_connection = self.mox.CreateMock(self.orig_connection)
self.mock_session = self.mox.CreateMock(self.orig_session)
self.mock_receiver = self.mox.CreateMock(self.orig_receiver)
self.mock_connection.opened().AndReturn(False)
self.mock_connection.open()
self.mock_connection.session().AndReturn(self.mock_session)
if fanout:
# The link name includes a UUID, so match it with a regex.
expected_address = mox.Regex(r'^impl_qpid_test_fanout ; '
'{"node": {"x-declare": {"auto-delete": true, "durable": '
'false, "type": "fanout"}, "type": "topic"}, "create": '
'"always", "link": {"x-declare": {"auto-delete": true, '
'"exclusive": true, "durable": false}, "durable": true, '
'"name": "impl_qpid_test_fanout_.*"}}$')
else:
expected_address = ('nova/impl_qpid_test ; {"node": {"x-declare": '
'{"auto-delete": true, "durable": true}, "type": "topic"}, '
'"create": "always", "link": {"x-declare": {"auto-delete": '
'true, "exclusive": false, "durable": false}, "durable": '
'true, "name": "impl_qpid_test"}}')
self.mock_session.receiver(expected_address).AndReturn(
self.mock_receiver)
self.mock_receiver.capacity = 1
self.mock_connection.close()
self.mox.ReplayAll()
connection = impl_qpid.create_connection(FLAGS)
connection.create_consumer("impl_qpid_test",
lambda *_x, **_y: None,
fanout)
connection.close()
@test.skip_if(qpid is None, "Test requires qpid")
def test_create_consumer(self):
self._test_create_consumer(fanout=False)
@test.skip_if(qpid is None, "Test requires qpid")
def test_create_consumer_fanout(self):
self._test_create_consumer(fanout=True)
def _test_cast(self, fanout, server_params=None):
self.mock_connection = self.mox.CreateMock(self.orig_connection)
self.mock_session = self.mox.CreateMock(self.orig_session)
self.mock_sender = self.mox.CreateMock(self.orig_sender)
self.mock_connection.opened().AndReturn(False)
self.mock_connection.open()
self.mock_connection.session().AndReturn(self.mock_session)
if fanout:
expected_address = ('impl_qpid_test_fanout ; '
'{"node": {"x-declare": {"auto-delete": true, '
'"durable": false, "type": "fanout"}, '
'"type": "topic"}, "create": "always"}')
else:
expected_address = ('nova/impl_qpid_test ; {"node": {"x-declare": '
'{"auto-delete": true, "durable": false}, "type": "topic"}, '
'"create": "always"}')
self.mock_session.sender(expected_address).AndReturn(self.mock_sender)
self.mock_sender.send(mox.IgnoreArg())
if not server_params:
# This is a pooled connection, so instead of closing it, it
# gets reset, which is just creating a new session on the
# connection.
self.mock_session.close()
self.mock_connection.session().AndReturn(self.mock_session)
self.mox.ReplayAll()
try:
ctx = context.RequestContext("user", "project")
args = [FLAGS, ctx, "impl_qpid_test",
{"method": "test_method", "args": {}}]
if server_params:
args.insert(2, server_params)
if fanout:
method = impl_qpid.fanout_cast_to_server
else:
method = impl_qpid.cast_to_server
else:
if fanout:
method = impl_qpid.fanout_cast
else:
method = impl_qpid.cast
method(*args)
finally:
while impl_qpid.Connection.pool.free_items:
# Pull the mock connection object out of the connection pool so
# that it doesn't mess up other test cases.
impl_qpid.Connection.pool.get()
@test.skip_if(qpid is None, "Test requires qpid")
def test_cast(self):
self._test_cast(fanout=False)
@test.skip_if(qpid is None, "Test requires qpid")
def test_fanout_cast(self):
self._test_cast(fanout=True)
def _setup_to_server_tests(self, server_params):
class MyConnection(impl_qpid.Connection):
def __init__(myself, *args, **kwargs):
super(MyConnection, myself).__init__(*args, **kwargs)
self.assertEqual(myself.connection.username,
server_params['username'])
self.assertEqual(myself.connection.password,
server_params['password'])
self.assertEqual(myself.broker,
server_params['hostname'] + ':' +
str(server_params['port']))
MyConnection.pool = rpc_amqp.Pool(FLAGS, MyConnection)
self.stubs.Set(impl_qpid, 'Connection', MyConnection)
@test.skip_if(qpid is None, "Test requires qpid")
def test_cast_to_server(self):
server_params = {'username': 'fake_username',
'password': 'fake_password',
'hostname': 'fake_hostname',
'port': 31337}
self._setup_to_server_tests(server_params)
self._test_cast(fanout=False, server_params=server_params)
@test.skip_if(qpid is None, "Test requires qpid")
def test_fanout_cast_to_server(self):
server_params = {'username': 'fake_username',
'password': 'fake_password',
'hostname': 'fake_hostname',
'port': 31337}
self._setup_to_server_tests(server_params)
self._test_cast(fanout=True, server_params=server_params)
def _test_call(self, multi):
self.mock_connection = self.mox.CreateMock(self.orig_connection)
self.mock_session = self.mox.CreateMock(self.orig_session)
self.mock_sender = self.mox.CreateMock(self.orig_sender)
self.mock_receiver = self.mox.CreateMock(self.orig_receiver)
self.mock_connection.opened().AndReturn(False)
self.mock_connection.open()
self.mock_connection.session().AndReturn(self.mock_session)
rcv_addr = mox.Regex(r'^.*/.* ; {"node": {"x-declare": {"auto-delete":'
' true, "durable": true, "type": "direct"}, "type": '
'"topic"}, "create": "always", "link": {"x-declare": '
'{"auto-delete": true, "exclusive": true, "durable": '
'false}, "durable": true, "name": ".*"}}')
self.mock_session.receiver(rcv_addr).AndReturn(self.mock_receiver)
self.mock_receiver.capacity = 1
send_addr = ('nova/impl_qpid_test ; {"node": {"x-declare": '
'{"auto-delete": true, "durable": false}, "type": "topic"}, '
'"create": "always"}')
self.mock_session.sender(send_addr).AndReturn(self.mock_sender)
self.mock_sender.send(mox.IgnoreArg())
self.mock_session.next_receiver(timeout=mox.IsA(int)).AndReturn(
self.mock_receiver)
self.mock_receiver.fetch().AndReturn(qpid.messaging.Message(
{"result": "foo", "failure": False, "ending": False}))
if multi:
self.mock_session.next_receiver(timeout=mox.IsA(int)).AndReturn(
self.mock_receiver)
self.mock_receiver.fetch().AndReturn(
qpid.messaging.Message(
{"result": "bar", "failure": False,
"ending": False}))
self.mock_session.next_receiver(timeout=mox.IsA(int)).AndReturn(
self.mock_receiver)
self.mock_receiver.fetch().AndReturn(
qpid.messaging.Message(
{"result": "baz", "failure": False,
"ending": False}))
self.mock_session.next_receiver(timeout=mox.IsA(int)).AndReturn(
self.mock_receiver)
self.mock_receiver.fetch().AndReturn(qpid.messaging.Message(
{"failure": False, "ending": True}))
self.mock_session.close()
self.mock_connection.session().AndReturn(self.mock_session)
self.mox.ReplayAll()
try:
ctx = context.RequestContext("user", "project")
if multi:
method = impl_qpid.multicall
else:
method = impl_qpid.call
res = method(FLAGS, ctx, "impl_qpid_test",
{"method": "test_method", "args": {}})
if multi:
self.assertEquals(list(res), ["foo", "bar", "baz"])
else:
self.assertEquals(res, "foo")
finally:
while impl_qpid.Connection.pool.free_items:
# Pull the mock connection object out of the connection pool so
# that it doesn't mess up other test cases.
impl_qpid.Connection.pool.get()
@test.skip_if(qpid is None, "Test requires qpid")
def test_call(self):
self._test_call(multi=False)
@test.skip_if(qpid is None, "Test requires qpid")
def test_multicall(self):
self._test_call(multi=True)
#
#from nova.tests.rpc import common
#
# Qpid does not have a handy in-memory transport like kombu, so it's not
# terribly straightforward to take advantage of the common unit tests.
# However, at least at the time of this writing, the common unit tests all pass
# with qpidd running.
#
# class RpcQpidCommonTestCase(common._BaseRpcTestCase):
# def setUp(self):
# self.rpc = impl_qpid
# super(RpcQpidCommonTestCase, self).setUp()
#
# def tearDown(self):
# super(RpcQpidCommonTestCase, self).tearDown()
#
| apache-2.0 | 4,080,920,901,437,090,000 | 40.316568 | 79 | 0.576083 | false |
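The suite above swaps the `qpid.messaging` classes for mocks by hand in `setUp()`/`tearDown()`. For comparison, a rough modern analogue of that swap using `unittest.mock` is sketched below; it is not part of the original tests, the class `PatchingSketch` is invented, and it assumes the `qpid.messaging` package is importable so the patch target can be resolved.

```python
# Rough modern analogue of the manual class swap done in setUp()/tearDown()
# above, using unittest.mock instead of mox. Illustrative only.
from unittest import TestCase, mock


class PatchingSketch(TestCase):
    def test_connection_class_is_replaced(self):
        fake_connection = mock.MagicMock(name="connection")
        # Mirrors "qpid.messaging.Connection = lambda *_x, **_y: self.mock_connection"
        with mock.patch("qpid.messaging.Connection", return_value=fake_connection):
            import qpid.messaging
            conn = qpid.messaging.Connection("localhost:5672")
            self.assertIs(conn, fake_connection)


if __name__ == "__main__":
    import unittest
    unittest.main()
```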
tonybenny2004/or-tools | examples/python/send_most_money.py | 34 | 3489 |
# Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SEND+MOST=MONEY in Google CP Solver.
Alphametic problem where we maximize MONEY.
Problem from the lecture notes:
http://www.ict.kth.se/courses/ID2204/notes/L01.pdf
Compare with the following models:
* Comet : http://www.hakank.org/comet/send_most_money.co
* Comet : http://www.hakank.org/comet/send_most_money2.co
* ECLiPSE : http://www.hakank.org/eclipse/send_most_money.ecl
* SICStus: http://hakank.org/sicstus/send_most_money.pl
* MiniZinc: http://www.hakank.org/minizinc/send_most_money.mzn
* Gecode/R: http://www.hakank.org/gecode_r/send_most_money2.rb
* Tailor/Essence': http://www.hakank.org/tailor/send_most_money.eprime
* Zinc: http://www.hakank.org/minizinc/send_most_money.zinc
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.constraint_solver import pywrapcp
def main(MONEY=0):
# Create the solver.
solver = pywrapcp.Solver('Send most money')
# data
# declare variables
s = solver.IntVar(0, 9, 's')
e = solver.IntVar(0, 9, 'e')
n = solver.IntVar(0, 9, 'n')
d = solver.IntVar(0, 9, 'd')
m = solver.IntVar(0, 9, 'm')
o = solver.IntVar(0, 9, 'o')
t = solver.IntVar(0, 9, 't')
y = solver.IntVar(0, 9, 'y')
money = solver.IntVar(0, 100000, 'money')
x = [s, e, n, d, m, o, t, y]
#
# constraints
#
if MONEY > 0:
solver.Add(money == MONEY)
solver.Add(solver.AllDifferent(x))
solver.Add(money == m * 10000 + o * 1000 + n * 100 + e * 10 + y)
solver.Add(money > 0)
solver.Add(1000 * s + 100 * e + 10 * n + d +
1000 * m + 100 * o + 10 * s + t ==
money)
solver.Add(s > 0)
solver.Add(m > 0)
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x)
solution.Add(money)
collector = solver.AllSolutionCollector(solution)
objective = solver.Maximize(money, 100)
cargs = [collector]
if MONEY == 0:
objective = solver.Maximize(money, 1)
cargs.extend([objective])
solver.Solve(solver.Phase(x,
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MAX_VALUE),
cargs)
num_solutions = collector.SolutionCount()
money_val = 0
for s in range(num_solutions):
print 'x:', [collector.Value(s, x[i]) for i in range(len(x))]
money_val = collector.Value(s, money)
print 'money:', money_val
print
print 'num_solutions:', num_solutions
print 'failures:', solver.Failures()
print 'branches:', solver.Branches()
print 'WallTime:', solver.WallTime()
if MONEY == 0:
return money_val
if __name__ == '__main__':
# First get the maximised MONEY, and then show all solutions for
# this value
print 'Minimize money...'
money = main(0)
print '\nCheck all solutions for money=%i' % money
main(money)
| apache-2.0 | 3,532,085,075,948,093,400 | 27.365854 | 74 | 0.656062 | false |
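As a sanity check on the CP model above, the alphametic can also be brute-forced directly. The sketch below is independent of or-tools; the helper `word_value` and variable names are invented, and it simply enumerates digit assignments and reports the maximum MONEY, which should agree with the solver's result.

```python
# Brute-force cross-check for SEND + MOST = MONEY, maximizing MONEY.
from itertools import permutations

def word_value(word, digits):
    value = 0
    for ch in word:
        value = value * 10 + digits[ch]
    return value

letters = sorted(set("SENDMOSTMONEY"))  # 8 distinct letters: D, E, M, N, O, S, T, Y
best = None
for perm in permutations(range(10), len(letters)):
    digits = dict(zip(letters, perm))
    if digits["S"] == 0 or digits["M"] == 0:
        continue
    send = word_value("SEND", digits)
    most = word_value("MOST", digits)
    money = word_value("MONEY", digits)
    if send + most == money and (best is None or money > best[0]):
        best = (money, send, most)

print(best)  # expected to match the maximal MONEY found by the CP model
```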
renyi533/tensorflow | tensorflow/lite/testing/op_tests/resolve_constant_strided_slice.py | 11 | 2039 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for resolve_constant_strided_slice."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function


# TODO(chaomei): refactor the test to cover more cases, like negative stride,
# negative array index etc.
@register_make_test_function()
def make_resolve_constant_strided_slice_tests(options):
  """Make a set of tests to show strided_slice yields incorrect results."""

  test_parameters = [{
      "unused_iteration_counter": [1],
  }]

  def build_graph(parameters):
    """Build the strided_slice op testing graph."""
    del parameters
    input_values = tf.compat.v1.placeholder(dtype=tf.float32, shape=[4, 2])
    data = tf.constant(
        [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]],
        tf.float32)
    return [input_values], [input_values + data[:, :2]]

  def build_inputs(parameters, sess, inputs, outputs):
    del parameters
    input_values = np.zeros([4, 2], dtype=np.float32)
    return [input_values], sess.run(
        outputs, feed_dict={inputs[0]: input_values})

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| apache-2.0 | 1,413,380,974,479,435,800 | 38.980392 | 80 | 0.687592 | false |
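The graph above adds the placeholder to `data[:, :2]`. A plain NumPy sketch of that slice, using the same constant values, makes the expected shape and contents explicit; it is illustrative only and not part of the test harness.

```python
# What data[:, :2] evaluates to for the constant used above (NumPy sketch).
import numpy as np

data = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]],
                dtype=np.float32)
print(data[:, :2])
# [[ 0.  1.]
#  [ 4.  5.]
#  [ 8.  9.]
#  [12. 13.]]
print(data[:, :2].shape)  # (4, 2), matching the [4, 2] placeholder above
```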
RAtechntukan/CouchPotatoServer | libs/unrar2/windows.py | 54 | 10914 |
# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Low level interface - see UnRARDLL\UNRARDLL.TXT
from __future__ import generators
from couchpotato.environment import Env
from shutil import copyfile
import ctypes.wintypes
import os.path
import time
from rar_exceptions import *
ERAR_END_ARCHIVE = 10
ERAR_NO_MEMORY = 11
ERAR_BAD_DATA = 12
ERAR_BAD_ARCHIVE = 13
ERAR_UNKNOWN_FORMAT = 14
ERAR_EOPEN = 15
ERAR_ECREATE = 16
ERAR_ECLOSE = 17
ERAR_EREAD = 18
ERAR_EWRITE = 19
ERAR_SMALL_BUF = 20
ERAR_UNKNOWN = 21
RAR_OM_LIST = 0
RAR_OM_EXTRACT = 1
RAR_SKIP = 0
RAR_TEST = 1
RAR_EXTRACT = 2
RAR_VOL_ASK = 0
RAR_VOL_NOTIFY = 1
RAR_DLL_VERSION = 3
# enum UNRARCALLBACK_MESSAGES
UCM_CHANGEVOLUME = 0
UCM_PROCESSDATA = 1
UCM_NEEDPASSWORD = 2
architecture_bits = ctypes.sizeof(ctypes.c_voidp)*8
dll_name = "unrar.dll"
if architecture_bits == 64:
dll_name = "unrar64.dll"
# Copy dll first
dll_file = os.path.join(os.path.dirname(__file__), dll_name)
dll_copy = os.path.join(Env.get('cache_dir'), 'copied.dll')
if os.path.isfile(dll_copy):
os.remove(dll_copy)
copyfile(dll_file, dll_copy)
unrar = ctypes.WinDLL(dll_copy)
class RAROpenArchiveDataEx(ctypes.Structure):
def __init__(self, ArcName=None, ArcNameW=u'', OpenMode=RAR_OM_LIST):
self.CmtBuf = ctypes.c_buffer(64*1024)
ctypes.Structure.__init__(self, ArcName=ArcName, ArcNameW=ArcNameW, OpenMode=OpenMode, _CmtBuf=ctypes.addressof(self.CmtBuf), CmtBufSize=ctypes.sizeof(self.CmtBuf))
_fields_ = [
('ArcName', ctypes.c_char_p),
('ArcNameW', ctypes.c_wchar_p),
('OpenMode', ctypes.c_uint),
('OpenResult', ctypes.c_uint),
('_CmtBuf', ctypes.c_voidp),
('CmtBufSize', ctypes.c_uint),
('CmtSize', ctypes.c_uint),
('CmtState', ctypes.c_uint),
('Flags', ctypes.c_uint),
('Reserved', ctypes.c_uint*32),
]
class RARHeaderDataEx(ctypes.Structure):
def __init__(self):
self.CmtBuf = ctypes.c_buffer(64*1024)
ctypes.Structure.__init__(self, _CmtBuf=ctypes.addressof(self.CmtBuf), CmtBufSize=ctypes.sizeof(self.CmtBuf))
_fields_ = [
('ArcName', ctypes.c_char*1024),
('ArcNameW', ctypes.c_wchar*1024),
('FileName', ctypes.c_char*1024),
('FileNameW', ctypes.c_wchar*1024),
('Flags', ctypes.c_uint),
('PackSize', ctypes.c_uint),
('PackSizeHigh', ctypes.c_uint),
('UnpSize', ctypes.c_uint),
('UnpSizeHigh', ctypes.c_uint),
('HostOS', ctypes.c_uint),
('FileCRC', ctypes.c_uint),
('FileTime', ctypes.c_uint),
('UnpVer', ctypes.c_uint),
('Method', ctypes.c_uint),
('FileAttr', ctypes.c_uint),
('_CmtBuf', ctypes.c_voidp),
('CmtBufSize', ctypes.c_uint),
('CmtSize', ctypes.c_uint),
('CmtState', ctypes.c_uint),
('Reserved', ctypes.c_uint*1024),
]
def DosDateTimeToTimeTuple(dosDateTime):
"""Convert an MS-DOS format date time to a Python time tuple.
"""
dosDate = dosDateTime >> 16
dosTime = dosDateTime & 0xffff
day = dosDate & 0x1f
month = (dosDate >> 5) & 0xf
year = 1980 + (dosDate >> 9)
second = 2*(dosTime & 0x1f)
minute = (dosTime >> 5) & 0x3f
hour = dosTime >> 11
return time.localtime(time.mktime((year, month, day, hour, minute, second, 0, 1, -1)))
def _wrap(restype, function, argtypes):
result = function
result.argtypes = argtypes
result.restype = restype
return result
RARGetDllVersion = _wrap(ctypes.c_int, unrar.RARGetDllVersion, [])
RAROpenArchiveEx = _wrap(ctypes.wintypes.HANDLE, unrar.RAROpenArchiveEx, [ctypes.POINTER(RAROpenArchiveDataEx)])
RARReadHeaderEx = _wrap(ctypes.c_int, unrar.RARReadHeaderEx, [ctypes.wintypes.HANDLE, ctypes.POINTER(RARHeaderDataEx)])
_RARSetPassword = _wrap(ctypes.c_int, unrar.RARSetPassword, [ctypes.wintypes.HANDLE, ctypes.c_char_p])
def RARSetPassword(*args, **kwargs):
_RARSetPassword(*args, **kwargs)
RARProcessFile = _wrap(ctypes.c_int, unrar.RARProcessFile, [ctypes.wintypes.HANDLE, ctypes.c_int, ctypes.c_char_p, ctypes.c_char_p])
RARCloseArchive = _wrap(ctypes.c_int, unrar.RARCloseArchive, [ctypes.wintypes.HANDLE])
UNRARCALLBACK = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_uint, ctypes.c_long, ctypes.c_long, ctypes.c_long)
RARSetCallback = _wrap(ctypes.c_int, unrar.RARSetCallback, [ctypes.wintypes.HANDLE, UNRARCALLBACK, ctypes.c_long])
RARExceptions = {
ERAR_NO_MEMORY : MemoryError,
ERAR_BAD_DATA : ArchiveHeaderBroken,
ERAR_BAD_ARCHIVE : InvalidRARArchive,
ERAR_EOPEN : FileOpenError,
}
class PassiveReader:
"""Used for reading files to memory"""
def __init__(self, usercallback = None):
self.buf = []
self.ucb = usercallback
def _callback(self, msg, UserData, P1, P2):
if msg == UCM_PROCESSDATA:
data = (ctypes.c_char*P2).from_address(P1).raw
if self.ucb!=None:
self.ucb(data)
else:
self.buf.append(data)
return 1
def get_result(self):
return ''.join(self.buf)
class RarInfoIterator(object):
def __init__(self, arc):
self.arc = arc
self.index = 0
self.headerData = RARHeaderDataEx()
self.res = RARReadHeaderEx(self.arc._handle, ctypes.byref(self.headerData))
if self.res==ERAR_BAD_DATA:
raise IncorrectRARPassword
self.arc.lockStatus = "locked"
self.arc.needskip = False
def __iter__(self):
return self
def next(self):
if self.index>0:
if self.arc.needskip:
RARProcessFile(self.arc._handle, RAR_SKIP, None, None)
self.res = RARReadHeaderEx(self.arc._handle, ctypes.byref(self.headerData))
if self.res:
raise StopIteration
self.arc.needskip = True
data = {}
data['index'] = self.index
data['filename'] = self.headerData.FileName
data['datetime'] = DosDateTimeToTimeTuple(self.headerData.FileTime)
data['isdir'] = ((self.headerData.Flags & 0xE0) == 0xE0)
data['size'] = self.headerData.UnpSize + (self.headerData.UnpSizeHigh << 32)
if self.headerData.CmtState == 1:
data['comment'] = self.headerData.CmtBuf.value
else:
data['comment'] = None
self.index += 1
return data
def __del__(self):
self.arc.lockStatus = "finished"
def generate_password_provider(password):
def password_provider_callback(msg, UserData, P1, P2):
if msg == UCM_NEEDPASSWORD and password!=None:
(ctypes.c_char*P2).from_address(P1).value = password
return 1
return password_provider_callback
class RarFileImplementation(object):
def init(self, password=None, custom_path = None):
self.password = password
archiveData = RAROpenArchiveDataEx(ArcNameW=self.archiveName, OpenMode=RAR_OM_EXTRACT)
self._handle = RAROpenArchiveEx(ctypes.byref(archiveData))
self.c_callback = UNRARCALLBACK(generate_password_provider(self.password))
RARSetCallback(self._handle, self.c_callback, 1)
if archiveData.OpenResult != 0:
raise RARExceptions[archiveData.OpenResult]
if archiveData.CmtState == 1:
self.comment = archiveData.CmtBuf.value
else:
self.comment = None
if password:
RARSetPassword(self._handle, password)
self.lockStatus = "ready"
def destruct(self):
if self._handle and RARCloseArchive:
RARCloseArchive(self._handle)
def make_sure_ready(self):
if self.lockStatus == "locked":
raise InvalidRARArchiveUsage("cannot execute infoiter() without finishing previous one")
if self.lockStatus == "finished":
self.destruct()
self.init(self.password)
def infoiter(self):
self.make_sure_ready()
return RarInfoIterator(self)
def read_files(self, checker):
res = []
for info in self.infoiter():
if checker(info) and not info.isdir:
reader = PassiveReader()
c_callback = UNRARCALLBACK(reader._callback)
RARSetCallback(self._handle, c_callback, 1)
tmpres = RARProcessFile(self._handle, RAR_TEST, None, None)
if tmpres==ERAR_BAD_DATA:
raise IncorrectRARPassword
self.needskip = False
res.append((info, reader.get_result()))
return res
def extract(self, checker, path, withSubpath, overwrite):
res = []
for info in self.infoiter():
checkres = checker(info)
if checkres!=False and not info.isdir:
if checkres==True:
fn = info.filename
if not withSubpath:
fn = os.path.split(fn)[-1]
target = os.path.join(path, fn)
else:
raise DeprecationWarning, "Condition callbacks returning strings are deprecated and only supported in Windows"
target = checkres
if overwrite or (not os.path.exists(target)):
tmpres = RARProcessFile(self._handle, RAR_EXTRACT, None, target)
if tmpres==ERAR_BAD_DATA:
raise IncorrectRARPassword
self.needskip = False
res.append(info)
return res
| gpl-3.0 | -3,787,577,499,002,732,000 | 33.757962 | 172 | 0.615723 | false |
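The `DosDateTimeToTimeTuple` helper above unpacks the packed MS-DOS timestamp format. The small round-trip below (pack, then unpack) illustrates the bit layout; the helpers `pack_dos_datetime`/`unpack_dos_datetime` are new illustrative functions, not part of the module, and the sample date is arbitrary.

```python
# Round-trip illustration of the MS-DOS timestamp layout used above.
# date word: bits 15-9 = year-1980, bits 8-5 = month, bits 4-0 = day
# time word: bits 15-11 = hour, bits 10-5 = minute, bits 4-0 = seconds / 2
import time

def pack_dos_datetime(t):
    dos_date = ((t.tm_year - 1980) << 9) | (t.tm_mon << 5) | t.tm_mday
    dos_time = (t.tm_hour << 11) | (t.tm_min << 5) | (t.tm_sec // 2)
    return (dos_date << 16) | dos_time

def unpack_dos_datetime(value):
    # Same arithmetic as DosDateTimeToTimeTuple above, minus the localtime call.
    dos_date, dos_time = value >> 16, value & 0xFFFF
    return (1980 + (dos_date >> 9), (dos_date >> 5) & 0xF, dos_date & 0x1F,
            dos_time >> 11, (dos_time >> 5) & 0x3F, 2 * (dos_time & 0x1F))

stamp = pack_dos_datetime(time.struct_time((2008, 6, 15, 13, 45, 30, 0, 0, -1)))
print(unpack_dos_datetime(stamp))  # (2008, 6, 15, 13, 45, 30)
```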
nafitzgerald/allennlp | tests/data/dataset_readers/language_modeling_dataset_test.py | 1 | 1666 |
# pylint: disable=no-self-use,invalid-name
from allennlp.data.dataset_readers import LanguageModelingReader
from allennlp.common.testing import AllenNlpTestCase


class TestLanguageModelingDatasetReader(AllenNlpTestCase):
    def test_read_from_file(self):
        reader = LanguageModelingReader(tokens_per_instance=3)
        dataset = reader.read('tests/fixtures/data/language_modeling.txt')
        instances = dataset.instances
        # The last potential instance is left out, which is ok, because we don't have an end token
        # in here, anyway.
        assert len(instances) == 5
        assert [t.text for t in instances[0].fields["input_tokens"].tokens] == ["This", "is", "a"]
        assert [t.text for t in instances[0].fields["output_tokens"].tokens] == ["is", "a", "sentence"]
        assert [t.text for t in instances[1].fields["input_tokens"].tokens] == ["sentence", "for", "language"]
        assert [t.text for t in instances[1].fields["output_tokens"].tokens] == ["for", "language", "modelling"]
        assert [t.text for t in instances[2].fields["input_tokens"].tokens] == ["modelling", ".", "Here"]
        assert [t.text for t in instances[2].fields["output_tokens"].tokens] == [".", "Here", "'s"]
        assert [t.text for t in instances[3].fields["input_tokens"].tokens] == ["'s", "another", "one"]
        assert [t.text for t in instances[3].fields["output_tokens"].tokens] == ["another", "one", "for"]
        assert [t.text for t in instances[4].fields["input_tokens"].tokens] == ["for", "extra", "language"]
        assert [t.text for t in instances[4].fields["output_tokens"].tokens] == ["extra", "language", "modelling"]
| apache-2.0 | 8,587,836,367,144,935,000 | 56.448276 | 114 | 0.64886 | false |
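The assertions above follow from how `tokens_per_instance=3` windows the token stream: non-overlapping windows of three input tokens, with outputs being the same window shifted right by one. The standalone sketch below restates that windowing on a short invented token list; the `windows` helper is illustrative and is not the AllenNLP reader.

```python
# Standalone sketch of the windowing behaviour the test above relies on.
def windows(tokens, n):
    instances = []
    for start in range(0, len(tokens) - n, n):
        instances.append((tokens[start:start + n], tokens[start + 1:start + n + 1]))
    return instances

tokens = "This is a sentence for language modelling .".split()
for inp, out in windows(tokens, 3):
    print(inp, "->", out)
# ['This', 'is', 'a'] -> ['is', 'a', 'sentence']
# ['sentence', 'for', 'language'] -> ['for', 'language', 'modelling']
```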
bhargav2408/kivy | kivy/storage/dictstore.py | 43 | 2284 |
'''
Dictionary store
=================

Use a Python dictionary as a store.
'''

__all__ = ('DictStore', )

try:
    import cPickle as pickle
except ImportError:
    import pickle

from os.path import exists
from kivy.compat import iteritems
from kivy.storage import AbstractStore


class DictStore(AbstractStore):
    '''Store implementation using a pickled `dict`.
    See the :mod:`kivy.storage` module documentation for more information.
    '''

    def __init__(self, filename, data=None, **kwargs):
        if isinstance(filename, dict):
            # backward compatibility, first argument was a dict.
            self.filename = None
            self._data = filename
        else:
            self.filename = filename
            self._data = data or {}
        self._is_changed = True
        super(DictStore, self).__init__(**kwargs)

    def store_load(self):
        if self.filename is None:
            return
        if not exists(self.filename):
            return
        with open(self.filename, 'rb') as fd:
            data = fd.read()
            if data:
                self._data = pickle.loads(data)

    def store_sync(self):
        if self.filename is None:
            return
        if not self._is_changed:
            return
        with open(self.filename, 'wb') as fd:
            pickle.dump(self._data, fd)
        self._is_changed = False

    def store_exists(self, key):
        return key in self._data

    def store_get(self, key):
        return self._data[key]

    def store_put(self, key, value):
        self._data[key] = value
        self._is_changed = True
        return True

    def store_delete(self, key):
        del self._data[key]
        self._is_changed = True
        return True

    def store_find(self, filters):
        for key, values in iteritems(self._data):
            found = True
            for fkey, fvalue in iteritems(filters):
                if fkey not in values:
                    found = False
                    break
                if values[fkey] != fvalue:
                    found = False
                    break
            if found:
                yield key, values

    def store_count(self):
        return len(self._data)

    def store_keys(self):
        return self._data.keys()
| mit | 4,555,821,813,723,845,600 | 24.662921 | 74 | 0.544658 | false |
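For context, the store above is normally driven through the `AbstractStore` front-end methods (`put`, `get`, `exists`, `find`) rather than the `store_*` hooks directly. The short sketch below assumes a working Kivy installation; the file name and stored values are placeholders.

```python
# Usage sketch for DictStore (assumes Kivy is installed; values illustrative).
from kivy.storage.dictstore import DictStore

store = DictStore('profiles.pickle')        # persisted as a pickled dict
store.put('tito', name='Mathieu', age=30)   # put() forwards to store_put()/store_sync()
if store.exists('tito'):
    print(store.get('tito'))                # {'name': 'Mathieu', 'age': 30}
print(list(store.find(name='Mathieu')))     # [('tito', {...})]
```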
mancoast/CPythonPyc_test | cpython/253_test_asynchat.py | 19 | 2391 |
# test asynchat -- requires threading
import thread # If this fails, we can't test this module
import asyncore, asynchat, socket, threading, time
import unittest
from test import test_support
HOST = "127.0.0.1"
PORT = 54322
class echo_server(threading.Thread):
def run(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
global PORT
PORT = test_support.bind_port(sock, HOST, PORT)
sock.listen(1)
conn, client = sock.accept()
buffer = ""
while "\n" not in buffer:
data = conn.recv(1)
if not data:
break
buffer = buffer + data
while buffer:
n = conn.send(buffer)
buffer = buffer[n:]
conn.close()
sock.close()
class echo_client(asynchat.async_chat):
def __init__(self, terminator):
asynchat.async_chat.__init__(self)
self.contents = None
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((HOST, PORT))
self.set_terminator(terminator)
self.buffer = ""
def handle_connect(self):
pass
##print "Connected"
def collect_incoming_data(self, data):
self.buffer = self.buffer + data
def found_terminator(self):
#print "Received:", repr(self.buffer)
self.contents = self.buffer
self.buffer = ""
self.close()
class TestAsynchat(unittest.TestCase):
def setUp (self):
pass
def tearDown (self):
pass
def test_line_terminator(self):
s = echo_server()
s.start()
time.sleep(1) # Give server time to initialize
c = echo_client('\n')
c.push("hello ")
c.push("world\n")
asyncore.loop()
s.join()
self.assertEqual(c.contents, 'hello world')
def test_numeric_terminator(self):
# Try reading a fixed number of bytes
s = echo_server()
s.start()
time.sleep(1) # Give server time to initialize
c = echo_client(6L)
c.push("hello ")
c.push("world\n")
asyncore.loop()
s.join()
self.assertEqual(c.contents, 'hello ')
def test_main(verbose=None):
test_support.run_unittest(TestAsynchat)
if __name__ == "__main__":
test_main(verbose=True)
| gpl-3.0 | 5,670,115,013,137,137,000 | 24.709677 | 66 | 0.578001 | false |
Slach/Diamond | src/collectors/squid/test/testsquid.py | 31 | 6954 |
#!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from squid import SquidCollector
##########################################################################
class TestSquidCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('SquidCollector', {
'interval': 1,
})
self.collector = SquidCollector(config, None)
def test_import(self):
self.assertTrue(SquidCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_fake_data(self, publish_mock):
_getData_mock = patch.object(
SquidCollector,
'_getData',
Mock(
return_value=self.getFixture('fake_counters_1').getvalue()))
_getData_mock.start()
self.collector.collect()
_getData_mock.stop()
self.assertPublishedMany(publish_mock, {})
_getData_mock = patch.object(
SquidCollector,
'_getData',
Mock(
return_value=self.getFixture('fake_counters_2').getvalue()))
_getData_mock.start()
self.collector.collect()
_getData_mock.stop()
metrics = {
'3128.client_http.requests': 1,
'3128.client_http.hits': 2,
'3128.client_http.errors': 3,
'3128.client_http.kbytes_in': 4,
'3128.client_http.kbytes_out': 5,
'3128.client_http.hit_kbytes_out': 6,
'3128.server.all.requests': 7,
'3128.server.all.errors': 8,
'3128.server.all.kbytes_in': 9,
'3128.server.all.kbytes_out': 10,
'3128.server.http.requests': 1,
'3128.server.http.errors': 12,
'3128.server.http.kbytes_in': 13,
'3128.server.http.kbytes_out': 14,
'3128.server.ftp.requests': 15,
'3128.server.ftp.errors': 16,
'3128.server.ftp.kbytes_in': 17,
'3128.server.ftp.kbytes_out': 18,
'3128.server.other.requests': 19,
'3128.server.other.errors': 20,
'3128.server.other.kbytes_in': 21,
'3128.server.other.kbytes_out': 22,
'3128.icp.pkts_sent': 23,
'3128.icp.pkts_recv': 24,
'3128.icp.queries_sent': 25,
'3128.icp.replies_sent': 26,
'3128.icp.queries_recv': 27,
'3128.icp.replies_recv': 28,
'3128.icp.query_timeouts': 29,
'3128.icp.replies_queued': 30,
'3128.icp.kbytes_sent': 31,
'3128.icp.kbytes_recv': 32,
'3128.icp.q_kbytes_sent': 33,
'3128.icp.r_kbytes_sent': 34,
'3128.icp.q_kbytes_recv': 35,
'3128.icp.r_kbytes_recv': 36,
'3128.icp.times_used': 37,
'3128.cd.times_used': 38,
'3128.cd.msgs_sent': 39,
'3128.cd.msgs_recv': 40,
'3128.cd.memory': 41,
'3128.cd.local_memory': 42,
'3128.cd.kbytes_sent': 43,
'3128.cd.kbytes_recv': 44,
'3128.unlink.requests': 45,
'3128.page_faults': 46,
'3128.select_loops': 47,
'3128.cpu_time': 48.1234567890,
'3128.wall_time': 49.1234567890,
'3128.swap.outs': 50,
'3128.swap.ins': 51,
'3128.swap.files_cleaned': 52,
'3128.aborted_requests': 53
}
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
_getData_mock = patch.object(
SquidCollector,
'_getData',
Mock(
return_value=self.getFixture('counters_1').getvalue()))
_getData_mock.start()
self.collector.collect()
_getData_mock.stop()
self.assertPublishedMany(publish_mock, {})
_getData_mock = patch.object(
SquidCollector,
'_getData',
Mock(
return_value=self.getFixture('counters_2').getvalue()))
_getData_mock.start()
self.collector.collect()
_getData_mock.stop()
metrics = {
'3128.client_http.requests': 2,
'3128.client_http.hits': 1,
'3128.client_http.errors': 0,
'3128.client_http.kbytes_in': 1,
'3128.client_http.kbytes_out': 12.0,
'3128.client_http.hit_kbytes_out': 10,
'3128.server.all.requests': 0,
'3128.server.all.errors': 0,
'3128.server.all.kbytes_in': 0,
'3128.server.all.kbytes_out': 0,
'3128.server.http.requests': 0,
'3128.server.http.errors': 0,
'3128.server.http.kbytes_in': 0,
'3128.server.http.kbytes_out': 0,
'3128.server.ftp.requests': 0,
'3128.server.ftp.errors': 0,
'3128.server.ftp.kbytes_in': 0,
'3128.server.ftp.kbytes_out': 0,
'3128.server.other.requests': 0,
'3128.server.other.errors': 0,
'3128.server.other.kbytes_in': 0,
'3128.server.other.kbytes_out': 0,
'3128.icp.pkts_sent': 0,
'3128.icp.pkts_recv': 0,
'3128.icp.queries_sent': 0,
'3128.icp.replies_sent': 0,
'3128.icp.queries_recv': 0,
'3128.icp.replies_recv': 0,
'3128.icp.query_timeouts': 0,
'3128.icp.replies_queued': 0,
'3128.icp.kbytes_sent': 0,
'3128.icp.kbytes_recv': 0,
'3128.icp.q_kbytes_sent': 0,
'3128.icp.r_kbytes_sent': 0,
'3128.icp.q_kbytes_recv': 0,
'3128.icp.r_kbytes_recv': 0,
'3128.icp.times_used': 0,
'3128.cd.times_used': 0,
'3128.cd.msgs_sent': 0,
'3128.cd.msgs_recv': 0,
'3128.cd.memory': 0,
'3128.cd.local_memory': 0,
'3128.cd.kbytes_sent': 0,
'3128.cd.kbytes_recv': 0,
'3128.unlink.requests': 0,
'3128.page_faults': 0,
'3128.select_loops': 10827.0,
'3128.cpu_time': 0,
'3128.wall_time': 10,
'3128.swap.outs': 0,
'3128.swap.ins': 2,
'3128.swap.files_cleaned': 0,
'3128.aborted_requests': 0
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
| mit | -595,809,820,373,499,900 | 34.845361 | 76 | 0.504602 | false |
sxjscience/mxnet | benchmark/opperf/utils/common_utils.py | 11 | 5369 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import json
from operator import itemgetter
import logging
logging.basicConfig(level=logging.INFO)
def merge_map_list(map_list):
"""Merge all the Map in map_list into one final Map.
Useful when you have a list of benchmark result maps and you want to
prepare one final map combining all results.
Parameters
----------
map_list: List[maps]
List of maps to be merged.
Returns
-------
map where all individual maps in the into map_list are merged
"""
# Preserve order of underlying maps and keys when converting to a single map
final_map = dict()
for current_map in map_list:
for key in current_map:
final_map[key] = current_map[key]
return final_map
def save_to_file(inp_dict, out_filepath, out_format='json', runtime_features=None, profiler='native'):
"""Saves the given input dictionary to the given output file.
By default, saves the input dictionary as JSON file. Other supported formats include:
1. md
Parameters
----------
inp_dict: map
Input dictionary to be saved
out_filepath: str
Output file path
out_format: str, default 'json'
Format of the output file. Supported options - 'json', 'md'. Default - json.
runtime_features: map
Dictionary of runtime_features.
"""
if out_format == 'json':
# Save as JSON
with open(out_filepath, "w") as result_file:
json.dump(inp_dict, result_file, indent=4, sort_keys=False)
elif out_format == 'md':
# Save as md
with open(out_filepath, "w") as result_file:
result_file.write(_prepare_markdown(inp_dict, runtime_features, profiler))
else:
raise ValueError("Invalid output file format provided - '{}'. Supported - json, md".format(format))
def get_json(inp_dict):
"""Converts a given dictionary to prettified JSON string.
Parameters
----------
inp_dict: map
Input dictionary to be converted to JSON.
Returns
-------
Prettified JSON string
"""
return json.dumps(inp_dict, indent=4)
def _prepare_op_benchmark_result(op, op_bench_result, profiler):
operator_name = op
avg_forward_time = "---"
avg_backward_time = "---"
max_mem_usage = "---"
inputs = "---"
avg_time = "---"
p50_time = "---"
p90_time = "---"
p99_time = "---"
for key, value in op_bench_result.items():
if "avg_time_forward" in key:
avg_forward_time = value
elif "avg_time_backward" in key:
avg_backward_time = value
elif "max_storage_mem_alloc_" in key:
max_mem_usage = value
elif "inputs" in key:
inputs = value
elif "avg_time" in key:
avg_time = value
elif "p50_time" in key:
p50_time = value
elif "p90_time" in key:
p90_time = value
elif "p99_time" in key:
p99_time = value
result = ""
if profiler == "native":
result = "| {} | {} | {} | {} | {} |".format(operator_name,
inputs, max_mem_usage, avg_forward_time, avg_backward_time)
elif profiler == "python":
result = "| {} | {} | {} | {} | {} | {} |".format(operator_name, avg_time, p50_time, p90_time, p99_time, inputs)
return result
def _prepare_markdown(results, runtime_features=None, profiler='native'):
results_markdown = []
if runtime_features and 'runtime_features' in runtime_features:
results_markdown.append("# Runtime Features")
idx = 0
for key, value in runtime_features['runtime_features'].items():
results_markdown.append('{}. {} : {}'.format(idx, key, value))
results_markdown.append("# Benchmark Results")
if profiler == 'native':
results_markdown.append(
"| Operator | Inputs | Max Mem Usage (Storage) (Bytes) | Avg Forward Time (ms)"
" | Avg. Backward Time (ms) |")
results_markdown.append("| :---: | :---: | :---: | :---: | :---: |")
elif profiler == 'python':
results_markdown.append(
"| Operator | Avg Time (ms) | P50 Time (ms) | P90 Time (ms) | P99 Time (ms) | Inputs |")
results_markdown.append("| :---: | :---: | :---: | :---: | :---: | :---: |")
for op, op_bench_results in sorted(results.items(), key=itemgetter(0)):
for op_bench_result in op_bench_results:
results_markdown.append(_prepare_op_benchmark_result(op, op_bench_result, profiler))
return os.linesep.join(results_markdown)
| apache-2.0 | -775,430,578,229,791,000 | 32.767296 | 120 | 0.61315 | false |
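A short usage sketch of the helpers above follows. The import path assumes the script is run from the MXNet source tree (so `benchmark/opperf` is importable), and the operator names, timings, and output file names are invented for illustration.

```python
# Usage sketch for merge_map_list / save_to_file (values and paths illustrative).
from benchmark.opperf.utils.common_utils import merge_map_list, save_to_file

partial_results = [
    {"add": [{"avg_time_forward_add": 0.15, "inputs": {"lhs": (1024, 1024)}}]},
    {"dot": [{"avg_time_forward_dot": 1.20, "inputs": {"lhs": (1024, 1024)}}]},
]
final_map = merge_map_list(partial_results)

# Write both a JSON and a Markdown report of the same results.
save_to_file(final_map, "op_benchmarks.json", out_format="json")
save_to_file(final_map, "op_benchmarks.md", out_format="md", profiler="native")
```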
openstack/trove | trove/guestagent/models.py | 1 | 3048 |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from datetime import timedelta
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common import timeutils
from trove.common import utils
from trove.db import get_db_api
from trove.db import models as dbmodels
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def persisted_models():
return {'agent_heartbeats': AgentHeartBeat}
class AgentHeartBeat(dbmodels.DatabaseModelBase):
"""Defines the state of a Guest Agent."""
_data_fields = ['instance_id', 'updated_at', 'guest_agent_version',
'deleted', 'deleted_at']
_table_name = 'agent_heartbeats'
def __init__(self, **kwargs):
super(AgentHeartBeat, self).__init__(**kwargs)
@classmethod
def create(cls, **values):
values['id'] = utils.generate_uuid()
heartbeat = cls(**values).save()
if not heartbeat.is_valid():
raise exception.InvalidModelError(errors=heartbeat.errors)
return heartbeat
def save(self):
if not self.is_valid():
raise exception.InvalidModelError(errors=self.errors)
self['updated_at'] = timeutils.utcnow()
LOG.debug("Saving %(name)s: %(dict)s",
{'name': self.__class__.__name__, 'dict': self.__dict__})
return get_db_api().save(self)
@classmethod
def find_all_by_version(cls, guest_agent_version, deleted=0):
if guest_agent_version is None:
raise exception.ModelNotFoundError()
heartbeats = cls.find_all(guest_agent_version=guest_agent_version,
deleted=deleted)
if heartbeats is None or heartbeats.count() == 0:
raise exception.ModelNotFoundError(
guest_agent_version=guest_agent_version)
return heartbeats
@classmethod
def find_by_instance_id(cls, instance_id):
if instance_id is None:
raise exception.ModelNotFoundError(instance_id=instance_id)
try:
return cls.find_by(instance_id=instance_id)
except exception.NotFound:
LOG.exception("Error finding instance %s", instance_id)
raise exception.ModelNotFoundError(instance_id=instance_id)
@staticmethod
def is_active(agent):
return (datetime.now() - agent.updated_at <
timedelta(seconds=CONF.agent_heartbeat_time))
| apache-2.0 | 8,454,399,229,808,631,000 | 32.494505 | 78 | 0.656496 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-rdbms/azure/mgmt/rdbms/postgresql/models/tracked_resource.py | 2 | 1808 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .proxy_resource import ProxyResource


class TrackedResource(ProxyResource):
    """Resource properties including location and tags for tracked resources.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource ID
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. The location the resource resides in.
    :type location: str
    :param tags: Application-specific metadata in the form of key-value pairs.
    :type tags: dict[str, str]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super(TrackedResource, self).__init__(**kwargs)
        self.location = kwargs.get('location', None)
        self.tags = kwargs.get('tags', None)
| mit | -8,186,692,194,587,403,000 | 33.113208 | 78 | 0.57135 | false |
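A minimal construction sketch for the model above; the values are placeholders, the import assumes the `azure-mgmt-rdbms` package (which ships this generated module) is installed, and `id`/`name`/`type` are server-populated so they are not passed.

```python
# Construction sketch for TrackedResource (illustrative values only).
from azure.mgmt.rdbms.postgresql.models import TrackedResource

resource = TrackedResource(location='westus', tags={'env': 'dev'})
print(resource.location)  # 'westus'
print(resource.tags)      # {'env': 'dev'}
```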
TheWardoctor/Wardoctors-repo | plugin.video.salts/scrapers/movieblast_scraper.py | 7 | 4156 |
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib
import re
import scraper
import kodi
import log_utils # @UnusedImport
import dom_parser2
from salts_lib import scraper_utils
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import QUALITIES
BASE_URL = 'http://www.movieblast.co'
Q_MAP = {'HD': QUALITIES.HD720, 'DVD': QUALITIES.HIGH, 'CAM': QUALITIES.LOW}
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'MovieBlast'
def resolve_link(self, link):
if self.base_url in link:
html = self._http_get(link, allow_redirect=False, cache_limit=.25)
if html.startswith('http'):
return html
else:
return link
def get_sources(self, video):
hosters = []
source_url = self.get_url(video)
if not source_url or source_url == FORCE_NO_MATCH: return hosters
url = scraper_utils.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=8)
for _attrs, row in dom_parser2.parse_dom(html, 'tr', {'class': 'streaming-link'}):
match = re.search("nowPlaying\(\s*\d+\s*,\s*'([^']+)'\s*,\s*'([^']+)", row)
if not match: continue
host, stream_url = match.groups()
if re.search('server\s*\d+', host, re.I):
for source, value in self.__get_direct_links(stream_url).iteritems():
host = scraper_utils.get_direct_hostname(self, source)
source = {'multi-part': False, 'url': source, 'host': host, 'class': self, 'quality': value['quality'], 'views': None, 'rating': None, 'direct': True}
hosters.append(source)
else:
source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': QUALITIES.HIGH, 'views': None, 'rating': None, 'direct': False}
hosters.append(source)
return hosters
def __get_direct_links(self, stream_url):
return scraper_utils.parse_sources_list(self, self._http_get(stream_url, cache_limit=1))
def search(self, video_type, title, year, season=''): # @UnusedVariable
results = []
search_url = scraper_utils.urljoin(self.base_url, '/search/%s/' % (urllib.quote(title)))
html = self._http_get(search_url, cache_limit=8)
for _attrs, item in dom_parser2.parse_dom(html, 'div', {'class': 'item'}):
match_title = dom_parser2.parse_dom(item, 'span')
match_url = dom_parser2.parse_dom(item, 'a', req='href')
if match_url and match_title:
match_url = match_url[0].attrs['href']
match_title = match_title[0].content
match_year = re.search('(\d{4})$', match_url)
match_year = match_year.group(1) if match_year else ''
if not year or not match_year or year == match_year:
result = {'title': scraper_utils.cleanse_title(match_title), 'year': match_year, 'url': scraper_utils.pathify_url(match_url)}
results.append(result)
return results
| apache-2.0 | -8,555,988,969,447,919,000 | 42.291667 | 170 | 0.612849 | false |
nagyistoce/nips14-ssl | learn_yz_x_ss.py | 4 | 12614 |
import sys
import os, numpy as np
import scipy.stats
import anglepy.paramgraphics as paramgraphics
import anglepy.ndict as ndict
from anglepy.sfo import SFO
from adam import AdaM
import theano
import theano.tensor as T
import preprocessing as pp
import time
def main(n_passes, n_labeled, n_z, n_hidden, dataset, seed, alpha, n_minibatches, comment):
'''
Learn a variational auto-encoder with generative model p(x,y,z)=p(y)p(z)p(x|y,z)
And where 'x' is always observed and 'y' is _sometimes_ observed (hence semi-supervised).
We're going to use q(y|x) as a classification model.
'''
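# Objective sketch (informal notation, following the semi-supervised VAE "M2"
# model of Kingma et al. 2014, which this script follows):
#   labeled (x, y):  L(x,y) = E_{q(z|x,y)}[ log p(x|y,z) + log p(z) - log q(z|x,y) ],
#                    optimised together with an alpha-weighted log q(y|x) term
#                    so that q(y|x) is also trained on the labels,
#   unlabeled x:     U(x) = sum_y q(y|x) * ( L(x,y) - log q(y|x) ),
# i.e. y is marginalised out with the classifier q(y|x); this is what the
# L_unlabeled / f_du machinery further down computes explicitly per class.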
import time
logdir = 'results/learn_yz_x_ss_'+dataset+'_'+str(n_z)+'-'+str(n_hidden)+'_nlabeled'+str(n_labeled)+'_alpha'+str(alpha)+'_seed'+str(seed)+'_'+comment+'-'+str(int(time.time()))+'/'
if not os.path.exists(logdir): os.makedirs(logdir)
print 'logdir:', logdir
print sys.argv[0], n_labeled, n_z, n_hidden, dataset, seed, comment
np.random.seed(seed)
# Init data
if dataset == 'mnist_2layer':
size = 28
dim_input = (size,size)
# Load model for feature extraction
path = 'models/mnist_z_x_50-500-500_longrun/' #'models/mnist_z_x_50-600-600/'
l1_v = ndict.loadz(path+'v.ndict.tar.gz')
l1_w = ndict.loadz(path+'w.ndict.tar.gz')
n_h = (500,500)
from anglepy.models.VAE_Z_X import VAE_Z_X
l1_model = VAE_Z_X(n_x=28*28, n_hidden_q=n_h, n_z=50, n_hidden_p=n_h, nonlinear_q='softplus', nonlinear_p='softplus', type_px='bernoulli', type_qz='gaussianmarg', type_pz='gaussianmarg', prior_sd=1)
# Load dataset
import anglepy.data.mnist as mnist
# load train and test sets
train_x, train_y, valid_x, valid_y, test_x, test_y = mnist.load_numpy_split(size, binarize_y=True)
# create labeled/unlabeled split in training set
x_l, y_l, x_u, y_u = mnist.create_semisupervised(train_x, train_y, n_labeled)
# Extract features
# 1. Determine which dimensions to keep
def transform(v, _x):
return l1_model.dist_qz['z'](*([_x] + v.values() + [np.ones((1, _x.shape[1]))]))
q_mean, _ = transform(l1_v, x_u[0:1000])
idx_keep = np.std(q_mean, axis=1) > 0.1
# 2. Select dimensions
for key in ['mean_b','mean_w','logvar_b','logvar_w']:
l1_v[key] = l1_v[key][idx_keep,:]
l1_w['w0'] = l1_w['w0'][:,idx_keep]
# 3. Extract features
x_mean_u, x_logvar_u = transform(l1_v, x_u)
x_mean_l, x_logvar_l = transform(l1_v, x_l)
x_unlabeled = {'mean':x_mean_u, 'logvar':x_logvar_u, 'y':y_u}
x_labeled = {'mean':x_mean_l, 'logvar':x_logvar_l, 'y':y_l}
valid_x, _ = transform(l1_v, valid_x)
test_x, _ = transform(l1_v, test_x)
n_x = np.sum(idx_keep)
n_y = 10
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
colorImg = False
if dataset == 'svhn_2layer':
size = 32
dim_input = (size,size)
# Load model for feature extraction
path = 'models/tmp/svhn_z_x_300-500-500/'
l1_v = ndict.loadz(path+'v.ndict.tar.gz')
l1_w = ndict.loadz(path+'w.ndict.tar.gz')
f_enc, f_dec = pp.PCA_fromfile(path+'pca_params.ndict.tar.gz', True)
from anglepy.models.VAE_Z_X import VAE_Z_X
n_x = l1_v['w0'].shape[1] #=600
l1_model = VAE_Z_X(n_x=n_x, n_hidden_q=(600,600), n_z=300, n_hidden_p=(600,600), nonlinear_q='softplus', nonlinear_p='softplus', type_px='gaussian', type_qz='gaussianmarg', type_pz='gaussianmarg', prior_sd=1)
# SVHN dataset
import anglepy.data.svhn as svhn
size = 32
train_x, train_y, valid_x, valid_y, test_x, test_y = svhn.load_numpy_split(False, binarize_y=True, extra=False) #norb.load_resized(size, binarize_y=True)
#train_x = np.hstack((_train_x, extra_x))
#train_y = np.hstack((_train_y, extra_y))[:,:604000]
# create labeled/unlabeled split in training set
import anglepy.data.mnist as mnist
x_l, y_l, x_u, y_u = mnist.create_semisupervised(train_x, train_y, n_labeled)
# Extract features
# 1. Determine which dimensions to keep
def transform(v, _x):
return l1_model.dist_qz['z'](*([f_enc(_x)] + v.values() + [np.ones((1, _x.shape[1]))]))
# 2. We're keeping all latent dimensions
# 3. Extract features
x_mean_u, x_logvar_u = transform(l1_v, x_u)
x_mean_l, x_logvar_l = transform(l1_v, x_l)
x_unlabeled = {'mean':x_mean_u, 'logvar':x_logvar_u, 'y':y_u}
x_labeled = {'mean':x_mean_l, 'logvar':x_logvar_l, 'y':y_l}
valid_x, _ = transform(l1_v, valid_x)
test_x, _ = transform(l1_v, test_x)
n_x = l1_w['w0'].shape[1]
n_y = 10
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
# Init VAE model p(x,y,z)
from anglepy.models.VAE_YZ_X import VAE_YZ_X
uniform_y = True
model = VAE_YZ_X(n_x, n_y, n_hidden, n_z, n_hidden, nonlinear, nonlinear, type_px, type_qz="gaussianmarg", type_pz=type_pz, prior_sd=1, uniform_y=uniform_y)
v, w = model.init_w(1e-3)
# Init q(y|x) model
from anglepy.models.MLP_Categorical import MLP_Categorical
n_units = [n_x]+list(n_hidden)+[n_y]
model_qy = MLP_Categorical(n_units=n_units, prior_sd=1, nonlinearity=nonlinear)
u = model_qy.init_w(1e-3)
# Just test
if False:
u = ndict.loadz('u.ndict.tar.gz')
v = ndict.loadz('v.ndict.tar.gz')
w = ndict.loadz('w.ndict.tar.gz')
pass
# Progress hook
t0 = time.time()
def hook(t, u, v, w, ll):
# Get classification error of validation and test sets
def error(dataset_x, dataset_y):
_, _, _z = model_qy.gen_xz(u, {'x':dataset_x}, {})
return np.sum( np.argmax(_z['py'], axis=0) != np.argmax(dataset_y, axis=0)) / (0.0 + dataset_y.shape[1])
valid_error = error(valid_x, valid_y)
test_error = error(test_x, test_y)
# Log
ndict.savez(u, logdir+'u')
ndict.savez(v, logdir+'v')
ndict.savez(w, logdir+'w')
dt = time.time() - t0
print dt, t, ll, valid_error, test_error
with open(logdir+'hook.txt', 'a') as f:
print >>f, dt, t, ll, valid_error, test_error
return valid_error
# Optimize
result = optim_vae_ss_adam(alpha, model_qy, model, x_labeled, x_unlabeled, n_y, u, v, w, n_minibatches=n_minibatches, n_passes=n_passes, hook=hook)
return result
def optim_vae_ss_adam(alpha, model_qy, model, x_labeled, x_unlabeled, n_y, u_init, v_init, w_init, n_minibatches, n_passes, hook, n_reset=20, resample_keepmem=False, display=0):
# Shuffle datasets
ndict.shuffleCols(x_labeled)
ndict.shuffleCols(x_unlabeled)
# create minibatches
minibatches = []
n_labeled = x_labeled.itervalues().next().shape[1]
n_batch_l = n_labeled / n_minibatches
if (n_labeled%n_batch_l) != 0: raise Exception()
n_unlabeled = x_unlabeled.itervalues().next().shape[1]
n_batch_u = n_unlabeled / n_minibatches
if (n_unlabeled%n_batch_u) != 0: raise Exception()
n_tot = n_labeled + n_unlabeled
# Divide into minibatches
def make_minibatch(i):
_x_labeled = ndict.getCols(x_labeled, i * n_batch_l, (i+1) * n_batch_l)
_x_unlabeled = ndict.getCols(x_unlabeled, i * n_batch_u, (i+1) * n_batch_u)
return [i, _x_labeled, _x_unlabeled]
for i in range(n_minibatches):
minibatches.append(make_minibatch(i))
# For integrating-out approach
L_inner = T.dmatrix()
L_unlabeled = T.dot(np.ones((1, n_y)), model_qy.p * (L_inner - T.log(model_qy.p)))
grad_L_unlabeled = T.grad(L_unlabeled.sum(), model_qy.var_w.values())
f_du = theano.function([model_qy.var_x['x']] + model_qy.var_w.values() + [model_qy.var_A, L_inner], [L_unlabeled] + grad_L_unlabeled)
# Some statistics
L = [0.]
n_L = [0]
def f_df(w, minibatch):
u = w['u']
v = w['v']
w = w['w']
i_minibatch = minibatch[0]
_x_l = minibatch[1] #labeled
x_minibatch_l = {'x': np.random.normal(_x_l['mean'], np.exp(0.5*_x_l['logvar'])), 'y': _x_l['y']}
eps_minibatch_l = model.gen_eps(n_batch_l)
_x_u = minibatch[2] #unlabeled
x_minibatch_u = {'x': np.random.normal(_x_u['mean'], np.exp(0.5*_x_u['logvar'])), 'y': _x_u['y']}
eps_minibatch_u = [model.gen_eps(n_batch_u) for i in range(n_y)]
# === Get gradient for labeled data
# gradient of -KL(q(z|y,x) ~p(x,y) || p(x,y,z))
logpx, logpz, logqz, gv_labeled, gw_labeled = model.dL_dw(v, w, x_minibatch_l, eps_minibatch_l)
# gradient of classification error E_{~p(x,y)}[q(y|x)]
logqy, _, gu_labeled, _ = model_qy.dlogpxz_dwz(u, x_minibatch_l, {})
# Reweight gu_labeled and logqy
#beta = alpha / (1.-alpha) * (1. * n_unlabeled / n_labeled) #old
beta = alpha * (1. * n_tot / n_labeled)
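        # (Added note, interpretation.) beta rescales the explicit classification term so that
        # its weight relative to the generative bound stays roughly constant as the labeled
        # fraction changes; alpha is the usual trade-off hyperparameter.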
for i in u: gu_labeled[i] *= beta
logqy *= beta
L_labeled = logpx + logpz - logqz + logqy
# === Get gradient for unlabeled data
# -KL(q(z|x,y)q(y|x) ~p(x) || p(x,y,z))
        # Approach where the outer expectation (over q(y|x)) is taken as an explicit sum over labels (instead of sampling)
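        # (Added note, not in the original code.) The unlabeled term marginalizes the discrete
        # label out explicitly:
        #     U(x) = sum_y q(y|x) * [ L(x,y) - log q(y|x) ]
        # so each of the n_y candidate labels gets its own weighted pass below, with q(y|x)
        # taken from the corresponding row of `py`.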
u = ndict.ordered(u)
py = model_qy.dist_px['y'](*([x_minibatch_u['x']] + u.values() + [np.ones((1, n_batch_u))]))
if True:
# Original
_L = np.zeros((n_y, n_batch_u))
gv_unlabeled = {i: 0 for i in v}
gw_unlabeled = {i: 0 for i in w}
for label in range(n_y):
new_y = np.zeros((n_y, n_batch_u))
new_y[label,:] = 1
eps = eps_minibatch_u[label]
#logpx, logpz, logqz, _gv, _gw = model.dL_dw(v, w, {'x':x_minibatch['x'],'y':new_y}, eps)
L_unweighted, L_weighted, _gv, _gw = model.dL_weighted_dw(v, w, {'x':x_minibatch_u['x'],'y':new_y}, eps, py[label:label+1,:])
_L[label:label+1,:] = L_unweighted
for i in v: gv_unlabeled[i] += _gv[i]
for i in w: gw_unlabeled[i] += _gw[i]
else:
# New, should be more efficient. (But is not in practice)
_y = np.zeros((n_y, n_batch_u*n_y))
for label in range(n_y):
_y[label,label*n_batch_u:(label+1)*n_batch_u] = 1
_x = np.tile(x_minibatch_u['x'].astype(np.float32), (1, n_y))
eps = model.gen_eps(n_batch_u*n_y)
L_unweighted, L_weighted, gv_unlabeled, gw_unlabeled = model.dL_weighted_dw(v, w, {'x':_x,'y':_y}, eps, py.reshape((1, -1)))
_L = L_unweighted.reshape((n_y, n_batch_u))
r = f_du(*([x_minibatch_u['x']] + u.values() + [np.zeros((1, n_batch_u)), _L]))
L_unlabeled = r[0]
gu_unlabeled = dict(zip(u.keys(), r[1:]))
# Get gradient of prior
logpu, gu_prior = model_qy.dlogpw_dw(u)
logpv, logpw, gv_prior, gw_prior = model.dlogpw_dw(v, w)
# Combine gradients and objective
gu = {i: ((gu_labeled[i] + gu_unlabeled[i]) * n_minibatches + gu_prior[i])/(-n_tot) for i in u}
gv = {i: ((gv_labeled[i] + gv_unlabeled[i]) * n_minibatches + gv_prior[i])/(-n_tot) for i in v}
gw = {i: ((gw_labeled[i] + gw_unlabeled[i]) * n_minibatches + gw_prior[i])/(-n_tot) for i in w}
f = ((L_labeled.sum() + L_unlabeled.sum()) * n_minibatches + logpu + logpv + logpw)/(-n_tot)
L[0] += ((L_labeled.sum() + L_unlabeled.sum()) * n_minibatches + logpu + logpv + logpw)/(-n_tot)
n_L[0] += 1
#ndict.pNorm(gu_unlabeled)
return f, {'u': gu, 'v':gv, 'w':gw}
w_init = {'u': u_init, 'v':v_init, 'w':w_init}
optimizer = AdaM(f_df, w_init, minibatches, alpha=3e-4, beta1=0.9, beta2=0.999)
for i in range(n_passes):
w = optimizer.optimize(num_passes=1)
LB = L[0]/(1.*n_L[0])
testset_error = hook(i, w['u'], w['v'], w['w'], LB)
L[0] = 0
n_L[0] = 0
return testset_error
| mit | -2,804,854,358,435,884,500 | 38.917722 | 216 | 0.543365 | false |
letouriste001/SmartForest_2.0 | python3.4Smartforest/lib/python3.4/site-packages/django/utils/functional.py | 1 | 14569 | import copy
import operator
from functools import total_ordering, wraps
from django.utils import six
# You can't trivially replace this with `functools.partial` because this binds
# to classes and returns bound instances, whereas functools.partial (on
# CPython) is a type and its instances don't bind.
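# Illustrative difference (not part of the original file): assigning
#   SomeClass.shortcut = curry(helper, extra=1)
# yields a plain function, which binds as a method (receives `self`), whereas a
# functools.partial instance assigned the same way would not be bound.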
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*(args + moreargs), **dict(kwargs, **morekwargs))
return _curried
class cached_property(object):
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
Optional ``name`` argument allows you to make cached properties of other
methods. (e.g. url = cached_property(get_absolute_url, name='url') )
"""
def __init__(self, func, name=None):
self.func = func
self.__doc__ = getattr(func, '__doc__')
self.name = name or func.__name__
def __get__(self, instance, type=None):
if instance is None:
return self
res = instance.__dict__[self.name] = self.func(instance)
return res
class Promise(object):
"""
This is just a base class for the proxy class created in
the closure of the lazy function. It can be used to recognize
promises in code.
"""
pass
def lazy(func, *resultclasses):
"""
Turns any callable into a lazy evaluated callable. You need to give result
classes or types -- at least one is needed so that the automatic forcing of
the lazy evaluation code is triggered. Results are not memoized; the
function is evaluated on every access.
"""
@total_ordering
class __proxy__(Promise):
"""
Encapsulate a function call and act as a proxy for methods that are
called on the result of that function. The function is not evaluated
until one of the methods on the result is called.
"""
__prepared = False
def __init__(self, args, kw):
self.__args = args
self.__kw = kw
if not self.__prepared:
self.__prepare_class__()
self.__prepared = True
def __reduce__(self):
return (
_lazy_proxy_unpickle,
(func, self.__args, self.__kw) + resultclasses
)
@classmethod
def __prepare_class__(cls):
for resultclass in resultclasses:
for type_ in resultclass.mro():
for method_name in type_.__dict__.keys():
# All __promise__ return the same wrapper method, they
# look up the correct implementation when called.
if hasattr(cls, method_name):
continue
meth = cls.__promise__(method_name)
setattr(cls, method_name, meth)
cls._delegate_bytes = bytes in resultclasses
cls._delegate_text = six.text_type in resultclasses
assert not (cls._delegate_bytes and cls._delegate_text), (
"Cannot call lazy() with both bytes and text return types.")
if cls._delegate_text:
if six.PY3:
cls.__str__ = cls.__text_cast
else:
cls.__unicode__ = cls.__text_cast
cls.__str__ = cls.__bytes_cast_encoded
elif cls._delegate_bytes:
if six.PY3:
cls.__bytes__ = cls.__bytes_cast
else:
cls.__str__ = cls.__bytes_cast
@classmethod
def __promise__(cls, method_name):
# Builds a wrapper around some magic method
def __wrapper__(self, *args, **kw):
# Automatically triggers the evaluation of a lazy value and
# applies the given magic method of the result type.
res = func(*self.__args, **self.__kw)
return getattr(res, method_name)(*args, **kw)
return __wrapper__
def __text_cast(self):
return func(*self.__args, **self.__kw)
def __bytes_cast(self):
return bytes(func(*self.__args, **self.__kw))
def __bytes_cast_encoded(self):
return func(*self.__args, **self.__kw).encode('utf-8')
def __cast(self):
if self._delegate_bytes:
return self.__bytes_cast()
elif self._delegate_text:
return self.__text_cast()
else:
return func(*self.__args, **self.__kw)
def __str__(self):
# object defines __str__(), so __prepare_class__() won't overload
# a __str__() method from the proxied class.
return str(self.__cast())
def __ne__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() != other
def __eq__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() == other
def __lt__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() < other
def __hash__(self):
return hash(self.__cast())
def __mod__(self, rhs):
if self._delegate_bytes and six.PY2:
return bytes(self) % rhs
elif self._delegate_text:
return six.text_type(self) % rhs
return self.__cast() % rhs
def __deepcopy__(self, memo):
# Instances of this class are effectively immutable. It's just a
# collection of functions. So we don't need to do anything
# complicated for copying.
memo[id(self)] = self
return self
@wraps(func)
def __wrapper__(*args, **kw):
# Creates the proxy object, instead of the actual value.
return __proxy__(args, kw)
return __wrapper__
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
return lazy(func, *resultclasses)(*args, **kwargs)
def allow_lazy(func, *resultclasses):
"""
A decorator that allows a function to be called with one or more lazy
arguments. If none of the args are lazy, the function is evaluated
immediately, otherwise a __proxy__ is returned that will evaluate the
function when needed.
"""
lazy_func = lazy(func, *resultclasses)
@wraps(func)
def wrapper(*args, **kwargs):
for arg in list(args) + list(kwargs.values()):
if isinstance(arg, Promise):
break
else:
return func(*args, **kwargs)
return lazy_func(*args, **kwargs)
return wrapper
empty = object()
def new_method_proxy(func):
def inner(self, *args):
if self._wrapped is empty:
self._setup()
return func(self._wrapped, *args)
return inner
class LazyObject(object):
"""
A wrapper for another class that can be used to delay instantiation of the
wrapped class.
By subclassing, you have the opportunity to intercept and alter the
instantiation. If you don't need to do that, use SimpleLazyObject.
"""
# Avoid infinite recursion when tracing __init__ (#19456).
_wrapped = None
def __init__(self):
# Note: if a subclass overrides __init__(), it will likely need to
# override __copy__() and __deepcopy__() as well.
self._wrapped = empty
__getattr__ = new_method_proxy(getattr)
def __setattr__(self, name, value):
if name == "_wrapped":
# Assign to __dict__ to avoid infinite __setattr__ loops.
self.__dict__["_wrapped"] = value
else:
if self._wrapped is empty:
self._setup()
setattr(self._wrapped, name, value)
def __delattr__(self, name):
if name == "_wrapped":
raise TypeError("can't delete _wrapped.")
if self._wrapped is empty:
self._setup()
delattr(self._wrapped, name)
def _setup(self):
"""
Must be implemented by subclasses to initialize the wrapped object.
"""
raise NotImplementedError('subclasses of LazyObject must provide a _setup() method')
# Because we have messed with __class__ below, we confuse pickle as to what
# class we are pickling. We're going to have to initialize the wrapped
# object to successfully pickle it, so we might as well just pickle the
# wrapped object since they're supposed to act the same way.
#
# Unfortunately, if we try to simply act like the wrapped object, the ruse
# will break down when pickle gets our id(). Thus we end up with pickle
# thinking, in effect, that we are a distinct object from the wrapped
# object, but with the same __dict__. This can cause problems (see #25389).
#
# So instead, we define our own __reduce__ method and custom unpickler. We
# pickle the wrapped object as the unpickler's argument, so that pickle
# will pickle it normally, and then the unpickler simply returns its
# argument.
def __reduce__(self):
if self._wrapped is empty:
self._setup()
return (unpickle_lazyobject, (self._wrapped,))
# We have to explicitly override __getstate__ so that older versions of
# pickle don't try to pickle the __dict__ (which in the case of a
# SimpleLazyObject may contain a lambda). The value will end up being
# ignored by our __reduce__ and custom unpickler.
def __getstate__(self):
return {}
def __copy__(self):
if self._wrapped is empty:
# If uninitialized, copy the wrapper. Use type(self), not
# self.__class__, because the latter is proxied.
return type(self)()
else:
# If initialized, return a copy of the wrapped object.
return copy.copy(self._wrapped)
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use type(self), not self.__class__, because the
# latter is proxied.
result = type(self)()
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
if six.PY3:
__bytes__ = new_method_proxy(bytes)
__str__ = new_method_proxy(str)
__bool__ = new_method_proxy(bool)
else:
__str__ = new_method_proxy(str)
__unicode__ = new_method_proxy(unicode) # NOQA: unicode undefined on PY3
__nonzero__ = new_method_proxy(bool)
# Introspection support
__dir__ = new_method_proxy(dir)
# Need to pretend to be the wrapped class, for the sake of objects that
# care about this (especially in equality tests)
__class__ = property(new_method_proxy(operator.attrgetter("__class__")))
__eq__ = new_method_proxy(operator.eq)
__ne__ = new_method_proxy(operator.ne)
__hash__ = new_method_proxy(hash)
# List/Tuple/Dictionary methods support
__getitem__ = new_method_proxy(operator.getitem)
__setitem__ = new_method_proxy(operator.setitem)
__delitem__ = new_method_proxy(operator.delitem)
__iter__ = new_method_proxy(iter)
__len__ = new_method_proxy(len)
__contains__ = new_method_proxy(operator.contains)
def unpickle_lazyobject(wrapped):
"""
Used to unpickle lazy objects. Just return its argument, which will be the
wrapped object.
"""
return wrapped
unpickle_lazyobject.__safe_for_unpickling__ = True
class SimpleLazyObject(LazyObject):
"""
A lazy object initialized from any function.
Designed for compound objects of unknown type. For builtins or objects of
known type, use django.utils.functional.lazy.
"""
def __init__(self, func):
"""
Pass in a callable that returns the object to be wrapped.
If copies are made of the resulting SimpleLazyObject, which can happen
in various circumstances within Django, then you must ensure that the
callable can be safely run more than once and will return the same
value.
"""
self.__dict__['_setupfunc'] = func
super(SimpleLazyObject, self).__init__()
def _setup(self):
self._wrapped = self._setupfunc()
# Return a meaningful representation of the lazy object for debugging
# without evaluating the wrapped object.
def __repr__(self):
if self._wrapped is empty:
repr_attr = self._setupfunc
else:
repr_attr = self._wrapped
return '<%s: %r>' % (type(self).__name__, repr_attr)
def __copy__(self):
if self._wrapped is empty:
# If uninitialized, copy the wrapper. Use SimpleLazyObject, not
# self.__class__, because the latter is proxied.
return SimpleLazyObject(self._setupfunc)
else:
# If initialized, return a copy of the wrapped object.
return copy.copy(self._wrapped)
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use SimpleLazyObject, not self.__class__, because the
# latter is proxied.
result = SimpleLazyObject(self._setupfunc)
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
class lazy_property(property):
"""
A property that works with subclasses by wrapping the decorated
functions of the base class.
"""
def __new__(cls, fget=None, fset=None, fdel=None, doc=None):
if fget is not None:
@wraps(fget)
def fget(instance, instance_type=None, name=fget.__name__):
return getattr(instance, name)()
if fset is not None:
@wraps(fset)
def fset(instance, value, name=fset.__name__):
return getattr(instance, name)(value)
if fdel is not None:
@wraps(fdel)
def fdel(instance, name=fdel.__name__):
return getattr(instance, name)()
return property(fget, fset, fdel, doc)
def partition(predicate, values):
"""
Splits the values into two sets, based on the return value of the function
(True/False). e.g.:
>>> partition(lambda x: x > 3, range(5))
[0, 1, 2, 3], [4]
"""
results = ([], [])
for item in values:
results[predicate(item)].append(item)
return results
| mit | -2,134,086,999,665,588,000 | 34.276029 | 92 | 0.579175 | false |
sephiroth6/nodeshot | nodeshot/core/layers/views.py | 3 | 4580 | from django.http import Http404
from django.utils.translation import ugettext_lazy as _
from rest_framework import generics, permissions, authentication
from rest_framework.response import Response
from nodeshot.core.base.utils import Hider
from nodeshot.core.nodes.views import NodeList
from nodeshot.core.nodes.serializers import NodeGeoSerializer, PaginatedGeojsonNodeListSerializer
from .settings import REVERSION_ENABLED
from .models import Layer
from .serializers import * # noqa
if REVERSION_ENABLED:
from nodeshot.core.base.mixins import RevisionCreate, RevisionUpdate
class LayerListBase(RevisionCreate, generics.ListCreateAPIView):
pass
class LayerDetailBase(RevisionUpdate, generics.RetrieveUpdateAPIView):
pass
else:
class LayerListBase(generics.ListCreateAPIView):
pass
class LayerDetailBase(generics.RetrieveUpdateAPIView):
pass
class LayerList(LayerListBase):
"""
Retrieve list of all layers.
### POST
Create new layer if authorized (admins and allowed users only).
"""
queryset = Layer.objects.published()
permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly, )
authentication_classes = (authentication.SessionAuthentication,)
serializer_class = LayerListSerializer
pagination_serializer_class = PaginatedLayerListSerializer
paginate_by_param = 'limit'
paginate_by = None
layer_list = LayerList.as_view()
class LayerDetail(LayerDetailBase):
"""
Retrieve details of specified layer.
### PUT & PATCH
Edit specified layer
"""
permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly, )
authentication_classes = (authentication.SessionAuthentication,)
queryset = Layer.objects.published()
serializer_class = LayerDetailSerializer
lookup_field = 'slug'
layer_detail = LayerDetail.as_view()
class LayerNodesList(NodeList):
"""
Retrieve list of nodes of the specified layer
Parameters:
* `search=<word>`: search <word> in name of nodes of specified layer
* `limit=<n>`: specify number of items per page (defaults to 40)
"""
layer = None
def get_layer(self):
""" retrieve layer from DB """
if self.layer:
return
try:
self.layer = Layer.objects.get(slug=self.kwargs['slug'])
except Layer.DoesNotExist:
raise Http404(_('Layer not found'))
def get_queryset(self):
""" extend parent class queryset by filtering nodes of the specified layer """
self.get_layer()
return super(LayerNodesList, self).get_queryset().filter(layer_id=self.layer.id)
def get_nodes(self, request, *args, **kwargs):
""" this method might be overridden by other modules (eg: nodeshot.interop.sync) """
# ListSerializerMixin.list returns a serializer object
return (self.list(request, *args, **kwargs)).data
def get(self, request, *args, **kwargs):
""" Retrieve list of nodes of the specified layer """
self.get_layer()
# get nodes of layer
nodes = self.get_nodes(request, *args, **kwargs)
return Response(nodes)
post = Hider()
nodes_list = LayerNodesList.as_view()
class LayerNodesGeoJSONList(LayerNodesList):
"""
Retrieve list of nodes of the specified layer in GeoJSON format.
Parameters:
* `search=<word>`: search <word> in name, slug, description and address of nodes
* `limit=<n>`: specify number of items per page (show all by default)
"""
pagination_serializer_class = PaginatedGeojsonNodeListSerializer
paginate_by_param = 'limit'
paginate_by = 0
serializer_class = NodeGeoSerializer
def get(self, request, *args, **kwargs):
""" Retrieve list of nodes of the specified layer in GeoJSON format. """
# overwritten just to tweak the docstring for auto documentation purposes
return super(LayerNodesGeoJSONList, self).get(request, *args, **kwargs)
nodes_geojson_list = LayerNodesGeoJSONList.as_view()
class LayerGeoJSONList(generics.ListAPIView):
"""
Retrieve list of layers in GeoJSON format.
Parameters:
* `limit=<n>`: specify number of items per page (defaults to 40)
* `page=<n>`: show page n
"""
pagination_serializer_class = PaginatedGeojsonLayerListSerializer
paginate_by_param = 'limit'
paginate_by = 40
serializer_class = GeoLayerListSerializer
queryset = Layer.objects.published().exclude(area__isnull=True)
layers_geojson_list = LayerGeoJSONList.as_view()
| gpl-3.0 | -6,957,845,717,749,241,000 | 30.369863 | 97 | 0.700437 | false |
Sumith1896/sympy | sympy/combinatorics/tests/test_tensor_can.py | 96 | 24791 | from sympy.core.compatibility import range
from sympy.combinatorics.permutations import Permutation, Perm
from sympy.combinatorics.tensor_can import (perm_af_direct_product, dummy_sgs,
riemann_bsgs, get_symmetric_group_sgs, canonicalize, bsgs_direct_product)
from sympy.combinatorics.testutil import canonicalize_naive, graph_certificate
from sympy.utilities.pytest import skip, XFAIL
def test_perm_af_direct_product():
gens1 = [[1,0,2,3], [0,1,3,2]]
gens2 = [[1,0]]
assert perm_af_direct_product(gens1, gens2, 0) == [[1, 0, 2, 3, 4, 5], [0, 1, 3, 2, 4, 5], [0, 1, 2, 3, 5, 4]]
gens1 = [[1,0,2,3,5,4], [0,1,3,2,4,5]]
gens2 = [[1,0,2,3]]
    assert perm_af_direct_product(gens1, gens2, 1) == [[1, 0, 2, 3, 4, 5, 7, 6], [0, 1, 3, 2, 4, 5, 6, 7], [0, 1, 2, 3, 5, 4, 6, 7]]
def test_dummy_sgs():
a = dummy_sgs([1,2], 0, 4)
assert a == [[0,2,1,3,4,5]]
a = dummy_sgs([2,3,4,5], 0, 8)
assert a == [x._array_form for x in [Perm(9)(2,3), Perm(9)(4,5),
Perm(9)(2,4)(3,5)]]
a = dummy_sgs([2,3,4,5], 1, 8)
assert a == [x._array_form for x in [Perm(2,3)(8,9), Perm(4,5)(8,9),
Perm(9)(2,4)(3,5)]]
def test_get_symmetric_group_sgs():
assert get_symmetric_group_sgs(2) == ([0], [Permutation(3)(0,1)])
assert get_symmetric_group_sgs(2, 1) == ([0], [Permutation(0,1)(2,3)])
assert get_symmetric_group_sgs(3) == ([0,1], [Permutation(4)(0,1), Permutation(4)(1,2)])
assert get_symmetric_group_sgs(3, 1) == ([0,1], [Permutation(0,1)(3,4), Permutation(1,2)(3,4)])
assert get_symmetric_group_sgs(4) == ([0,1,2], [Permutation(5)(0,1), Permutation(5)(1,2), Permutation(5)(2,3)])
assert get_symmetric_group_sgs(4, 1) == ([0,1,2], [Permutation(0,1)(4,5), Permutation(1,2)(4,5), Permutation(2,3)(4,5)])
def test_canonicalize_no_slot_sym():
# cases in which there is no slot symmetry after fixing the
# free indices; here and in the following if the symmetry of the
# metric is not specified, it is assumed to be symmetric.
# If it is not specified, tensors are commuting.
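    # (Added note.) In these tests the permutation g has size n + 2: the first n
    # points are the tensor index slots, and the last two points only track the
    # overall sign (swapping them corresponds to a sign flip of the canonical form).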
# A_d0 * B^d0; g = [1,0, 2,3]; T_c = A^d0*B_d0; can = [0,1,2,3]
base1, gens1 = get_symmetric_group_sgs(1)
dummies = [0, 1]
g = Permutation([1,0,2,3])
can = canonicalize(g, dummies, 0, (base1,gens1,1,0), (base1,gens1,1,0))
assert can == [0,1,2,3]
# equivalently
can = canonicalize(g, dummies, 0, (base1, gens1, 2, None))
assert can == [0,1,2,3]
# with antisymmetric metric; T_c = -A^d0*B_d0; can = [0,1,3,2]
can = canonicalize(g, dummies, 1, (base1,gens1,1,0), (base1,gens1,1,0))
assert can == [0,1,3,2]
# A^a * B^b; ord = [a,b]; g = [0,1,2,3]; can = g
g = Permutation([0,1,2,3])
dummies = []
t0 = t1 = (base1, gens1, 1, 0)
can = canonicalize(g, dummies, 0, t0, t1)
assert can == [0,1,2,3]
# B^b * A^a
g = Permutation([1,0,2,3])
can = canonicalize(g, dummies, 0, t0, t1)
assert can == [1,0,2,3]
# A symmetric
# A^{b}_{d0}*A^{d0, a} order a,b,d0,-d0; T_c = A^{a d0}*A{b}_{d0}
# g = [1,3,2,0,4,5]; can = [0,2,1,3,4,5]
base2, gens2 = get_symmetric_group_sgs(2)
dummies = [2,3]
g = Permutation([1,3,2,0,4,5])
can = canonicalize(g, dummies, 0, (base2, gens2, 2, 0))
assert can == [0, 2, 1, 3, 4, 5]
# with antisymmetric metric
can = canonicalize(g, dummies, 1, (base2, gens2, 2, 0))
assert can == [0, 2, 1, 3, 4, 5]
# A^{a}_{d0}*A^{d0, b}
g = Permutation([0,3,2,1,4,5])
can = canonicalize(g, dummies, 1, (base2, gens2, 2, 0))
assert can == [0, 2, 1, 3, 5, 4]
# A, B symmetric
# A^b_d0*B^{d0,a}; g=[1,3,2,0,4,5]
# T_c = A^{b,d0}*B_{a,d0}; can = [1,2,0,3,4,5]
dummies = [2,3]
g = Permutation([1,3,2,0,4,5])
can = canonicalize(g, dummies, 0, (base2,gens2,1,0), (base2,gens2,1,0))
assert can == [1,2,0,3,4,5]
# same with antisymmetric metric
can = canonicalize(g, dummies, 1, (base2,gens2,1,0), (base2,gens2,1,0))
assert can == [1,2,0,3,5,4]
# A^{d1}_{d0}*B^d0*C_d1 ord=[d0,-d0,d1,-d1]; g = [2,1,0,3,4,5]
# T_c = A^{d0 d1}*B_d0*C_d1; can = [0,2,1,3,4,5]
base1, gens1 = get_symmetric_group_sgs(1)
base2, gens2 = get_symmetric_group_sgs(2)
g = Permutation([2,1,0,3,4,5])
dummies = [0,1,2,3]
t0 = (base2, gens2, 1, 0)
t1 = t2 = (base1, gens1, 1, 0)
can = canonicalize(g, dummies, 0, t0, t1, t2)
assert can == [0, 2, 1, 3, 4, 5]
# A without symmetry
# A^{d1}_{d0}*B^d0*C_d1 ord=[d0,-d0,d1,-d1]; g = [2,1,0,3,4,5]
# T_c = A^{d0 d1}*B_d1*C_d0; can = [0,2,3,1,4,5]
g = Permutation([2,1,0,3,4,5])
dummies = [0,1,2,3]
t0 = ([], [Permutation(list(range(4)))], 1, 0)
can = canonicalize(g, dummies, 0, t0, t1, t2)
assert can == [0,2,3,1,4,5]
# A, B without symmetry
# A^{d1}_{d0}*B_{d1}^{d0}; g = [2,1,3,0,4,5]
# T_c = A^{d0 d1}*B_{d0 d1}; can = [0,2,1,3,4,5]
t0 = t1 = ([], [Permutation(list(range(4)))], 1, 0)
dummies = [0,1,2,3]
g = Permutation([2,1,3,0,4,5])
can = canonicalize(g, dummies, 0, t0, t1)
assert can == [0, 2, 1, 3, 4, 5]
# A_{d0}^{d1}*B_{d1}^{d0}; g = [1,2,3,0,4,5]
# T_c = A^{d0 d1}*B_{d1 d0}; can = [0,2,3,1,4,5]
g = Permutation([1,2,3,0,4,5])
can = canonicalize(g, dummies, 0, t0, t1)
assert can == [0,2,3,1,4,5]
# A, B, C without symmetry
# A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1]
# g=[4,2,0,3,5,1,6,7]
# T_c=A^{d0 d1}*B_{a d1}*C_{d0 b}; can = [2,4,0,5,3,1,6,7]
t0 = t1 = t2 = ([], [Permutation(list(range(4)))], 1, 0)
dummies = [2,3,4,5]
g = Permutation([4,2,0,3,5,1,6,7])
can = canonicalize(g, dummies, 0, t0, t1, t2)
assert can == [2,4,0,5,3,1,6,7]
# A symmetric, B and C without symmetry
# A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1]
# g=[4,2,0,3,5,1,6,7]
# T_c = A^{d0 d1}*B_{a d0}*C_{d1 b}; can = [2,4,0,3,5,1,6,7]
t0 = (base2,gens2,1,0)
t1 = t2 = ([], [Permutation(list(range(4)))], 1, 0)
dummies = [2,3,4,5]
g = Permutation([4,2,0,3,5,1,6,7])
can = canonicalize(g, dummies, 0, t0, t1, t2)
assert can == [2,4,0,3,5,1,6,7]
# A and C symmetric, B without symmetry
# A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1]
# g=[4,2,0,3,5,1,6,7]
# T_c = A^{d0 d1}*B_{a d0}*C_{b d1}; can = [2,4,0,3,1,5,6,7]
t0 = t2 = (base2,gens2,1,0)
t1 = ([], [Permutation(list(range(4)))], 1, 0)
dummies = [2,3,4,5]
g = Permutation([4,2,0,3,5,1,6,7])
can = canonicalize(g, dummies, 0, t0, t1, t2)
assert can == [2,4,0,3,1,5,6,7]
# A symmetric, B without symmetry, C antisymmetric
# A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1]
# g=[4,2,0,3,5,1,6,7]
# T_c = -A^{d0 d1}*B_{a d0}*C_{b d1}; can = [2,4,0,3,1,5,7,6]
t0 = (base2,gens2, 1, 0)
t1 = ([], [Permutation(list(range(4)))], 1, 0)
base2a, gens2a = get_symmetric_group_sgs(2, 1)
t2 = (base2a, gens2a, 1, 0)
dummies = [2,3,4,5]
g = Permutation([4,2,0,3,5,1,6,7])
can = canonicalize(g, dummies, 0, t0, t1, t2)
assert can == [2,4,0,3,1,5,7,6]
def test_canonicalize_no_dummies():
base1, gens1 = get_symmetric_group_sgs(1)
base2, gens2 = get_symmetric_group_sgs(2)
base2a, gens2a = get_symmetric_group_sgs(2, 1)
# A commuting
# A^c A^b A^a; ord = [a,b,c]; g = [2,1,0,3,4]
# T_c = A^a A^b A^c; can = list(range(5))
g = Permutation([2,1,0,3,4])
can = canonicalize(g, [], 0, (base1, gens1, 3, 0))
assert can == list(range(5))
# A anticommuting
# A^c A^b A^a; ord = [a,b,c]; g = [2,1,0,3,4]
# T_c = -A^a A^b A^c; can = [0,1,2,4,3]
g = Permutation([2,1,0,3,4])
can = canonicalize(g, [], 0, (base1, gens1, 3, 1))
assert can == [0,1,2,4,3]
# A commuting and symmetric
# A^{b,d}*A^{c,a}; ord = [a,b,c,d]; g = [1,3,2,0,4,5]
# T_c = A^{a c}*A^{b d}; can = [0,2,1,3,4,5]
g = Permutation([1,3,2,0,4,5])
can = canonicalize(g, [], 0, (base2, gens2, 2, 0))
assert can == [0,2,1,3,4,5]
# A anticommuting and symmetric
# A^{b,d}*A^{c,a}; ord = [a,b,c,d]; g = [1,3,2,0,4,5]
# T_c = -A^{a c}*A^{b d}; can = [0,2,1,3,5,4]
g = Permutation([1,3,2,0,4,5])
can = canonicalize(g, [], 0, (base2, gens2, 2, 1))
assert can == [0,2,1,3,5,4]
# A^{c,a}*A^{b,d} ; g = [2,0,1,3,4,5]
# T_c = A^{a c}*A^{b d}; can = [0,2,1,3,4,5]
g = Permutation([2,0,1,3,4,5])
can = canonicalize(g, [], 0, (base2, gens2, 2, 1))
assert can == [0,2,1,3,4,5]
def test_no_metric_symmetry():
# no metric symmetry
# A^d1_d0 * A^d0_d1; ord = [d0,-d0,d1,-d1]; g= [2,1,0,3,4,5]
# T_c = A^d0_d1 * A^d1_d0; can = [0,3,2,1,4,5]
g = Permutation([2,1,0,3,4,5])
can = canonicalize(g, list(range(4)), None, [[], [Permutation(list(range(4)))], 2, 0])
assert can == [0,3,2,1,4,5]
# A^d1_d2 * A^d0_d3 * A^d2_d1 * A^d3_d0
# ord = [d0,-d0,d1,-d1,d2,-d2,d3,-d3]
# 0 1 2 3 4 5 6 7
# g = [2,5,0,7,4,3,6,1,8,9]
# T_c = A^d0_d1 * A^d1_d0 * A^d2_d3 * A^d3_d2
# can = [0,3,2,1,4,7,6,5,8,9]
g = Permutation([2,5,0,7,4,3,6,1,8,9])
#can = canonicalize(g, list(range(8)), 0, [[], [list(range(4))], 4, 0])
#assert can == [0, 2, 3, 1, 4, 6, 7, 5, 8, 9]
can = canonicalize(g, list(range(8)), None, [[], [Permutation(list(range(4)))], 4, 0])
assert can == [0, 3, 2, 1, 4, 7, 6, 5, 8, 9]
# A^d0_d2 * A^d1_d3 * A^d3_d0 * A^d2_d1
# g = [0,5,2,7,6,1,4,3,8,9]
# T_c = A^d0_d1 * A^d1_d2 * A^d2_d3 * A^d3_d0
# can = [0,3,2,5,4,7,6,1,8,9]
g = Permutation([0,5,2,7,6,1,4,3,8,9])
can = canonicalize(g, list(range(8)), None, [[], [Permutation(list(range(4)))], 4, 0])
assert can == [0,3,2,5,4,7,6,1,8,9]
g = Permutation([12,7,10,3,14,13,4,11,6,1,2,9,0,15,8,5,16,17])
can = canonicalize(g, list(range(16)), None, [[], [Permutation(list(range(4)))], 8, 0])
assert can == [0,3,2,5,4,7,6,1,8,11,10,13,12,15,14,9,16,17]
def test_canonical_free():
# t = A^{d0 a1}*A_d0^a0
# ord = [a0,a1,d0,-d0]; g = [2,1,3,0,4,5]; dummies = [[2,3]]
# t_c = A_d0^a0*A^{d0 a1}
# can = [3,0, 2,1, 4,5]
base = [0]
gens = [Permutation(5)(0,2)(1,3)]
g = Permutation([2,1,3,0,4,5])
num_free = 2
dummies = [[2,3]]
can = canonicalize(g, dummies, [None], ([], [Permutation(3)], 2, 0))
assert can == [3,0, 2,1, 4,5]
def test_canonicalize1():
base1, gens1 = get_symmetric_group_sgs(1)
base1a, gens1a = get_symmetric_group_sgs(1, 1)
base2, gens2 = get_symmetric_group_sgs(2)
base3, gens3 = get_symmetric_group_sgs(3)
base2a, gens2a = get_symmetric_group_sgs(2, 1)
base3a, gens3a = get_symmetric_group_sgs(3, 1)
# A_d0*A^d0; ord = [d0,-d0]; g = [1,0,2,3]
# T_c = A^d0*A_d0; can = [0,1,2,3]
g = Permutation([1,0,2,3])
can = canonicalize(g, [0, 1], 0, (base1, gens1, 2, 0))
assert can == list(range(4))
# A commuting
# A_d0*A_d1*A_d2*A^d2*A^d1*A^d0; ord=[d0,-d0,d1,-d1,d2,-d2]
# g = [1,3,5,4,2,0,6,7]
# T_c = A^d0*A_d0*A^d1*A_d1*A^d2*A_d2; can = list(range(8))
g = Permutation([1,3,5,4,2,0,6,7])
can = canonicalize(g, list(range(6)), 0, (base1, gens1, 6, 0))
assert can == list(range(8))
# A anticommuting
# A_d0*A_d1*A_d2*A^d2*A^d1*A^d0; ord=[d0,-d0,d1,-d1,d2,-d2]
# g = [1,3,5,4,2,0,6,7]
# T_c 0; can = 0
g = Permutation([1,3,5,4,2,0,6,7])
can = canonicalize(g, list(range(6)), 0, (base1, gens1, 6, 1))
assert can == 0
can1 = canonicalize_naive(g, list(range(6)), 0, (base1, gens1, 6, 1))
assert can1 == 0
# A commuting symmetric
# A^{d0 b}*A^a_d1*A^d1_d0; ord=[a,b,d0,-d0,d1,-d1]
# g = [2,1,0,5,4,3,6,7]
# T_c = A^{a d0}*A^{b d1}*A_{d0 d1}; can = [0,2,1,4,3,5,6,7]
g = Permutation([2,1,0,5,4,3,6,7])
can = canonicalize(g, list(range(2,6)), 0, (base2, gens2, 3, 0))
assert can == [0,2,1,4,3,5,6,7]
# A, B commuting symmetric
# A^{d0 b}*A^d1_d0*B^a_d1; ord=[a,b,d0,-d0,d1,-d1]
# g = [2,1,4,3,0,5,6,7]
# T_c = A^{b d0}*A_d0^d1*B^a_d1; can = [1,2,3,4,0,5,6,7]
g = Permutation([2,1,4,3,0,5,6,7])
can = canonicalize(g, list(range(2,6)), 0, (base2,gens2,2,0), (base2,gens2,1,0))
assert can == [1,2,3,4,0,5,6,7]
# A commuting symmetric
# A^{d1 d0 b}*A^{a}_{d1 d0}; ord=[a,b, d0,-d0,d1,-d1]
# g = [4,2,1,0,5,3,6,7]
# T_c = A^{a d0 d1}*A^{b}_{d0 d1}; can = [0,2,4,1,3,5,6,7]
g = Permutation([4,2,1,0,5,3,6,7])
can = canonicalize(g, list(range(2,6)), 0, (base3, gens3, 2, 0))
assert can == [0,2,4,1,3,5,6,7]
# A^{d3 d0 d2}*A^a0_{d1 d2}*A^d1_d3^a1*A^{a2 a3}_d0
# ord = [a0,a1,a2,a3,d0,-d0,d1,-d1,d2,-d2,d3,-d3]
# 0 1 2 3 4 5 6 7 8 9 10 11
# g = [10,4,8, 0,7,9, 6,11,1, 2,3,5, 12,13]
# T_c = A^{a0 d0 d1}*A^a1_d0^d2*A^{a2 a3 d3}*A_{d1 d2 d3}
# can = [0,4,6, 1,5,8, 2,3,10, 7,9,11, 12,13]
g = Permutation([10,4,8, 0,7,9, 6,11,1, 2,3,5, 12,13])
can = canonicalize(g, list(range(4,12)), 0, (base3, gens3, 4, 0))
assert can == [0,4,6, 1,5,8, 2,3,10, 7,9,11, 12,13]
# A commuting symmetric, B antisymmetric
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# ord = [d0,-d0,d1,-d1,d2,-d2,d3,-d3]
# g = [0,2,4,5,7,3,1,6,8,9]
    # in this example and in the next three,
# renaming dummy indices and using symmetry of A,
# T = A^{d0 d1 d2} * A_{d0 d1 d3} * B_d2^d3
# can = 0
g = Permutation([0,2,4,5,7,3,1,6,8,9])
can = canonicalize(g, list(range(8)), 0, (base3, gens3,2,0), (base2a,gens2a,1,0))
assert can == 0
# A anticommuting symmetric, B anticommuting
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# T_c = A^{d0 d1 d2} * A_{d0 d1}^d3 * B_{d2 d3}
# can = [0,2,4, 1,3,6, 5,7, 8,9]
can = canonicalize(g, list(range(8)), 0, (base3, gens3,2,1), (base2a,gens2a,1,0))
assert can == [0,2,4, 1,3,6, 5,7, 8,9]
# A anticommuting symmetric, B antisymmetric commuting, antisymmetric metric
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# T_c = -A^{d0 d1 d2} * A_{d0 d1}^d3 * B_{d2 d3}
# can = [0,2,4, 1,3,6, 5,7, 9,8]
can = canonicalize(g, list(range(8)), 1, (base3, gens3,2,1), (base2a,gens2a,1,0))
assert can == [0,2,4, 1,3,6, 5,7, 9,8]
# A anticommuting symmetric, B anticommuting anticommuting,
# no metric symmetry
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# T_c = A^{d0 d1 d2} * A_{d0 d1 d3} * B_d2^d3
# can = [0,2,4, 1,3,7, 5,6, 8,9]
can = canonicalize(g, list(range(8)), None, (base3, gens3,2,1), (base2a,gens2a,1,0))
assert can == [0,2,4,1,3,7,5,6,8,9]
# Gamma anticommuting
# Gamma_{mu nu} * gamma^rho * Gamma^{nu mu alpha}
# ord = [alpha, rho, mu,-mu,nu,-nu]
# g = [3,5,1,4,2,0,6,7]
# T_c = -Gamma^{mu nu} * gamma^rho * Gamma_{alpha mu nu}
# can = [2,4,1,0,3,5,7,6]]
g = Permutation([3,5,1,4,2,0,6,7])
t0 = (base2a, gens2a, 1, None)
t1 = (base1, gens1, 1, None)
t2 = (base3a, gens3a, 1, None)
can = canonicalize(g, list(range(2, 6)), 0, t0, t1, t2)
assert can == [2,4,1,0,3,5,7,6]
# Gamma_{mu nu} * Gamma^{gamma beta} * gamma_rho * Gamma^{nu mu alpha}
# ord = [alpha, beta, gamma, -rho, mu,-mu,nu,-nu]
# 0 1 2 3 4 5 6 7
# g = [5,7,2,1,3,6,4,0,8,9]
    # T_c = Gamma^{mu nu} * Gamma^{beta gamma} * gamma_rho * Gamma^alpha_{mu nu}
    # can = [4,6,1,2,3,0,5,7,8,9]
t0 = (base2a, gens2a, 2, None)
g = Permutation([5,7,2,1,3,6,4,0,8,9])
can = canonicalize(g, list(range(4, 8)), 0, t0, t1, t2)
assert can == [4,6,1,2,3,0,5,7,8,9]
# f^a_{b,c} antisymmetric in b,c; A_mu^a no symmetry
# f^c_{d a} * f_{c e b} * A_mu^d * A_nu^a * A^{nu e} * A^{mu b}
# ord = [mu,-mu,nu,-nu,a,-a,b,-b,c,-c,d,-d, e, -e]
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13
# g = [8,11,5, 9,13,7, 1,10, 3,4, 2,12, 0,6, 14,15]
# T_c = -f^{a b c} * f_a^{d e} * A^mu_b * A_{mu d} * A^nu_c * A_{nu e}
# can = [4,6,8, 5,10,12, 0,7, 1,11, 2,9, 3,13, 15,14]
g = Permutation([8,11,5, 9,13,7, 1,10, 3,4, 2,12, 0,6, 14,15])
base_f, gens_f = bsgs_direct_product(base1, gens1, base2a, gens2a)
base_A, gens_A = bsgs_direct_product(base1, gens1, base1, gens1)
t0 = (base_f, gens_f, 2, 0)
t1 = (base_A, gens_A, 4, 0)
can = canonicalize(g, [list(range(4)), list(range(4, 14))], [0, 0], t0, t1)
assert can == [4,6,8, 5,10,12, 0,7, 1,11, 2,9, 3,13, 15,14]
def test_riemann_invariants():
baser, gensr = riemann_bsgs
# R^{d0 d1}_{d1 d0}; ord = [d0,-d0,d1,-d1]; g = [0,2,3,1,4,5]
# T_c = -R^{d0 d1}_{d0 d1}; can = [0,2,1,3,5,4]
g = Permutation([0,2,3,1,4,5])
can = canonicalize(g, list(range(2, 4)), 0, (baser, gensr, 1, 0))
assert can == [0,2,1,3,5,4]
# use a non minimal BSGS
can = canonicalize(g, list(range(2, 4)), 0, ([2, 0], [Permutation([1,0,2,3,5,4]), Permutation([2,3,0,1,4,5])], 1, 0))
assert can == [0,2,1,3,5,4]
"""
The following tests in test_riemann_invariants and in
test_riemann_invariants1 have been checked using xperm.c from XPerm in
in [1] and with an older version contained in [2]
[1] xperm.c part of xPerm written by J. M. Martin-Garcia
http://www.xact.es/index.html
[2] test_xperm.cc in cadabra by Kasper Peeters, http://cadabra.phi-sci.com/
"""
# R_d11^d1_d0^d5 * R^{d6 d4 d0}_d5 * R_{d7 d2 d8 d9} *
# R_{d10 d3 d6 d4} * R^{d2 d7 d11}_d1 * R^{d8 d9 d3 d10}
# ord: contravariant d_k ->2*k, covariant d_k -> 2*k+1
# T_c = R^{d0 d1 d2 d3} * R_{d0 d1}^{d4 d5} * R_{d2 d3}^{d6 d7} *
# R_{d4 d5}^{d8 d9} * R_{d6 d7}^{d10 d11} * R_{d8 d9 d10 d11}
g = Permutation([23,2,1,10,12,8,0,11,15,5,17,19,21,7,13,9,4,14,22,3,16,18,6,20,24,25])
can = canonicalize(g, list(range(24)), 0, (baser, gensr, 6, 0))
assert can == [0,2,4,6,1,3,8,10,5,7,12,14,9,11,16,18,13,15,20,22,17,19,21,23,24,25]
# use a non minimal BSGS
can = canonicalize(g, list(range(24)), 0, ([2, 0], [Permutation([1,0,2,3,5,4]), Permutation([2,3,0,1,4,5])], 6, 0))
assert can == [0,2,4,6,1,3,8,10,5,7,12,14,9,11,16,18,13,15,20,22,17,19,21,23,24,25]
g = Permutation([0,2,5,7,4,6,9,11,8,10,13,15,12,14,17,19,16,18,21,23,20,22,25,27,24,26,29,31,28,30,33,35,32,34,37,39,36,38,1,3,40,41])
can = canonicalize(g, list(range(40)), 0, (baser, gensr, 10, 0))
assert can == [0,2,4,6,1,3,8,10,5,7,12,14,9,11,16,18,13,15,20,22,17,19,24,26,21,23,28,30,25,27,32,34,29,31,36,38,33,35,37,39,40,41]
@XFAIL
def test_riemann_invariants1():
skip('takes too much time')
baser, gensr = riemann_bsgs
g = Permutation([17, 44, 11, 3, 0, 19, 23, 15, 38, 4, 25, 27, 43, 36, 22, 14, 8, 30, 41, 20, 2, 10, 12, 28, 18, 1, 29, 13, 37, 42, 33, 7, 9, 31, 24, 26, 39, 5, 34, 47, 32, 6, 21, 40, 35, 46, 45, 16, 48, 49])
can = canonicalize(g, list(range(48)), 0, (baser, gensr, 12, 0))
assert can == [0, 2, 4, 6, 1, 3, 8, 10, 5, 7, 12, 14, 9, 11, 16, 18, 13, 15, 20, 22, 17, 19, 24, 26, 21, 23, 28, 30, 25, 27, 32, 34, 29, 31, 36, 38, 33, 35, 40, 42, 37, 39, 44, 46, 41, 43, 45, 47, 48, 49]
g = Permutation([0,2,4,6, 7,8,10,12, 14,16,18,20, 19,22,24,26, 5,21,28,30, 32,34,36,38, 40,42,44,46, 13,48,50,52, 15,49,54,56, 17,33,41,58, 9,23,60,62, 29,35,63,64, 3,45,66,68, 25,37,47,57, 11,31,69,70, 27,39,53,72, 1,59,73,74, 55,61,67,76, 43,65,75,78, 51,71,77,79, 80,81])
can = canonicalize(g, list(range(80)), 0, (baser, gensr, 20, 0))
assert can == [0,2,4,6, 1,8,10,12, 3,14,16,18, 5,20,22,24, 7,26,28,30, 9,15,32,34, 11,36,23,38, 13,40,42,44, 17,39,29,46, 19,48,43,50, 21,45,52,54, 25,56,33,58, 27,60,53,62, 31,51,64,66, 35,65,47,68, 37,70,49,72, 41,74,57,76, 55,67,59,78, 61,69,71,75, 63,79,73,77, 80,81]
def test_riemann_products():
baser, gensr = riemann_bsgs
base1, gens1 = get_symmetric_group_sgs(1)
base2, gens2 = get_symmetric_group_sgs(2)
base2a, gens2a = get_symmetric_group_sgs(2, 1)
# R^{a b d0}_d0 = 0
g = Permutation([0,1,2,3,4,5])
can = canonicalize(g, list(range(2,4)), 0, (baser, gensr, 1, 0))
assert can == 0
# R^{d0 b a}_d0 ; ord = [a,b,d0,-d0}; g = [2,1,0,3,4,5]
# T_c = -R^{a d0 b}_d0; can = [0,2,1,3,5,4]
g = Permutation([2,1,0,3,4,5])
can = canonicalize(g, list(range(2, 4)), 0, (baser, gensr, 1, 0))
assert can == [0,2,1,3,5,4]
# R^d1_d2^b_d0 * R^{d0 a}_d1^d2; ord=[a,b,d0,-d0,d1,-d1,d2,-d2]
# g = [4,7,1,3,2,0,5,6,8,9]
# T_c = -R^{a d0 d1 d2}* R^b_{d0 d1 d2}
# can = [0,2,4,6,1,3,5,7,9,8]
g = Permutation([4,7,1,3,2,0,5,6,8,9])
can = canonicalize(g, list(range(2,8)), 0, (baser, gensr, 2, 0))
assert can == [0,2,4,6,1,3,5,7,9,8]
can1 = canonicalize_naive(g, list(range(2,8)), 0, (baser, gensr, 2, 0))
assert can == can1
# A symmetric commuting
# R^{d6 d5}_d2^d1 * R^{d4 d0 d2 d3} * A_{d6 d0} A_{d3 d1} * A_{d4 d5}
# g = [12,10,5,2, 8,0,4,6, 13,1, 7,3, 9,11,14,15]
# T_c = -R^{d0 d1 d2 d3} * R_d0^{d4 d5 d6} * A_{d1 d4}*A_{d2 d5}*A_{d3 d6}
g = Permutation([12,10,5,2,8,0,4,6,13,1,7,3,9,11,14,15])
can = canonicalize(g, list(range(14)), 0, ((baser,gensr,2,0)), (base2,gens2,3,0))
assert can == [0, 2, 4, 6, 1, 8, 10, 12, 3, 9, 5, 11, 7, 13, 15, 14]
# R^{d2 a0 a2 d0} * R^d1_d2^{a1 a3} * R^{a4 a5}_{d0 d1}
# ord = [a0,a1,a2,a3,a4,a5,d0,-d0,d1,-d1,d2,-d2]
# 0 1 2 3 4 5 6 7 8 9 10 11
# can = [0, 6, 2, 8, 1, 3, 7, 10, 4, 5, 9, 11, 12, 13]
# T_c = R^{a0 d0 a2 d1}*R^{a1 a3}_d0^d2*R^{a4 a5}_{d1 d2}
g = Permutation([10,0,2,6,8,11,1,3,4,5,7,9,12,13])
can = canonicalize(g, list(range(6,12)), 0, (baser, gensr, 3, 0))
assert can == [0, 6, 2, 8, 1, 3, 7, 10, 4, 5, 9, 11, 12, 13]
#can1 = canonicalize_naive(g, list(range(6,12)), 0, (baser, gensr, 3, 0))
#assert can == can1
# A^n_{i, j} antisymmetric in i,j
# A_m0^d0_a1 * A_m1^a0_d0; ord = [m0,m1,a0,a1,d0,-d0]
# g = [0,4,3,1,2,5,6,7]
# T_c = -A_{m a1}^d0 * A_m1^a0_d0
# can = [0,3,4,1,2,5,7,6]
base, gens = bsgs_direct_product(base1, gens1, base2a, gens2a)
dummies = list(range(4, 6))
g = Permutation([0,4,3,1,2,5,6,7])
can = canonicalize(g, dummies, 0, (base, gens, 2, 0))
assert can == [0, 3, 4, 1, 2, 5, 7, 6]
# A^n_{i, j} symmetric in i,j
# A^m0_a0^d2 * A^n0_d2^d1 * A^n1_d1^d0 * A_{m0 d0}^a1
# ordering: first the free indices; then first n, then d
# ord=[n0,n1,a0,a1, m0,-m0,d0,-d0,d1,-d1,d2,-d2]
# 0 1 2 3 4 5 6 7 8 9 10 11]
# g = [4,2,10, 0,11,8, 1,9,6, 5,7,3, 12,13]
# if the dummy indices m_i and d_i were separated,
# one gets
# T_c = A^{n0 d0 d1} * A^n1_d0^d2 * A^m0^a0_d1 * A_m0^a1_d2
# can = [0, 6, 8, 1, 7, 10, 4, 2, 9, 5, 3, 11, 12, 13]
# If they are not, so can is
# T_c = A^{n0 m0 d0} A^n1_m0^d1 A^{d2 a0}_d0 A_d2^a1_d1
# can = [0, 4, 6, 1, 5, 8, 10, 2, 7, 11, 3, 9, 12, 13]
# case with single type of indices
base, gens = bsgs_direct_product(base1, gens1, base2, gens2)
dummies = list(range(4, 12))
g = Permutation([4,2,10, 0,11,8, 1,9,6, 5,7,3, 12,13])
can = canonicalize(g, dummies, 0, (base, gens, 4, 0))
assert can == [0, 4, 6, 1, 5, 8, 10, 2, 7, 11, 3, 9, 12, 13]
# case with separated indices
dummies = [list(range(4, 6)), list(range(6,12))]
sym = [0, 0]
can = canonicalize(g, dummies, sym, (base, gens, 4, 0))
assert can == [0, 6, 8, 1, 7, 10, 4, 2, 9, 5, 3, 11, 12, 13]
# case with separated indices with the second type of index
# with antisymmetric metric: there is a sign change
sym = [0, 1]
can = canonicalize(g, dummies, sym, (base, gens, 4, 0))
assert can == [0, 6, 8, 1, 7, 10, 4, 2, 9, 5, 3, 11, 13, 12]
def test_graph_certificate():
# test tensor invariants constructed from random regular graphs;
# checked graph isomorphism with networkx
import random
def randomize_graph(size, g):
p = list(range(size))
random.shuffle(p)
g1a = {}
        for k, v in g.items():
g1a[p[k]] = [p[i] for i in v]
return g1a
g1 = {0: [2, 3, 7], 1: [4, 5, 7], 2: [0, 4, 6], 3: [0, 6, 7], 4: [1, 2, 5], 5: [1, 4, 6], 6: [2, 3, 5], 7: [0, 1, 3]}
g2 = {0: [2, 3, 7], 1: [2, 4, 5], 2: [0, 1, 5], 3: [0, 6, 7], 4: [1, 5, 6], 5: [1, 2, 4], 6: [3, 4, 7], 7: [0, 3, 6]}
c1 = graph_certificate(g1)
c2 = graph_certificate(g2)
assert c1 != c2
g1a = randomize_graph(8, g1)
c1a = graph_certificate(g1a)
assert c1 == c1a
g1 = {0: [8, 1, 9, 7], 1: [0, 9, 3, 4], 2: [3, 4, 6, 7], 3: [1, 2, 5, 6], 4: [8, 1, 2, 5], 5: [9, 3, 4, 7], 6: [8, 2, 3, 7], 7: [0, 2, 5, 6], 8: [0, 9, 4, 6], 9: [8, 0, 5, 1]}
g2 = {0: [1, 2, 5, 6], 1: [0, 9, 5, 7], 2: [0, 4, 6, 7], 3: [8, 9, 6, 7], 4: [8, 2, 6, 7], 5: [0, 9, 8, 1], 6: [0, 2, 3, 4], 7: [1, 2, 3, 4], 8: [9, 3, 4, 5], 9: [8, 1, 3, 5]}
c1 = graph_certificate(g1)
c2 = graph_certificate(g2)
assert c1 != c2
g1a = randomize_graph(10, g1)
c1a = graph_certificate(g1a)
assert c1 == c1a
| bsd-3-clause | 4,903,138,451,708,447,000 | 42.955674 | 278 | 0.52035 | false |
klmitch/keystone | keystone/policy/backends/rules.py | 3 | 2734 | # Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy engine for keystone"""
from oslo_config import cfg
from oslo_log import log
from oslo_policy import policy as common_policy
from keystone import exception
from keystone import policy
CONF = cfg.CONF
LOG = log.getLogger(__name__)
_ENFORCER = None
def reset():
global _ENFORCER
_ENFORCER = None
def init():
global _ENFORCER
if not _ENFORCER:
_ENFORCER = common_policy.Enforcer(CONF)
def enforce(credentials, action, target, do_raise=True):
"""Verifies that the action is valid on the target in this context.
:param credentials: user credentials
:param action: string representing the action to be checked, which should
be colon separated for clarity.
:param target: dictionary representing the object of the action for object
creation this should be a dictionary representing the
location of the object e.g. {'project_id':
object.project_id}
:raises keystone.exception.Forbidden: If verification fails.
Actions should be colon separated for clarity. For example:
* identity:list_users
"""
init()
# Add the exception arguments if asked to do a raise
extra = {}
if do_raise:
extra.update(exc=exception.ForbiddenAction, action=action,
do_raise=do_raise)
return _ENFORCER.enforce(action, target, credentials, **extra)
class Policy(policy.PolicyDriverV8):
def enforce(self, credentials, action, target):
LOG.debug('enforce %(action)s: %(credentials)s', {
'action': action,
'credentials': credentials})
enforce(credentials, action, target)
def create_policy(self, policy_id, policy):
raise exception.NotImplemented()
def list_policies(self):
raise exception.NotImplemented()
def get_policy(self, policy_id):
raise exception.NotImplemented()
def update_policy(self, policy_id, policy):
raise exception.NotImplemented()
def delete_policy(self, policy_id):
raise exception.NotImplemented()
| apache-2.0 | -2,786,660,870,483,186,000 | 28.717391 | 78 | 0.678493 | false |
Percona-QA/package-testing | molecule/psmdb40-install/molecule/default/tests/test_psmdb40_install.py | 1 | 4946 | import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
DEB_PACKAGES = ['percona-server-mongodb', 'percona-server-mongodb-server', 'percona-server-mongodb-mongos',
'percona-server-mongodb-shell', 'percona-server-mongodb-tools', 'percona-server-mongodb-dbg']
RPM_PACKAGES = ['percona-server-mongodb', 'percona-server-mongodb-server', 'percona-server-mongodb-mongos',
'percona-server-mongodb-shell', 'percona-server-mongodb-tools', 'percona-server-mongodb-debuginfo']
RPM_NEW_CENTOS_PACKAGES = ['percona-server-mongodb', 'percona-server-mongodb-mongos-debuginfo',
'percona-server-mongodb-server-debuginfo', 'percona-server-mongodb-shell-debuginfo',
'percona-server-mongodb-tools-debuginfo', 'percona-server-mongodb-debugsource']
BINARIES = ['mongo', 'mongod', 'mongos', 'bsondump', 'mongoexport',
'mongofiles', 'mongoimport', 'mongorestore', 'mongotop', 'mongostat']
PSMDB40_VER = "4.0"
def test_mongod_service(host):
mongod = host.service("mongod")
assert mongod.is_running
def test_package_script(host):
with host.sudo():
result = host.run("/package-testing/package_check.sh psmdb40")
print(result.stdout)
print(result.stderr)
assert result.rc == 0, result.stderr
def test_version_script(host):
with host.sudo():
result = host.run("/package-testing/version_check.sh psmdb40")
print(result.stdout)
print(result.stderr)
assert result.rc == 0, result.stderr
@pytest.mark.parametrize("package", DEB_PACKAGES)
def test_deb_packages(host, package):
os = host.system_info.distribution
if os.lower() in ["redhat", "centos", 'rhel']:
pytest.skip("This test only for Debian based platforms")
pkg = host.package(package)
assert pkg.is_installed
assert PSMDB40_VER in pkg.version
# TODO add check that minor version is correct
@pytest.mark.parametrize("package", RPM_PACKAGES)
def test_rpm_packages(host, package):
os = host.system_info.distribution
if os in ["debian", "ubuntu"]:
pytest.skip("This test only for RHEL based platforms")
if float(host.system_info.release) >= 8.0:
pytest.skip("Only for centos7 tests")
pkg = host.package(package)
assert pkg.is_installed
assert PSMDB40_VER in pkg.version
@pytest.mark.parametrize("package", RPM_NEW_CENTOS_PACKAGES)
def test_rpm8_packages(host, package):
os = host.system_info.distribution
if os in ["debian", "ubuntu"]:
pytest.skip("This test only for RHEL based platforms")
if float(host.system_info.release) < 8.0:
pytest.skip("Only for centos7 tests")
pkg = host.package(package)
assert pkg.is_installed
assert PSMDB40_VER in pkg.version
@pytest.mark.parametrize("binary", BINARIES)
def test_binary_version(host, binary):
cmd = '{} --version|head -n1|grep -c "{}"'.format(binary, PSMDB40_VER)
result = host.run(cmd)
assert result.rc == 0, result.stdout
def test_functional(host):
with host.sudo():
result = host.run("/package-testing/scripts/psmdb_test.sh 4.0")
assert result.rc == 0, result.stderr
@pytest.mark.parametrize("encryption", ['keyfile', 'vault'])
def test_encryption(host, encryption):
with host.sudo():
result = host.run("/package-testing/scripts/psmdb_encryption/psmdb-encryption-test.sh {}".format(encryption))
print(result.stdout)
print(result.stderr)
assert result.rc == 0, result.stderr
def test_enable_auth(host):
cmd = "/package-testing/scripts/psmdb_set_auth.sh"
with host.sudo():
result = host.run(cmd)
print(result.stdout)
print(result.stderr)
assert result.rc == 0, result.stdout
def test_bats(host):
cmd = "/usr/local/bin/bats /package-testing/bats/mongo-init-scripts.bats"
with host.sudo():
result = host.run(cmd)
print(result.stdout)
print(result.stderr)
assert result.rc == 0, result.stdout
def test_bats_with_numactl(host):
with host.sudo():
os = host.system_info.distribution
cmd = 'apt-get install numactl -y'
if os.lower() in ["redhat", "centos", 'rhel']:
cmd = 'yum install numactl -y'
result = host.run(cmd)
assert result.rc == 0, result.stdout
cmd = "/usr/local/bin/bats /package-testing/bats/mongo-init-scripts.bats"
result = host.run(cmd)
print(result.stdout)
print(result.stderr)
assert result.rc == 0, result.stdout
def test_start_mongod_service(host):
cmd = "service mongod start"
with host.sudo():
result = host.run(cmd)
print(result.stdout)
print(result.stderr)
assert result.rc == 0, result.stdout
mongod = host.service("mongod")
assert mongod.is_running
| gpl-2.0 | 2,460,827,243,178,622,000 | 33.830986 | 117 | 0.664982 | false |
djpine/pyman | Book/chap8/Supporting Materials/FitOscDecay.py | 3 | 2239 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec # for unequal plot boxes
import scipy.optimize
# define function to calculate reduced chi-squared
def RedChiSqr(func, x, y, dy, params):
resids = y - func(x, *params)
chisq = ((resids/dy)**2).sum()
return chisq/float(x.size-params.size)
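# (Added note.) A reduced chi-squared near 1 means the model describes the data at the
# level of the stated uncertainties; values much larger than 1 indicate a poor fit or
# underestimated error bars, and values much smaller than 1 suggest overestimated errors.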
# define fitting function
def SineGaussDecay(t, A, B, C, tau, omega):
y = A * (1.0 + B*np.cos(omega*t)) * np.exp(-0.5*t*t/(tau*tau)) + C
return y
# read in spectrum from data file
t, decay, unc = np.loadtxt("OscData.txt", skiprows=4, unpack=True)
# initial values for fitting parameters (guesses)
A0 = 15.0
B0 = 0.6
C0 = 1.2*A0
tau0 = 16.0
omega0 = 2.0 * (2.0*np.pi/tau0)
#omega0 = 2.34
# fit data using SciPy's Levenberg-Marquart method
nlfit, nlpcov = scipy.optimize.curve_fit(SineGaussDecay,
t, decay, p0=[A0, B0, C0, tau0, omega0], sigma=unc)
# calculate reduced chi-squared
rchi = RedChiSqr(SineGaussDecay, t, decay, unc, nlfit)
# create fitting function from fitted parameters
A, B, C, tau, omega = nlfit
t_fit = np.linspace(0.0, 1.02*t[-1], 512)
d_fit = SineGaussDecay(t_fit, A, B, C, tau, omega)
# Create figure window to plot data
fig = plt.figure(1, figsize=(8,8)) # extra length for residuals
gs = gridspec.GridSpec(2, 1, height_ratios=[6, 2])
# Top plot: data and fit
ax1 = fig.add_subplot(gs[0])
ax1.plot(t_fit, d_fit)
ax1.errorbar(t, decay, yerr=unc, fmt='or', ecolor='black', ms=4)
ax1.set_xlabel('time (ms)')
ax1.set_ylabel('decay (arb units)')
ax1.text(0.55, 0.8, 'A = {0:0.1f}\nB = {1:0.3f}\nC = {2:0.1f}'.format(A, B, C),
transform = ax1.transAxes)
ax1.text(0.75, 0.8, '$\\tau$ = {0:0.1f}\n$\omega$ = {1:0.3f}\n$\chi^2$ = {2:0.3f}'.format(tau, omega, rchi),
transform = ax1.transAxes)
ax1.set_title('$d(t) = A (1+B\,\cos\,\omega t) e^{-t^2/2\\tau^2} + C$')
# Bottom plot: residuals
resids = decay - SineGaussDecay(t, A, B, C, tau, omega)
ax2 = fig.add_subplot(gs[1])
ax2.axhline(color="gray")
ax2.errorbar(t, resids, yerr = unc, ecolor="black", fmt="ro", ms=4)
ax2.set_xlabel('time (ms)')
ax2.set_ylabel('residuals')
ax2.set_ylim(-5, 5)
yticks = (-5, 0, 5)
ax2.set_yticks(yticks)
plt.savefig('FitOscDecay.pdf')
plt.show()
| cc0-1.0 | -4,827,395,123,857,624,000 | 30.985714 | 109 | 0.652077 | false |
FDewaleyne/rhns-utils | db-tools/rhns-package-infos.py | 1 | 3424 | #!/usr/bin/python
# script aimed at running a couple sql commands against satellite 5.6 to fetch info for a package we have ID from
__author__ = "Felix Dewaleyne"
__credits__ = ["Felix Dewaleyne"]
__license__ = "GPL"
__version__ = "0.3"
__maintainer__ = "Felix Dewaleyne"
__email__ = "[email protected]"
__status__ = "beta"
def package_details(packageid):
"""displays the details for that package id"""
#db access
import sys
sys.path.append("/usr/share/rhn")
try:
import spacewalk.common.rhnConfig as rhnConfig
import spacewalk.server.rhnSQL as rhnSQL
except ImportError:
try:
import common.rhnConfig as rhnConfig
import server.rhnSQL as rhnSQL
except ImportError:
print "Couldn't load the modules required to connect to the db"
sys.exit(1)
rhnConfig.initCFG()
rhnSQL.initDB()
query="""
select
rp.id as "package_id",
rpn.name||'-'||rpe.version||'-'||rpe.release||'.'||rpa.label as "package",
rc.label as "channel_label",
rc.id as "channel_id",
coalesce((select name from rhnpackageprovider rpp where rpp.id = rpk.provider_id),'Unknown') as "provider"
from rhnpackage rp
inner join rhnpackagename rpn on rpn.id = rp.name_id
inner join rhnpackageevr rpe on rpe.id = rp.evr_id
inner join rhnpackagearch rpa on rpa.id = rp.package_arch_id
left outer join rhnchannelpackage rcp on rcp.package_id = rp.id
left outer join rhnchannel rc on rc.id = rcp.channel_id
left outer join rhnpackagekeyassociation rpka on rpka.package_id = rp.id
left outer join rhnpackagekey rpk on rpk.id = rpka.key_id
where rp.id = :packageid
order by 2, 3
"""
cursor = rhnSQL.prepare(query)
cursor.execute(packageid=packageid)
rows = cursor.fetchall_dict()
if not rows is None:
c = 0
print "Package %d : %s" % (rows[0]['package_id'], rows[0]['package'])
        # dicts keyed by channel id: a plain list cannot be indexed by arbitrary channel ids
        pkg_channels = {}
        pkg_provider = {}
for row in rows:
c += 1
            if row['channel_id'] is not None:
pkg_channels[row['channel_id']] = row['channel_label']
pkg_provider[row['channel_id']] = row['provider']
else:
pkg_channels[0] = "Not in a channel"
pkg_provider[0] = row['provider']
print "\r%s of %s" % (str(c), str(len(rows))),
print "Provided by channels : %s" % (', '.join(pkg_channels))
print "With providers (same order): %s" % (', '.join(pkg_provider))
else:
print "no package found for the id %d" % (packageid)
#the main function of the program
def main(versioninfo):
import optparse
    parser = optparse.OptionParser(description="This script will output information related to a specific package, using the database directly", version="%prog "+versioninfo)
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Enables debug output")
parser.add_option("-p", "--packageid",dest="packageid",type="int",action="store",help="the package ID to get info from")
(options, args) = parser.parse_args()
global verbose
verbose = options.verbose
if not options.packageid :
parser.error('A package ID is required.')
else:
package_details(options.packageid)
if __name__ == "__main__":
main(__version__)
| gpl-2.0 | -3,109,453,926,577,234,000 | 37.47191 | 175 | 0.622371 | false |
ChristopherRabotin/bungiesearch | bungiesearch/fields.py | 2 | 6140 | from django.template import Context, loader
from django.template.defaultfilters import striptags
from six import iteritems
from elasticsearch_dsl.analysis import Analyzer
class AbstractField(object):
'''
Represents an elasticsearch index field and values from given objects.
Currently does not support binary fields, but those can be created by manually providing a dictionary.
Values are extracted using the `model_attr` or `eval_as` attribute.
'''
meta_fields = ['_index', '_uid', '_type', '_id']
common_fields = ['index_name', 'store', 'index', 'boost', 'null_value', 'copy_to', 'type', 'fields']
@property
def fields(self):
try:
return self.fields
except:
raise NotImplementedError('Allowed fields are not defined.')
@property
def coretype(self):
try:
return self.coretype
except:
raise NotImplementedError('Core type is not defined!')
@property
def defaults(self):
'''
Stores default values.
'''
try:
return self.defaults
except:
return {}
def __init__(self, **args):
'''
Performs several checks to ensure that the provided attributes are valid. Will not check their values.
'''
if isinstance(self.coretype, list):
if 'coretype' not in args:
raise KeyError('{} can be represented as one of the following types: {}. Specify which to select as the `coretype` parameter.'.format(unicode(self), ', '.join(self.coretype)))
if args['coretype'] not in self.coretype:
raise KeyError('Core type {} is not supported by {}.'.format(args['coretype'], unicode(self)))
self.type = args.pop('coretype')
else:
self.type = self.coretype
self.model_attr = args.pop('model_attr', None)
self.eval_func = args.pop('eval_as', None)
self.template_name = args.pop('template', None)
for attr, value in iteritems(args):
if attr not in self.fields and attr not in AbstractField.common_fields:
raise KeyError('Attribute `{}` is not allowed for core type {}.'.format(attr, self.coretype))
setattr(self, attr, value)
for attr, value in iteritems(self.defaults):
if not hasattr(self, attr):
setattr(self, attr, value)
def value(self, obj):
'''
Computes the value of this field to update the index.
:param obj: object instance, as a dictionary or as a model instance.
'''
if self.template_name:
t = loader.select_template([self.template_name])
return t.render(Context({'object': obj}))
if self.eval_func:
try:
return eval(self.eval_func)
except Exception as e:
raise type(e)('Could not compute value of {} field (eval_as=`{}`): {}.'.format(unicode(self), self.eval_func, unicode(e)))
elif self.model_attr:
if isinstance(obj, dict):
return obj[self.model_attr]
current_obj = getattr(obj, self.model_attr)
if callable(current_obj):
return current_obj()
else:
return current_obj
else:
raise KeyError('{0} gets its value via a model attribute, an eval function, a template, or is prepared in a method '
'call but none of `model_attr`, `eval_as,` `template,` `prepare_{0}` is provided.'.format(unicode(self)))
def json(self):
json = {}
for attr, val in iteritems(self.__dict__):
if attr in ('eval_func', 'model_attr', 'template_name'):
continue
elif attr in ('analyzer', 'index_analyzer', 'search_analyzer') and isinstance(val, Analyzer):
json[attr] = val.to_dict()
else:
json[attr] = val
return json
# All the following definitions could probably be done with better polymorphism.
class StringField(AbstractField):
coretype = 'string'
fields = ['doc_values', 'term_vector', 'norms', 'index_options', 'analyzer', 'index_analyzer', 'search_analyzer', 'include_in_all', 'ignore_above', 'position_offset_gap', 'fielddata', 'similarity']
defaults = {'analyzer': 'snowball'}
def value(self, obj):
val = super(StringField, self).value(obj)
if val is None:
return None
return striptags(val)
def __unicode__(self):
return 'StringField'
class NumberField(AbstractField):
coretype = ['float', 'double', 'byte', 'short', 'integer', 'long']
fields = ['doc_values', 'precision_step', 'include_in_all', 'ignore_malformed', 'coerce']
def __unicode__(self):
return 'NumberField'
class DateField(AbstractField):
coretype = 'date'
fields = ['format', 'doc_values', 'precision_step', 'include_in_all', 'ignore_malformed']
def __unicode__(self):
return 'DateField'
class BooleanField(AbstractField):
coretype = 'boolean'
fields = [] # No specific fields.
def __unicode__(self):
return 'BooleanField'
# Correspondence between a Django field and an elasticsearch field.
def django_field_to_index(field, **attr):
'''
Returns the index field type that would likely be associated with each Django type.
'''
dj_type = field.get_internal_type()
if dj_type in ('DateField', 'DateTimeField'):
return DateField(**attr)
elif dj_type in ('BooleanField', 'NullBooleanField'):
return BooleanField(**attr)
elif dj_type in ('DecimalField', 'FloatField'):
return NumberField(coretype='float', **attr)
elif dj_type in ('PositiveSmallIntegerField', 'SmallIntegerField'):
return NumberField(coretype='short', **attr)
elif dj_type in ('IntegerField', 'PositiveIntegerField', 'AutoField'):
return NumberField(coretype='integer', **attr)
    elif dj_type == 'BigIntegerField':
return NumberField(coretype='long', **attr)
return StringField(**attr)
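# Minimal usage sketch (illustrative only; the Django field and the
# 'created_at' model_attr below are assumptions, not part of this module):
#   from django.db import models
#   index_field = django_field_to_index(models.DateTimeField(), model_attr='created_at')
#   index_field.json()   # -> mapping fragment such as {'type': 'date'}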
| bsd-3-clause | 185,962,642,136,293,570 | 36.212121 | 201 | 0.605537 | false |
tsauerwein/c2cgeoportal | c2cgeoportal/scaffolds/update/CONST_alembic/versions/20137477bd02_update_icons_url.py | 2 | 3713 | # -*- coding: utf-8 -*-
# Copyright (c) 2011-2014, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
"""Update icons url
Revision ID: 20137477bd02
Revises: 415746eb9f6
Create Date: 2014-12-10 17:50:36.176587
"""
# revision identifiers, used by Alembic.
revision = '20137477bd02'
down_revision = '1d5d4abfebd1'
from alembic import op, context
def upgrade():
schema = context.get_context().config.get_main_option('schema')
updates = [
"UPDATE %(schema)s.%(table)s SET %(column)s = 'static:///' || %(column)s "
"WHERE (%(column)s IS NOT NULL) AND (NOT %(column)s = '') "
"AND NOT (%(column)s LIKE 'http%%') ""AND NOT (%(column)s LIKE '/%%')",
"UPDATE %(schema)s.%(table)s SET %(column)s = 'static://' || %(column)s "
"WHERE (%(column)s IS NOT NULL) AND (NOT %(column)s = '') "
"AND NOT (%(column)s LIKE 'http%%') AND NOT (%(column)s LIKE 'static://%%')",
]
for update in updates:
op.execute(update % {
"schema": schema, "table": "theme", "column": "icon"
})
op.execute(update % {
"schema": schema, "table": "layerv1", "column": "icon"
})
op.execute(update % {
"schema": schema, "table": "layerv1", "column": "kml"
})
op.execute(update % {
"schema": schema, "table": "layerv1", "column": "legend_image"
})
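# Illustrative effect of upgrade() on column values (hypothetical rows):
#   'img/icon.png'      -> 'static:///img/icon.png'   (first UPDATE)
#   '/img/icon.png'     -> 'static:///img/icon.png'   (second UPDATE prepends 'static://')
#   'http://host/x.png' -> unchanged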
def downgrade():
schema = context.get_context().config.get_main_option('schema')
updates = [
"UPDATE %(schema)s.%(table)s SET %(column)s = substring(%(column)s from 11) "
"WHERE %(column)s LIKE 'static:///%%'",
"UPDATE %(schema)s.%(table)s SET %(column)s = substring(%(column)s from 10) "
"WHERE %(column)s LIKE 'static://%%'",
]
for update in updates:
op.execute(update % {
"schema": schema, "table": "theme", "column": "icon"
})
op.execute(update % {
"schema": schema, "table": "layerv1", "column": "icon"
})
op.execute(update % {
"schema": schema, "table": "layerv1", "column": "kml"
})
op.execute(update % {
"schema": schema, "table": "layerv1", "column": "legend_image"
})
| bsd-2-clause | -558,641,575,320,640,640 | 39.358696 | 85 | 0.640991 | false |
notfoundsam/raspberry | app/__init__.py | 1 | 15732 | import os, functools, json, logging
from flask import Flask, redirect, session, request, g, jsonify, make_response, abort, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, login_user, logout_user, current_user, login_required
from flask_socketio import SocketIO, emit, disconnect
from flask_cors import CORS
from flask_migrate import Migrate
from app.bootstrap import Config
from threading import Lock
flask_app = Flask(__name__)
config = Config(flask_app)
CORS(flask_app, supports_credentials=True)
db = SQLAlchemy(flask_app)
mg = Migrate(flask_app, db)
so = SocketIO(flask_app)
lm = LoginManager()
lm.init_app(flask_app)
from app import service
from app.helpers import RcHelper, ButtonHelper, NodeHelper, ArduinoHelper, RadioHelper
from app.models import User
serv = service.Service(config)
@flask_app.before_first_request
def activate_services():
serv.activateDiscoverService()
serv.activateNodeService()
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
@lm.unauthorized_handler
def unauthorized():
return make_response(jsonify({'error': 'Unauthorized'}), 401)
@flask_app.before_request
def before_request():
g.user = current_user
@flask_app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@flask_app.errorhandler(400)
def bad_request(error):
return make_response(jsonify({'error': 'Bad request'}), 400)
@flask_app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(flask_app.root_path, 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
@flask_app.route('/api/v1/login', methods=['POST'])
def login():
if g.user is not None and g.user.is_authenticated:
return jsonify({'result': True})
if request.json and 'username' in request.json and 'password' in request.json:
username = request.json['username']
password = request.json['password']
user = User.query.filter_by(username=username).first()
if user is not None and user.password == password:
session['remember_me'] = True
login_user(user)
return jsonify({'result': True})
return jsonify({'result': False}), 403
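# Illustrative login request (hypothetical host and credentials); the session
# cookie returned here is what authenticates the other /api/v1 endpoints:
#   curl -c cookies.txt -H "Content-Type: application/json" \
#        -d '{"username": "admin", "password": "secret"}' http://raspberry.local/api/v1/login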
@flask_app.route('/api/v1/logout', methods=['GET'])
def logout():
logout_user()
return jsonify({'result': True})
@flask_app.route('/api/v1/user', methods=['GET'])
@login_required
def get_user():
return jsonify({'username': g.user.username})
# Rc routes
@flask_app.route('/api/v1/rcs', methods=['GET'])
@login_required
def get_rcs():
rch = RcHelper()
return jsonify({'rcs': rch.getRcs()})
@flask_app.route('/api/v1/rcs', methods=['POST'])
@login_required
def create_rc():
if not request.json or not 'name' in request.json or not 'icon' in request.json or not 'order' in request.json or not 'public' in request.json:
abort(400)
rch = RcHelper()
rc = rch.createRc(request.json)
so.emit('updateRcs', {'rcs': rch.getRcs()}, broadcast=True)
return jsonify({'rc': rc}), 201
@flask_app.route('/api/v1/rcs/<int:rc_id>', methods=['GET'])
@login_required
def get_rc(rc_id):
rch = RcHelper(rc_id)
rc = rch.getRc()
if rc is None:
abort(404)
return jsonify({'rc': rc})
@flask_app.route('/api/v1/rcs/<int:rc_id>', methods=['PUT'])
@login_required
def update_rc(rc_id):
rch = RcHelper(rc_id)
if not request.json or not 'name' in request.json or not 'icon' in request.json or not 'order' in request.json or not 'public' in request.json:
abort(400)
rc = rch.updateRc(request.json)
if rc is None:
abort(404)
so.emit('updateRcs', {'rcs': rch.getRcs()}, broadcast=True)
return jsonify({'rc': rc})
@flask_app.route('/api/v1/rcs/<int:rc_id>', methods=['DELETE'])
@login_required
def delete_rc(rc_id):
rch = RcHelper(rc_id)
result = rch.deleteRc()
if result is None:
abort(404)
so.emit('updateRcs', {'rcs': rch.getRcs()}, broadcast=True)
return jsonify({'result': result})
@flask_app.route('/api/v1/rcs/<int:rc_id>/buttons', methods=['GET'])
@login_required
def get_rc_buttons(rc_id):
rch = RcHelper(rc_id)
buttons = rch.getButtons()
if buttons is None:
abort(404)
return jsonify({'buttons': buttons})
# Button routes
@flask_app.route('/api/v1/buttons', methods=['POST'])
@login_required
def create_button():
bh = ButtonHelper()
if not request.json or not 'rc_id' in request.json or not 'name' in request.json or not 'order_hor' in request.json or not 'order_ver' in request.json or not 'color' in request.json or not 'message' in request.json or not 'type' in request.json or not 'radio_id' in request.json:
abort(400)
button = bh.createButton(request.json)
if button is None:
abort(404)
rch = RcHelper(request.json['rc_id'])
so.emit('updateButtons', {'rc_id': button['rc_id'], 'buttons': rch.getButtons()}, broadcast=True)
return jsonify({'button': button}), 201
@flask_app.route('/api/v1/buttons/<int:btn_id>', methods=['GET'])
@login_required
def get_button(btn_id):
bh = ButtonHelper(btn_id)
button = bh.getButton()
if button is None:
abort(404)
return jsonify({'button': button})
@flask_app.route('/api/v1/buttons/<int:btn_id>', methods=['PUT'])
@login_required
def update_button(btn_id):
bh = ButtonHelper(btn_id)
if not request.json or not 'name' in request.json or not 'order_hor' in request.json or not 'order_ver' in request.json or not 'color' in request.json or not 'message' in request.json or not 'type' in request.json or not 'radio_id' in request.json:
abort(400)
button = bh.updateButton(request.json)
if button is None:
abort(404)
rch = RcHelper(bh.get().rc_id)
so.emit('updateButtons', {'rc_id': button['rc_id'], 'buttons': rch.getButtons()}, broadcast=True)
return jsonify({'button': button})
@flask_app.route('/api/v1/buttons/<int:btn_id>', methods=['DELETE'])
@login_required
def delete_button(btn_id):
bh = ButtonHelper(btn_id)
button = bh.deleteButton()
if button is None:
abort(404)
so.emit('updateButtons', {'rc_id': button['rc_id'], 'buttons': bh.getButtons()}, broadcast=True)
return jsonify({'result': True})
@flask_app.route('/api/v1/buttons/<int:btn_id>/push', methods=['GET'])
@login_required
def push_button(btn_id):
bh = ButtonHelper(btn_id)
button = bh.getButton()
if bh.get() is None:
abort(404)
if bh.get().type == 'radio':
logging.info(bh.getHostName())
event = {
'event': 'pushButton',
'user_id': g.user.id,
'button_id': bh.get().id,
'host_name': bh.getHostName()
}
result = serv.node_sevice.pushToNode(event)
return jsonify({'result': result})
# Node routes
@flask_app.route('/api/v1/nodes', methods=['GET'])
@login_required
def get_nodes():
nh = NodeHelper()
return jsonify({'nodes': nh.getNodes()})
@flask_app.route('/api/v1/nodes', methods=['POST'])
@login_required
def create_node():
if not request.json or not 'name' in request.json or not 'host_name' in request.json or not 'order' in request.json:
abort(400)
nh = NodeHelper()
node = nh.createNode(request.json)
so.emit('updateNodes', {'nodes': nh.getNodes()}, broadcast=True)
return jsonify({'node': node}), 201
@flask_app.route('/api/v1/nodes/<int:node_id>', methods=['GET'])
@login_required
def get_node(node_id):
nh = NodeHelper(node_id)
node = nh.getNode()
if node is None:
abort(404)
return jsonify({'node': node})
@flask_app.route('/api/v1/nodes/<int:node_id>', methods=['PUT'])
@login_required
def update_node(node_id):
nh = NodeHelper(node_id)
if not request.json or not 'host_name' in request.json or not 'order' in request.json:
abort(400)
node = nh.updateNode(request.json)
if node is None:
abort(404)
so.emit('updateNodes', {'nodes': nh.getNodes()}, broadcast=True)
return jsonify({'node': node})
@flask_app.route('/api/v1/nodes/<int:node_id>', methods=['DELETE'])
@login_required
def delete_node(node_id):
nh = NodeHelper(node_id)
result = nh.deleteNode()
if result is None:
abort(404)
so.emit('updateNodes', {'nodes': nh.getNodes()}, broadcast=True)
return jsonify({'result': result})
# Arduino routes
@flask_app.route('/api/v1/arduinos', methods=['GET'])
@login_required
def get_arduinos():
ah = ArduinoHelper()
arduinos = ah.getArduinos()
if arduinos is None:
abort(404)
return jsonify({'arduinos': arduinos})
@flask_app.route('/api/v1/arduinos', methods=['POST'])
@login_required
def create_arduino():
ah = ArduinoHelper()
if not request.json or not 'usb' in request.json or not 'node_id' in request.json or not 'name' in request.json or not 'order' in request.json:
abort(400)
arduino = ah.createArduino(request.json)
if arduino is None:
abort(404)
so.emit('updateArduinos', {'node_id': arduino['node_id'], 'arduinos': ah.getArduinos()}, broadcast=True)
event = {
'event': 'restart',
'host_name': ah.getNode().host_name
}
if serv.node_sevice.pushToNode(event) == False:
pass
# so.emit('recievedIr', {'result': 'error', 'errors': 'Node is offline'})
return jsonify({'arduino': arduino}), 201
@flask_app.route('/api/v1/arduinos/<int:arduino_id>', methods=['GET'])
@login_required
def get_arduino(arduino_id):
ah = ArduinoHelper(arduino_id)
arduino = ah.getArduino()
if arduino is None:
abort(404)
return jsonify({'arduino': arduino})
@flask_app.route('/api/v1/arduinos/<int:arduino_id>', methods=['PUT'])
@login_required
def update_arduino(arduino_id):
ah = ArduinoHelper(arduino_id)
if not request.json or not 'usb' in request.json or not 'node_id' in request.json or not 'name' in request.json or not 'order' in request.json:
abort(400)
arduino = ah.updateArduino(request.json)
if arduino is None:
abort(404)
so.emit('updateArduinos', {'node_id': arduino['node_id'], 'arduinos': ah.getArduinos()}, broadcast=True)
event = {
'event': 'restart',
'host_name': ah.getNode().host_name
}
if serv.node_sevice.pushToNode(event) == False:
pass
# so.emit('recievedIr', {'result': 'error', 'errors': 'Node is offline'})
return jsonify({'arduino': arduino})
@flask_app.route('/api/v1/arduinos/<int:arduino_id>', methods=['DELETE'])
@login_required
def delete_arduino(arduino_id):
ah = ArduinoHelper(arduino_id)
host_name = ah.getNode().host_name
arduino = ah.deleteArduino()
if arduino is None:
abort(404)
so.emit('updateArduinos', {'node_id': arduino['node_id'], 'arduinos': ah.getArduinos()}, broadcast=True)
event = {
'event': 'restart',
'host_name': host_name
}
if serv.node_sevice.pushToNode(event) == False:
pass
# so.emit('recievedIr', {'result': 'error', 'errors': 'Node is offline'})
return jsonify({'result': True})
# Radio routes
@flask_app.route('/api/v1/radios', methods=['GET'])
@login_required
def get_radios():
rh = RadioHelper()
return jsonify({'radios': rh.getRadios()})
@flask_app.route('/api/v1/radios', methods=['POST'])
@login_required
def create_radio():
if not request.json or not 'arduino_id' in request.json or not 'type' in request.json or not 'name' in request.json or not 'pipe' in request.json or not 'order' in request.json or not 'on_request' in request.json or not 'expired_after' in request.json or not 'enabled' in request.json:
abort(400)
rh = RadioHelper()
radio = rh.createRadio(request.json)
so.emit('updateRadios', {'radios': rh.getRadios()}, broadcast=True)
return jsonify({'radio': radio}), 201
@flask_app.route('/api/v1/radios/<int:radio_id>', methods=['GET'])
@login_required
def get_radio(radio_id):
rh = RadioHelper(radio_id)
radio = rh.getRadio()
if radio is None:
abort(404)
return jsonify({'radio': radio})
@flask_app.route('/api/v1/radios/<int:radio_id>', methods=['PUT'])
@login_required
def update_radio(radio_id):
rh = RadioHelper(radio_id)
if not request.json or not 'pipe' in request.json or not 'name' in request.json or not 'enabled' in request.json or not 'order' in request.json:
abort(400)
radio = rh.updateRadio(request.json)
if radio is None:
abort(404)
so.emit('updateRadios', {'radios': rh.getRadios()}, broadcast=True)
return jsonify({'radio': radio})
@flask_app.route('/api/v1/radios/<int:radio_id>', methods=['DELETE'])
@login_required
def delete_radio(radio_id):
rh = RadioHelper(radio_id)
result = rh.deleteRadio()
if result is None:
abort(404)
so.emit('updateRadios', {'radios': rh.getRadios()}, broadcast=True)
return jsonify({'result': result})
#############
# Socket io #
#############
def authenticated_only(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not current_user.is_authenticated:
disconnect()
else:
return f(*args, **kwargs)
return wrapped
@so.on('connect')
def handle_connect():
id = request.sid
logging.info("%s socket connected" % id)
emit('customEmit', {'data': 'Connected', 'count': 0}, broadcast=True)
@so.on('json')
@authenticated_only
def handle_json(data):
# Debug
logging.info("received json: %s" % data)
# if data['action'] == 'catch_ir_signal':
# # signal = ir_reader.read_signal()
# emit('json', {'response': {'result': 'success', 'callback': 'ir_signal', 'signal': '1500 800 800 800 1500 1500'}})
# thread = None
# thread_lock = Lock()
# def background_thread():
# """Example of how to send server generated events to clients."""
# count = 0
# # status = True
# while True:
# so.sleep(20)
# count += 1
# so.emit('test', {'count': count}, broadcast=True)
# if status:
# status = False
# event = {
# 'event': 'stop',
# 'host_name': 'rpi-node-1'
# }
# else:
# status = True
# event = {
# 'event': 'start',
# 'host_name': 'rpi-node-1'
# }
# event = {
# 'event': 'restart',
# 'host_name': 'rpi-node-1'
# }
# if serv.node_sevice.pushToNode(event) == False:
# pass
# so.emit('recievedIr', {'result': 'error', 'errors': 'Node is offline'})
# nh = NodeHelper()
# so.emit('updateNodes', {'nodes': nh.getNodes()}, broadcast=True)
# @so.on('connect')
# def test_connect():
# global thread
# with thread_lock:
# if thread is None:
# thread = so.start_background_task(target=background_thread)
# so.emit('test', ('foo', 'bar'), broadcast=True)
@so.on('catch_ir')
def handle_catch_ir(json_data):
data = json.loads(json_data)
if 'node_id' in data:
node = NodeHelper(data['node_id']).get()
if node is not None:
event = {
'event': 'catchIr',
'user_id': current_user.id,
'host_name': node.host_name
}
if serv.node_sevice.pushToNode(event) == False:
so.emit('recievedIr', {'result': 'error', 'errors': 'Node is offline'})
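# Illustrative client-side payload for the 'catch_ir' event (hypothetical node id):
#   socket.emit('catch_ir', JSON.stringify({node_id: 3}));
# The node resolved by its host_name then receives a 'catchIr' event, or a
# 'recievedIr' error is emitted back if that node is offline.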
@so.on('emit_method')
def handle_emit_method(message):
logging.info("received emit_method: %s" % message)
| unlicense | -6,534,579,340,163,614,000 | 28.627119 | 289 | 0.624142 | false |
martyone/sailfish-qtcreator | scripts/packageIfw.py | 6 | 5571 | #!/usr/bin/env python
################################################################################
# Copyright (C) 2015 The Qt Company Ltd
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of The Qt Company Ltd, nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
import os
import sys
import datetime
import getopt
import subprocess
import fnmatch
import tempfile
import shutil
import inspect
def usage():
print 'Usage: %s [-v|--version-string=versionstring] [-i|--installer-path=/path/to/installerfw] [-a|--archive=archive.7z] <outputname>' % os.path.basename(sys.argv[0])
def substitute_file(infile, outfile, substitutions):
with open(infile, 'r') as f:
template = f.read()
with open(outfile, 'w') as f:
f.write(template.format(**substitutions))
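# Example (illustrative): a template file containing "Version: {version}" written
# with substitutions={'version': '3.3.0'} produces "Version: 3.3.0" in outfile.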
def ifw_template_dir():
script_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
source_dir = os.path.normpath(os.path.join(script_dir, '..'));
return os.path.normpath(os.path.join(source_dir, 'dist', 'installer', 'ifw'))
def main():
try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], 'hv:i:a:', ['help', 'version-string=', 'installer-path=', 'archive='])
except:
usage()
sys.exit(2)
if len(args) < 1:
usage()
sys.exit(2)
version = ''
ifw_location = ''
archive = ''
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit(0)
if o in ('-v', '--version-string'):
version = a
if o in ('-i', '--installer-path'):
ifw_location = a
if o in ('-a', '--archive'):
archive = a
if (version == ''):
raise Exception('Version not specified (--version-string)!')
if (ifw_location == ''):
raise Exception('Installer framework location not specified (--installer-path)!')
if (archive == ''):
raise Exception('Archive not specified (--archive)!')
installer_name = args[0]
config_postfix = ''
if sys.platform == 'darwin':
config_postfix = '-mac'
if sys.platform.startswith('win'):
config_postfix = '-windows'
if sys.platform.startswith('linux'):
config_postfix = '-linux'
installer_name = installer_name + '.run'
config_name = 'config' + config_postfix + '.xml'
try:
temp_dir = tempfile.mkdtemp()
except:
raise Exception('Failed to create a temporary directory!')
try:
substs = {}
substs['version'] = version
substs['date'] = datetime.date.today().isoformat()
template_dir = ifw_template_dir()
out_config_dir = os.path.join(temp_dir,'config')
out_packages_dir = os.path.join(temp_dir, 'packages')
shutil.copytree(os.path.join(template_dir, 'packages'), os.path.join(temp_dir, 'packages'))
shutil.copytree(os.path.join(template_dir, 'config'), os.path.join(temp_dir, 'config'))
for root, dirnames, filenames in os.walk(out_packages_dir):
for template in fnmatch.filter(filenames, '*.in'):
substitute_file(os.path.join(root, template), os.path.join(root, template[:-3]), substs)
os.remove(os.path.join(root, template))
for root, dirnames, filenames in os.walk(out_config_dir):
for template in fnmatch.filter(filenames, '*.in'):
substitute_file(os.path.join(root, template), os.path.join(root, template[:-3]), substs)
os.remove(os.path.join(root, template))
data_path = os.path.join(out_packages_dir, 'org.qtproject.qtcreator.application', 'data')
if not os.path.exists(data_path):
os.makedirs(data_path)
shutil.copy(archive, data_path)
ifw_call = [os.path.join(ifw_location, 'bin', 'binarycreator'), '-c', os.path.join(out_config_dir, config_name), '-p', out_packages_dir, installer_name, '--offline-only' ]
subprocess.check_call(ifw_call, stderr=subprocess.STDOUT)
finally:
print 'Cleaning up...'
shutil.rmtree(temp_dir)
print 'Done.'
if __name__ == '__main__':
main()
| lgpl-2.1 | -3,399,315,499,870,055,400 | 38.792857 | 179 | 0.630228 | false |
rgs1/zktraffic | zktraffic/endpoints/stats_server.py | 2 | 3534 | # ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import multiprocessing
from zktraffic.base.process import ProcessOptions
from zktraffic.stats.loaders import QueueStatsLoader
from zktraffic.stats.accumulators import (
PerAuthStatsAccumulator,
PerIPStatsAccumulator,
PerPathStatsAccumulator,
)
from .endpoints_server import EndpointsServer
from twitter.common.http import HttpServer
class StatsServer(EndpointsServer):
def __init__(self,
iface,
zkport,
aggregation_depth,
max_results=EndpointsServer.MAX_RESULTS,
max_reqs=400000,
max_reps=400000,
max_events=400000,
start_sniffer=True,
timer=None,
sampling=1.0,
include_bytes=True):
# Forcing a load of the multiprocessing module here
# seem to be hitting http://bugs.python.org/issue8200
multiprocessing.current_process().name
self._max_results = max_results
self._stats = QueueStatsLoader(max_reqs, max_reps, max_events, timer)
self._stats.register_accumulator(
'per_path', PerPathStatsAccumulator(aggregation_depth, include_bytes))
self._stats.register_accumulator(
'per_ip', PerIPStatsAccumulator(aggregation_depth, include_bytes))
self._stats.register_accumulator(
'per_auth', PerAuthStatsAccumulator(aggregation_depth, include_bytes))
self._stats.start()
super(StatsServer, self).__init__(
iface,
zkport,
self._stats.handle_request,
self._stats.handle_reply,
self._stats.handle_event,
start_sniffer,
sampling=sampling)
def wakeup(self):
self._stats.wakeup()
@property
def has_stats(self):
return len(self._get_stats('per_path')) > 0
def _get_stats(self, name, prefix=''):
stats_by_opname = self._stats.stats(name, self._max_results)
stats = {}
for opname, opstats in stats_by_opname.items():
for path, value in opstats.items():
stats["%s%s%s" % (prefix, opname, path)] = value
return stats
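  # Illustrative shape of the dict returned by _get_stats above (hypothetical
  # values): {'per_ip/GetDataRequest/10.0.0.1': 42, 'per_ip/writes/10.0.0.1': 7}
  # i.e. keys are "<prefix><opname><path>" exactly as built in the loop.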
@HttpServer.route("/json/paths")
def json_paths(self):
return self._get_stats('per_path')
@HttpServer.route("/json/ips")
def json_ips(self):
return self._get_stats('per_ip', 'per_ip/')
@HttpServer.route("/json/auths")
def json_auths(self):
return self._get_stats('per_auth', 'per_auth/')
@HttpServer.route("/json/auths-dump")
def json_auths_dump(self):
return self._stats.auth_by_client
@HttpServer.route("/json/info")
def json_info(self):
""" general info about this instance """
proc = ProcessOptions()
return {
"uptime": proc.uptime
}
| apache-2.0 | 6,366,073,485,425,913,000 | 30.553571 | 100 | 0.614035 | false |
QingChenmsft/azure-cli | src/command_modules/azure-cli-extension/azure/cli/command_modules/extension/tests/test_extension_commands.py | 3 | 2972 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import tempfile
import unittest
import shutil
import mock
from azure.cli.core.util import CLIError
from azure.cli.command_modules.extension.custom import (list_extensions, add_extension, show_extension,
remove_extension, OUT_KEY_NAME)
def _get_test_data_file(filename):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', filename)
MY_EXT_NAME = 'myfirstcliextension'
MY_EXT_SOURCE = _get_test_data_file('myfirstcliextension-0.0.3+dev-py2.py3-none-any.whl')
MY_BAD_EXT_SOURCE = _get_test_data_file('notanextension.txt')
class TestExtensionCommands(unittest.TestCase):
def setUp(self):
self.ext_dir = tempfile.mkdtemp()
self.patcher = mock.patch('azure.cli.core.extension.EXTENSIONS_DIR', self.ext_dir)
self.patcher.start()
def tearDown(self):
self.patcher.stop()
shutil.rmtree(self.ext_dir, ignore_errors=True)
def test_no_extensions_dir(self):
shutil.rmtree(self.ext_dir)
actual = list_extensions()
self.assertEqual(len(actual), 0)
def test_no_extensions_in_dir(self):
actual = list_extensions()
self.assertEqual(len(actual), 0)
def test_add_list_show_remove_extension(self):
add_extension(MY_EXT_SOURCE)
actual = list_extensions()
self.assertEqual(len(actual), 1)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME)
remove_extension(MY_EXT_NAME)
num_exts = len(list_extensions())
self.assertEqual(num_exts, 0)
def test_add_extension_twice(self):
add_extension(MY_EXT_SOURCE)
num_exts = len(list_extensions())
self.assertEqual(num_exts, 1)
with self.assertRaises(CLIError):
add_extension(MY_EXT_SOURCE)
def test_add_extension_invalid(self):
with self.assertRaises(ValueError):
add_extension(MY_BAD_EXT_SOURCE)
actual = list_extensions()
self.assertEqual(len(actual), 0)
def test_add_extension_invalid_whl_name(self):
with self.assertRaises(CLIError):
add_extension(os.path.join('invalid', 'ext', 'path', 'file.whl'))
actual = list_extensions()
self.assertEqual(len(actual), 0)
def test_add_extension_valid_whl_name_filenotfound(self):
with self.assertRaises(CLIError):
add_extension(_get_test_data_file('mywheel-0.0.3+dev-py2.py3-none-any.whl'))
actual = list_extensions()
self.assertEqual(len(actual), 0)
if __name__ == '__main__':
unittest.main()
| mit | -2,684,690,568,069,361,000 | 34.807229 | 103 | 0.610363 | false |
RedhawkSDR/integration-gnuhawk | components/vector_to_streams_bb_1o/tests/test_vector_to_streams_bb_1o.py | 1 | 4089 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of GNUHAWK.
#
# GNUHAWK is free software: you can redistribute it and/or modify is under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/.
#
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
"""Test for all component implementations in vector_to_streams_bb_1o"""
def testScaBasicBehavior(self):
#######################################################################
# Launch the component with the default execparams
execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
self.launch(execparams)
#######################################################################
# Verify the basic state of the component
self.assertNotEqual(self.comp, None)
self.assertEqual(self.comp.ref._non_existent(), False)
self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)
#######################################################################
# Validate that query returns all expected parameters
# Query of '[]' should return the following set of properties
expectedProps = []
expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
props = self.comp.query([])
props = dict((x.id, any.from_any(x.value)) for x in props)
# Query may return more than expected, but not less
for expectedProp in expectedProps:
self.assertEquals(props.has_key(expectedProp.id), True)
#######################################################################
# Verify that all expected ports are available
for port in self.scd.get_componentfeatures().get_ports().get_uses():
port_obj = self.comp.getPort(str(port.get_usesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True)
for port in self.scd.get_componentfeatures().get_ports().get_provides():
port_obj = self.comp.getPort(str(port.get_providesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a(port.get_repid()), True)
#######################################################################
# Make sure start and stop can be called without throwing exceptions
self.comp.start()
self.comp.stop()
#######################################################################
# Simulate regular component shutdown
self.comp.releaseObject()
# TODO Add additional tests here
#
# See:
# ossie.utils.bulkio.bulkio_helpers,
# ossie.utils.bluefile.bluefile_helpers
# for modules that will assist with testing components with BULKIO ports
if __name__ == "__main__":
ossie.utils.testing.main("../vector_to_streams_bb_1o.spd.xml") # By default tests all implementations
| gpl-3.0 | 7,337,619,982,029,532,000 | 47.105882 | 133 | 0.593055 | false |
codedude/pySequence | src/Referentiel.py | 1 | 81736 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##This file is part of pySequence
#############################################################################
#############################################################################
## ##
## Referentiel ##
## ##
#############################################################################
#############################################################################
## Copyright (C) 2014 Cédrick FAURY - Jean-Claude FRICOU
# pySequence is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# pySequence is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pySequence; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
Created on 15-06-2014
@author: Cedrick
'''
import xlrd
from xlrd import open_workbook
import constantes
import os
import wx # Juste pour la fonction GetBitmap()
# Pour enregistrer en xml
import xml.etree.ElementTree as ET
Element = type(ET.Element(None))
import util_path
#########################################################################################
DOSSIER_REF = os.path.join(util_path.PATH, r"..", r"referentiels")
REFERENTIELS = {}
ARBRE_REF = {}
###########################################################
def int0(txt):
try:
return int(txt)
except:
return 0
###########################################################
def includeElem(pl, gl):
""" Teste si un élément de la petite liste <pl>
est inclu dans la grande liste <gl>
"""
for e in pl:
if e in gl: return True
return False
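# Example (illustrative): includeElem(['R'], ['R', 'S']) -> True
#                         includeElem(['E'], ['R', 'S']) -> False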
class XMLelem():
######################################################################################
def getBranche(self, nomb = ""):
""" Construction et renvoi d'une branche XML
(enregistrement de fichier)
"""
if nomb != "":
nomb = "_" + nomb
ref = ET.Element(str(self._codeXML+nomb))
def sauv(branche, val, nom = None):
nom = nom.replace("\n", "--")
if type(val) == str or type(val) == unicode:
branche.set("S_"+nom, val.replace("\n", "--"))
elif type(val) == int:
branche.set("I_"+nom, str(val))
elif type(val) == long:
branche.set("L_"+nom, str(val))
elif type(val) == float:
branche.set("F_"+nom, str(val))
elif type(val) == bool:
branche.set("B_"+nom, str(val))
elif type(val) == list:
sub = ET.SubElement(branche, "l_"+nom)
for i, sv in enumerate(val):
sauv(sub, sv, format(i, "02d"))
elif type(val) == dict:
sub = ET.SubElement(branche, "d_"+nom)
for k, sv in val.items():
if type(k) != str and type(k) != unicode:
k = "_"+format(k, "02d")
sauv(sub, sv, k)
elif isinstance(val, XMLelem):
branche.append(val.getBranche(nom))
for attr in dir(self):
if attr[0] != "_":
val = getattr(self, attr)
sauv(ref, val, attr)
return ref
######################################################################################
def setBranche(self, branche):
""" Lecture de la branche XML
(ouverture de fichier)
"""
# print "setBranche", self._codeXML
nomerr = []
def lect(branche, nom = ""):
if nom[:2] == "S_":
return unicode(branche.get(nom)).replace(u"--", u"\n")
elif nom[:2] == "I_":
if branche.get(nom) == None: # Pour passage 6.0-beta19 à beta20
nomerr.append(nom)
return 0
return int(branche.get(nom))
elif nom[:2] == "L_":
return long(branche.get(nom))
elif nom[:2] == "F_":
return float(branche.get(nom))
elif nom[:2] == "B_":
if branche.get(nom) == None: # Pour corriger un bug (version <=5.0beta3)
nomerr.append(nom)
return False
return branche.get(nom)[0] == "T"
elif nom[:2] == "l_":
sbranche = branche.find(nom)
if sbranche == None: return []
dic = {}
for k, sb in sbranche.items():
_k = k[2:]
if isinstance(_k, (str, unicode)) and "--" in _k:
_k = _k.replace("--", "\n")
dic[_k] = lect(sbranche, k)
for sb in list(sbranche):
k = sb.tag
_k = k[2:]
if isinstance(_k, (str, unicode)) and "--" in _k:
_k = _k.replace("--", "\n")
dic[_k] = lect(sbranche, k)
# print dic.values()
liste = [dic[v] for v in sorted(dic)]
# print " >", liste
return liste
# liste = [lect(sbranche, k) for k, sb in sbranche.items()]
# return liste + [lect(sb, k) for k, sb in list(sbranche)]
elif nom[:2] == "d_":
sbranche = branche.find(nom)
d = {}
if sbranche != None:
for k, sb in sbranche.items():
# print k, sb
# _k = k[2:]
_k = k.split("_")[1]
if isinstance(_k, (str, unicode)) and "--" in _k:
_k = _k.replace("--", "\n")
d[_k] = lect(sbranche, k)
for sb in list(sbranche):
k = sb.tag
# _k = k[2:]
_k = k.split("_")#[1]
if len(_k) == 3:#k =="":#_k[0] == "_":
_k = eval(_k[2])
else:
_k = _k[1]
if isinstance(_k, (str, unicode)) and "--" in _k:
_k = _k.replace("--", "\n")
d[_k] = lect(sbranche, k)
return d
elif nom.split("_")[0] == "Indicateur":
sbranche = branche.find(nom)
indic, err = Indicateur().setBranche(sbranche)
nomerr.extend(err)
return indic
elif nom.split("_")[0] == "Projet":
sbranche = branche.find(nom)
proj, err = Projet().setBranche(sbranche)
nomerr.extend(err)
return proj
for attr in dir(self):
if attr[0] != "_":
val = getattr(self, attr)
if type(val) == str or type(val) == unicode:
_attr = "S_"+attr
elif type(val) == int:
_attr = "I_"+attr
elif type(val) == long:
_attr = "L_"+attr
elif type(val) == float:
_attr = "F_"+attr
elif type(val) == bool:
_attr = "B_"+attr
elif type(val) == list:
_attr = "l_"+attr
elif type(val) == dict:
_attr = "d_"+attr
else:
_attr = None
if _attr != None:
v = lect(branche, _attr.replace("\n", "--"))
setattr(self, attr, v)
return self, nomerr
######################################################################################
def __eq__(self, ref):
""" Comparaison de deux référentiels
"""
if not isinstance(ref, type(self)):
return False
def egal(val1, val2):
if isinstance(val1, (str, unicode)) and isinstance(val2, (str, unicode)):
# if val1 != val2:#.replace("\n", "--"):
# print "Erreur s : xml =", val1, " xls =", val2#.replace("\n", "--")
return val1 == val2#.replace("\n", "--")
elif isinstance(val1, (int, long, float)) and isinstance(val2, (int, long, float)):
# if val1 != val2:
# print "Erreur : xml =", val1, " xls =", val2
return val1 == val2
elif type(val1) == bool and type(val2) == bool:
# if val1 != val2:
# print "Erreur : xml =", val1, " xls =", val2
return val1 == val2
elif type(val1) == list:
if len(val1) != len(val2):
# print "Erreur : xml =", val1, " xls =", val2
return False
e = True
for sval1, sval2 in zip(val1, val2):
e = e and egal(sval1, sval2)
return e
elif type(val1) == dict and type(val2) == dict:
if not egal(sorted(val1), sorted(val2)):
# print "Erreur : xml =", val1, " xls =", val2
return False
e = True
for k, v in val1.items():
# if isinstance(k, (str, unicode)):
# k = k.replace("--", "\n")
e = e and egal(v, val2[k])
return e
elif isinstance(val1, XMLelem) and isinstance(val2, XMLelem):
return val1 == val2
else:
# print "Erreur : xml =", val1, " xls =", val2
return False
for attr in dir(self):
if attr[0] != "_":
val1 = getattr(self, attr)
if isinstance(val1, (str, unicode, int, long, float, bool, list, dict, XMLelem)) :
val2 = getattr(ref, attr)
if not egal(val1, val2):
print "Différence"
print " ", attr
print " ", val1
print " ", val2
break
return False
return True
###########################################################
def normaliserPoids(self, dic, debug = False):
for k0, v0 in dic.items():
if len(v0) > 2:
# print self.parties.keys()
tot = {}
for p in self.parties.keys():
tot[p] = 0
if type(v0[1]) == dict :
lstindic = []
for v1 in v0[1].values():
for ii in v1[1]:
lstindic.append(ii)
else:
lstindic = v0[1]
if debug: print " ", lstindic
for indic in lstindic:
for part, poids in indic.poids.items():
if part in tot.keys():
tot[part] = tot[part] + poids
if debug: print " tot", tot
coef = {}
for p in self.parties.keys():
coef[p] = 1.0*tot[p]/100
if debug: print " coef", coef
for indic in lstindic:
for part, poids in indic.poids.items():
if part in coef.keys() and coef[part] != 0:
indic.poids[part] = round(indic.poids[part] / coef[part], 6)
###########################################################
def getPremierEtDernierNiveauArbre(self, dic):
sdic = {}
for k0, v0 in dic.items():
if len(v0) > 1 and type(v0[1]) == dict:
if len(v0) == 3: # premier niveau = [intitule, dict ou liste, poids]
sdic[k0] = [v0[0], self.getDernierNiveauArbre(v0[1]), v0[2]]
else:
sdic.update(self.getDernierNiveauArbre(v0[1]))
else:
sdic[k0] = v0
return sdic
###########################################################
def getArbreProjet(self, dic, prj = None, debug = False):
# print "getArbreProjet", self.parties.keys()
sdic = {}
for k0, v0 in dic.items():
if debug: print k0
if len(v0) > 1 and type(v0[1]) == dict:
if debug: print " ", v0
if len(v0) == 2:
sdic[k0] = [v0[0], self.getArbreProjet(v0[1], prj = prj, debug = debug)]
else:
if debug: print " prem's", v0[2]
if includeElem(self.parties.keys(), v0[2].keys()):
# if len(v0[2]) > 0 and not v0[2].keys() == ['E']:
# if v0[2][1] != 0 or v0[2][2] != 0: # Conduite ou Soutenance
sdic[k0] = [v0[0], self.getArbreProjet(v0[1], prj = prj, debug = debug), v0[2]]
else:
lst = []
for l in v0[1]:
if debug: print l
# print v0
if l.estProjet(): # Conduite ou Soutenance
if prj == None or l.getType() in prj.parties.keys():
# if l.getType() == v0[2].keys():
lst.append(l)
if lst != []:
if len(v0) > 2:
sdic[k0] = [v0[0], lst, v0[2]]
else:
sdic[k0] = [v0[0], lst]
return sdic
###########################################################
def getDernierNiveauArbre2(self, dic):
sdic = {}
for k0, v0 in dic.items():
if type(v0) == dict:
sdic.update(self.getDernierNiveauArbre(v0))
else:
sdic[k0] = v0
return sdic
###########################################################
def getDeuxiemeNiveauArbre(self, dic):
sdic = {}
# if len(dic) > 0 and type(dic.values()[0][1]) == dict:
for k0, v0 in dic.items():
if type(v0[1]) == dict:
for k1, v1 in v0[1].items():
if len(v1) > 1 and type(v1[1]) == dict: # pas fini = 3ème niveau
self._niveau = 3
sdic[k1] = {}
for k2, v2 in v1[1].items():
sdic[k1][k2] = v2[1]
else: # Niveau "indicateur"
self._niveau = 2
sdic[k1] = v1[1]
else:
sdic[k0] = v0[1]
# else:
# return dic
return sdic
###########################################################
def getDernierNiveauArbre(self, dic):
sdic = {}
for k0, v0 in dic.items():
if len(v0) > 1 and type(v0[1]) == dict:
sdic.update(self.getDernierNiveauArbre(v0[1]))
else:
sdic[k0] = v0
return sdic
#################################################################################################################################
#
# Référentiel
#
#################################################################################################################################
class Referentiel(XMLelem):
def __init__(self, nomFichier = r""):
# Enseignement Famille, Nom , Nom complet
self._codeXML = "Referentiel"
self.initParam()
self._bmp = None
if nomFichier != r"":
self.importer(nomFichier)
######################################################################################
def __repr__(self):
# print "*********************"
# print self.Code
# print "positions_CI", self.positions_CI
## print "CI_BO :", self.CI_BO
## print "CI :", self.CentresInterets
## print "Sav :", self.dicSavoirs
## print "dicSavoirs_Math", self.dicSavoirs_Math
## for p in self.getParams():
## v = getattr(self, p)
## if type(v) == dict:
## print p, v
## print "dicCompetences :", self.dicCompetences
## print "Mat :", self.dicSavoirs_Math
## print "Phy :", self.dicSavoirs_Phys
# print "Dem :", self.demarches
## print "Act :", self.activites
## print "Sea :", self.seances
# print "DeS :", self.demarcheSeance
return self.Code
######################################################################################
def initParam(self):
#
        # General information
#
self.Famille = u""
self.Code = u""
self.Enseignement = [u"" , u"", u""]
self.options = {} # options de l'enseignement : {Code : nomFichier}
self.tr_com = [] # tronc commun de l'enseignement : [Code, nomFichier]
self.periodes = [] # découpage de l'enseignement en années/périodes
self.FichierLogo = r"" # Fichier désignant l'image du Logo de l'enseignement
#
        # Projects
#
self.projets = {}
self.aColNon = {} # Pour indiquer si les différentes parties d'un projet ont une colonne "NON" dans leur grille
self.compImposees = {} # Indique que les competences sont imposées pour chaque revue
self.parties = {}
#
        # Centres of interest (CI)
#
self.nomCI = u"Centres d'intérêt"
self.abrevCI = u"CI"
self.CentresInterets = [] #
self.CI_BO = True # les ci sont donnés par le B.O. (pas modifiables)
self.CI_cible = False # les ci se placent sur une cible MEI FSC
self.positions_CI = [] # positions sur la cible MEI FSC
#
        # Knowledge items or abilities (savoirs / capacités)
#
self.nomSavoirs = u"Savoirs" # nom donnés aux savoirs : "Savoirs", "Capacités", ...
self.surnomSavoirs = u""
self.dicSavoirs = {}
#
        # Competencies
#
self.nomCompetences = u"Compétences" # nom donnés aux compétences : "Compétences", ...
self.nomIndicateurs = u"Indicateurs de performance"
self.dicCompetences = {}
# self.dicCompetences_prj = {}
# self.dicIndicateurs_prj = {}
# self.dicPoidsIndicateurs_prj = {}
# self.dicLignesIndicateurs_prj = {}
#
        # Functions/Tasks
#
self.nomFonctions = u"Fonctions" # nom donnés aux Fonctions : "Fonctions", ...
self.nomTaches = u"Tâches" # nom donnés aux Tâches : "Tâches", ...
self.dicFonctions = {}
#
        # Teaching practices
#
self.demarches = {}
self.listeDemarches = []
self.seances = {}
self.listeTypeSeance = []
self.activites = {}
self.listeTypeActivite = []
self.horsClasse = {}
self.listeTypeHorsClasse = []
self.demarcheSeance = {}
#
        # Group sizes (effectifs)
#
self.effectifs = {}
self.listeEffectifs = []
self.effectifsSeance = {} #{"" : []}
self.nomSavoirs_Math = u"Mathématiques"
self.dicSavoirs_Math = {}
self.objSavoirs_Math = False
self.preSavoirs_Math = True
self.nomSavoirs_Phys = u"Sciences Physiques"
self.dicSavoirs_Phys = {}
self.objSavoirs_Phys = False
self.preSavoirs_Phys = True
#
# Bulletins Officiels
#
self.BO_dossier = []
self.BO_URL = []
######################################################################################
def setBrancheCodeV5(self, branche):
try:
return branche.get("S_Code")
except:
return
######################################################################################
def setBrancheV5(self, branche):
""" Lecture de la branche XML
(ouverture de fichier)
"""
print "setBranche référentiel V5"
self.initParam()
nomerr = []
def lect(branche, nom = ""):
if nom[:2] == "S_":
return unicode(branche.get(nom)).replace(u"--", u"\n")
elif nom[:2] == "I_":
return int(eval(branche.get(nom)))
elif nom[:2] == "L_":
return long(eval(branche.get(nom)))
elif nom[:2] == "F_":
return float(eval(branche.get(nom)))
elif nom[:2] == "B_":
if branche.get(nom) == None: # Pour corriger un bug (version <=5.0beta3)
nomerr.append(nom)
return False
return branche.get(nom)[0] == "T"
elif nom[:2] == "l_":
sbranche = branche.find(nom)
if sbranche == None: return []
dic = {}
for k, sb in sbranche.items():
_k = k[2:]
if isinstance(_k, (str, unicode)) and "--" in _k:
_k = _k.replace("--", "\n")
dic[_k] = lect(sbranche, k)
for sb in list(sbranche):
k = sb.tag
_k = k[2:]
if isinstance(_k, (str, unicode)) and "--" in _k:
_k = _k.replace("--", "\n")
dic[_k] = lect(sbranche, k)
# print dic.values()
liste = [dic[v] for v in sorted(dic)]
# print " >", liste
return liste
# liste = [lect(sbranche, k) for k, sb in sbranche.items()]
# return liste + [lect(sb, k) for k, sb in list(sbranche)]
elif nom[:2] == "d_":
sbranche = branche.find(nom)
d = {}
if sbranche != None:
for k, sb in sbranche.items():
_k = k[2:]
if isinstance(_k, (str, unicode)) and "--" in _k:
_k = _k.replace("--", "\n")
d[_k] = lect(sbranche, k)
for sb in list(sbranche):
k = sb.tag
_k = k[2:]
if _k[0] == "_":
_k = eval(_k[1:])
if isinstance(_k, (str, unicode)) and "--" in _k:
_k = _k.replace("--", "\n")
d[_k] = lect(sbranche, k)
return d
for attr in dir(self):
if attr[0] != "_":
val = getattr(self, attr)
if type(val) == str or type(val) == unicode:
_attr = "S_"+attr
elif type(val) == int:
_attr = "I_"+attr
elif type(val) == long:
_attr = "L_"+attr
elif type(val) == float:
_attr = "F_"+attr
elif type(val) == bool:
_attr = "B_"+attr
elif type(val) == list:
_attr = "l_"+attr
elif type(val) == dict:
_attr = "d_"+attr
else:
_attr = None
if _attr:
setattr(self, attr, lect(branche, _attr.replace("\n", "--")))
# Pour corriger une erreur de jeunesse de la 5.0beta1
if len(self.aColNon) == 0:
self.aColNon = {'R' : True, 'S' : False}
# # Pour corriger une erreur de jeunesse de la 5.0beta3
# if self.Code in ['SIN', 'ITEC', 'AC', 'EE']:
# self.tr_com == True
# Pour rajouter les periodes aux fichiers < 5.7
if self.periodes == []:
self.periodes = self.defPeriode()
# print ">>>", self.periode_prj
# Pour ajouter les noms des CI < 5.8
if self.nomCI == "None":
self.nomCI = u"Centres d'intérêt"
self.abrevCI = u"CI"
# Pour ajouter savoirs prérequis/objectifs < 5.9
if "B_objSavoirs_Math" in nomerr:
self.nomSavoirs_Math = u"Mathématiques"
self.nomSavoirs_Phys = u"Sciences Physiques"
self.objSavoirs_Math = False
self.preSavoirs_Math = True
self.objSavoirs_Phys = False
self.preSavoirs_Phys = True
# Pour mettre à jour les généralités sur le projet
if self.attributs_prj == {}:
self.attributs_prj = REFERENTIELS[self.Code].attributs_prj
###########################################################
def corrigeArbreProjet(dic, debug = False):
for k0, v0 in dic.items():
if debug: print k0
if len(v0) > 1 and type(v0[1]) == dict:
if debug: print " ", v0[0]
if len(v0) == 2:
corrigeArbreProjet(v0[1], debug = debug)
else:
if debug: print " prem's", v0[2]
if v0[2][1] != 0 or v0[2][2] != 0: # Conduite ou Soutenance
corrigeArbreProjet(v0[1], debug = debug)
else:
lst = []
for l in v0[1]:
if debug: print l
if l != None and not isinstance(l, Indicateur):
if debug: print "Correction"
lst.append(Indicateur(l[0], l[1], l[2]))
v0[1] = lst
return
corrigeArbreProjet(self.dicCompetences, debug = False)
# print "dicCompetences Corr", self.dicCompetences
self.postTraiter()
self.completer()
return
######################################################################################
def getParams(self):
l = []
for attr in dir(self):
if attr[0] != "_":
val = getattr(self, attr)
if isinstance(val, (str, unicode, int, long, float, bool, list, dict)):
l.append(attr)
return l
######################################################################################
def corrigerVersion(self, nomerr):
""" Correction d'erreur de lecture de la branche XML
pour cause de changement de version
"""
# print "corrigerVersion"
# print self.projets
# print
# Pour corriger une erreur de jeunesse de la 5.0beta1
# if len(self.aColNon) == 0:
# self.aColNon = {'R' : True, 'S' : False}
# # Pour corriger une erreur de jeunesse de la 5.0beta3
# if self.Code in ['SIN', 'ITEC', 'AC', 'EE']:
# self.tr_com == True
# Pour rajouter les periodes aux fichiers < 5.7
if self.periodes == []:
self.periodes = self.defPeriode()
# print ">>>", self.periode_prj
# Pour ajouter les noms des CI < 5.8
if self.nomCI == "None":
self.nomCI = u"Centres d'intérêt"
self.abrevCI = u"CI"
# Pour ajouter savoirs prérequis/objectifs < 5.9
if "B_objSavoirs_Math" in nomerr:
self.nomSavoirs_Math = u"Mathématiques"
self.nomSavoirs_Phys = u"Sciences Physiques"
self.objSavoirs_Math = False
self.preSavoirs_Math = True
self.objSavoirs_Phys = False
self.preSavoirs_Phys = True
# # Pour mettre à jour les généralités sur le projet
# if self.attributs_prj == {}:
# self.attributs_prj = REFERENTIELS[self.Code].attributs_prj
# print "self.BO_dossier", self.BO_dossier
# # Pour les BO
# if type(self.BO_dossier) != list:
# self.BO_dossier = [self.BO_dossier]
for p in self.projets.values():
# print p.listeParties, p.parties
            if len(p.listeParties) != len(p.parties):
p.listeParties = p.parties.keys()
p.corrigerVersion(nomerr)
# print p.listeParties, p.parties
return
######################################################################################
def importer(self, nomFichier):
"""
"""
# print "IMPORTER" ,
self.initParam()
###########################################################
def remplir(sh, col, rng, mode = 1, condition = None, debug = False, niveau = 0):
""" Mode = 1 : on finit par une liste
Mode = 2 : on finit par un dict
"""
if debug: print " "*niveau+"remplir : col="+chr(65+col), "lignes=",[n+1 for n in rng]
if rng == [] and mode == 2:
return None
# self.prof_Comp = max(self.prof_Comp, col)
lig = [l for l in rng if sh.cell(l,col).value != u""]
if debug: print " "*niveau+">> branches :", [n+1 for n in lig]
if lig == rng:
if debug: print " "*niveau+"FIN"
if mode == 1:
if col+1 >= sh.ncols or (len(lig)>0 and sh.cell(lig[0],col+1).value) == u"":
return [sh.cell(l,col).value for l in lig]
else:
d = {}
for l in lig:
if condition == None or sh.cell(l,4).value == condition:
if debug: print " "*niveau+str(sh.cell(l,col).value)
d[str(sh.cell(l,col).value)] = [sh.cell(l,col+1).value, []]
return d
else:
# if condition == None or sh.cell(l,4).value == condition:
d = {}
for l in lig:
if condition == None or sh.cell(l,4).value == condition:
if debug: print " "*niveau+str(sh.cell(l,col).value)
d[str(sh.cell(l,col).value)] = sh.cell(l,col+1).value
if condition == None or len(d) > 0:
return d
else:
return None
else:
# if len(lig) > 0:
llig = lig + [rng[-1]+1]
dic = {}
for i, p in enumerate(lig):
if debug: print " "*niveau+"-> ", i, [n+1 for n in lig], [n+1 for n in llig]
sdic = remplir(sh, col+1, range(p+1, llig[i+1]), mode = mode, condition = condition, debug = debug, niveau = niveau+1)
if sdic != None:
if debug: print " "*niveau+"+++"+str(sh.cell(p,col).value)
dic[str(sh.cell(p,col).value)] = [sh.cell(p,col+1).value, sdic]
return dic
###########################################################
def getArbre(sh, rng, col, prems = False, fonction = False, debug = False):
""" Construit la structure en arbre des "compétences"
"""
dic = {}
# Liste des lignes comportant un code dans la colonne <col>, dans l'intervalle <rng>
lstLig = [l for l in rng if sh.cell(l,col).value != u""]
if debug: print " **",lstLig
for i, l in enumerate(lstLig):
code = str(sh.cell(l,col).value)
intitule = unicode(sh.cell(l,col+1).value)
if debug: print "-> ",l, code, intitule
# Toutes les lignes entre chaque code
if i < len(lstLig)-1:
ssRng = range(l+1, lstLig[i+1])
else:
ssRng = range(l+1, rng[-1]+1)
if debug: print " ", ssRng
# Il y a encore des items à droite ...
if len(ssRng) > 0 and col < 2 and [li for li in ssRng if sh.cell(li,col+1).value != u""] != []:
dic[code] = [intitule, getArbre(sh, ssRng, col+1, fonction = fonction, debug = debug)]
if not fonction:
if prems:
poids = {}
for p, c in self._colParties:
v = int0(sh.cell(l,c).value)
if v > 0:
poids[p] = v
# poids = [int0(sh.cell(l,7).value), # poids Ecrit
# int0(sh.cell(l,8).value), # poids Conduite projet
# int0(sh.cell(l,9).value)] # poids Soutenance projet
dic[code].append(poids)
else:
lstComp = [sh.cell(1,co).value for co in range(5, sh.ncols) if sh.cell(l,co).value != u""]
# print " lstComp1 =", lstComp
dic[code].append(lstComp)
# Il n'y a plus d'item à droite => Indicateur()
else:
dic[code] = [intitule, []]
for ll in [l] + ssRng:
indic = unicode(sh.cell(ll,5).value)
if not fonction:
poids = {}
lignes = {}
revues = {}
for p, c in self._colParties:
v = int0(sh.cell(ll,c).value)
if v > 0:
poids[p] = v
lignes[p] = int0(sh.cell(ll,c+1).value)
revues[p] = int0(sh.cell(ll,c+2).value)
if lignes[p] != 0:
self.aColNon[p] = True
if revues[p] != 0:
self.compImposees[p] = True
# poids = [int0(sh.cell(ll,7).value), # poids Ecrit
# int0(sh.cell(ll,8).value), # poids Conduite projet
# int0(sh.cell(ll,9).value)] # poids Soutenance projet
if indic == u"":
# print "code", code, poids
dic[code].append(poids)
else:
# ligne = int0(sh.cell(ll,10).value) # ligne dans la grille
# if ligne != 0:
# if poids[1] != 0:
# self.aColNon['R'] = True
# elif poids[2] != 0:
# self.aColNon['S'] = True
# revue = 0
# if sh.ncols > 11:
# revue = int0(sh.cell(ll,11).value)
dic[code][1].append(Indicateur(indic, poids, lignes, revues))
else:
lstComp = [sh.cell(1,co).value for co in range(5, sh.ncols) if sh.cell(ll,co).value != u""]
# print " lstComp2 =", lstComp
dic[code][1] = lstComp
if debug: print
return dic
###########################################################
def listItemCol(sh, col, rng):
return [[l, sh.cell(l,col).value] for l in rng if sh.cell(l,col).value != u""]
###########################################################
def aplatir2(dic, niv=1):
ddic = {}
for k0, v0 in dic.items():
ddic.update(v0[1])
return ddic
#
# Ouverture fichier EXCEL
#
wb = open_workbook(nomFichier)
sh = wb.sheets()
#
# Généralités
#
sh_g = wb.sheet_by_name(u"Généralités")
self.Famille = sh_g.cell(2,0).value
self.Code = sh_g.cell(2,1).value
self.Enseignement[0] = sh_g.cell(6,0).value #Abréviation
self.Enseignement[1] = sh_g.cell(6,1).value #Nom complet
self.Enseignement[2] = sh_g.cell(6,2).value #Famille
# print self.Code
if sh_g.ncols > 3:
lig = [l for l in range(10, 17) if sh_g.cell(l,3).value != u""]
for l in lig:
self.periodes.append([sh_g.cell(l,2).value, int(sh_g.cell(l,3).value)])
self.FichierLogo = sh_g.cell(6,3).value
#
# Projets
#
col = [c for c in range(1, sh_g.ncols) if sh_g.cell(24,c).value != u""]
for c in col:
self.projets[sh_g.cell(25,c).value] = Projet(sh_g.cell(25,c).value,
intitule = sh_g.cell(24,c).value,
duree = int0(sh_g.cell(26,c).value),
periode = [int(i) for i in sh_g.cell(27,c).value.split()])
# print self.projets
#
# options
#
sh_g = wb.sheet_by_name(u"Généralités")
lig = [l for l in range(10, 17) if sh_g.cell(l,0).value != u""]
for l in lig:
self.options[str(sh_g.cell(l,0).value)] = sh_g.cell(l,1).value
#
# tronc commun
#
if sh_g.cell(21,0).value != u"":
self.tr_com = [sh_g.cell(21,0).value, sh_g.cell(21,1).value]
# #
# # projet
# #
# self.projet = sh_g.cell(23,1).value[0].upper() == "O"
# if self.projet:
# self.duree_prj = int(sh_g.cell(24,1).value)
# self.periode_prj = [int(i) for i in sh_g.cell(25,1).value.split()]
#
# Bulletins Officiels
#
# print self.Code, sh_g.nrows
self.BO_dossier = [sh_g.cell(l,0).value for l in range(31, sh_g.nrows) if sh_g.cell(l,0).value != u""]
self.BO_URL = [[sh_g.cell(l,1).value, sh_g.cell(l,2).value] for l in range(32, sh_g.nrows) if sh_g.cell(l,1).value != u""]
# self.BO_URL = sh_g.cell(29,1).value
#
# if sh_g.nrows > 28:
# self.BO_dossier = [sh_g.cell(ll,0).value for l in [29, 30, 31]]
# self.BO_URL = sh_g.cell(29,1).value
#
# CI
#
sh_ci = wb.sheet_by_name(u"CI")
self.CI_BO = sh_ci.cell(0,1).value[0].upper() == "O"
self.CI_cible = sh_ci.cell(1,1).value[0].upper() == "O"
self.nomCI = sh_ci.cell(2,0).value
self.abrevCI = sh_ci.cell(2,1).value
continuer = True
l = 4
while continuer:
if l < sh_ci.nrows:
ci = sh_ci.cell(l,0).value
if ci != u"":
self.CentresInterets.append(ci)
if self.CI_cible:
t = ''
for c in range(2,8):
if sh_ci.cell(l,c).value != u"":
t += sh_ci.cell(3,c).value
else:
t += ' '
if c == 4:
t += '_'
self.positions_CI.append(t)
l += 1
else:
continuer = False
else:
continuer = False
#
# Savoirs
#
sh_va = wb.sheet_by_name(u"Savoirs")
self.nomSavoirs = sh_va.cell(0,0).value
self.surnomSavoirs = sh_va.cell(1,0).value
self.dicSavoirs = remplir(sh_va, 0, range(2, sh_va.nrows))
#
# Compétences
#
sh_va = wb.sheet_by_name(u"Compétences")
self.nomCompetences = sh_va.cell(0,0).value
self.nomIndicateurs = sh_va.cell(0,5).value
# Décomposition des projets en parties
self._colParties = []
col = [c for c in range(8, sh_va.ncols) if sh_va.cell(1,c).value != u""]
# print ">>>", col
for i, c in enumerate(col):
if i == len(col)-1:
n = sh_va.ncols
else:
n = col[i+1]
for j in range((n-c)/3):
cp = c+j*3
part = sh_va.cell(3,cp).value
self._colParties.append((part, cp))
t = sh_va.cell(1,c).value
for p in self.projets.values():
if t == p.intitule:
p.listeParties.append(part)
p.parties[part] = sh_va.cell(2,cp).value
self.compImposees[part] = False
# t = sh_va.cell(1,c).value
# for p in self.projets.values():
# if t == p.intitule:
# if i == len(col)-1:
# n = sh_va.ncols
# else:
# n = col[i+1]
# # print " ",n, t
# for j in range((n-c)/3):
# cp = c+j*3
# # print " --", cp
# part = sh_va.cell(3,cp).value
# p.parties[part] = sh_va.cell(2,cp).value
# self._colParties.append((part, cp))
# self.compImposees[part] = False
# # print " >", p.parties
# print "_colParties :", self, self._colParties
# print "compImposees :", self, self.compImposees
for part, col in list(set([cp for cp in self._colParties])):
self.parties[part] = sh_va.cell(2,col).value
for p in self.projets.values():
# print " importer", self, p
p.importer(wb)
# self.prof_Comp = 0 # compteur de profondeur
# self.dicCompetences = remplir(sh_va, 0, range(1, sh_va.nrows), mode = 2)
# print ">>>", self.Code
# Pour enregistrer s'il y a des colonnes "non" dans les grilles 'R' ou 'S'
# self.aColNon = {'R' : False, 'S' : False}
self.dicCompetences = getArbre(sh_va, range(2, sh_va.nrows), 0, prems = True, debug = False)
# print "dicCompetences", self.dicCompetences
# print "_aColNon", self.Code, ":", self._aColNon
#
# Fonctions
#
if u"Fonctions" in wb.sheet_names():
sh_va = wb.sheet_by_name(u"Fonctions")
self.nomFonctions = sh_va.cell(0,0).value
self.nomTaches = sh_va.cell(0,5).value
self.dicFonctions = getArbre(sh_va, range(2, sh_va.nrows), 0, prems = True, fonction = True, debug = False)
# print "dicFonctions", self.dicFonctions
#
# Pratique pédagogiques
#
sh_g = wb.sheet_by_name(u"Activité-Démarche")
# Démarches
for l in range(2, 5):
if sh_g.cell(l,1).value != u"":
self.demarches[str(sh_g.cell(l,0).value)] = [sh_g.cell(l,1).value, sh_g.cell(l,2).value]
self.listeDemarches.append(sh_g.cell(l,0).value)
# Activités
for l in range(8, 11):
if sh_g.cell(l,0).value != u"":
self.activites[str(sh_g.cell(l,0).value)] = [sh_g.cell(l,1).value, sh_g.cell(l,2).value]
self.listeTypeActivite.append(sh_g.cell(l,0).value)
self.seances.update(self.activites)
# Hors classe
for l in range(24, 26):
if l < sh_g.nrows and sh_g.cell(l,0).value != u"":
self.horsClasse[str(sh_g.cell(l,0).value)] = [sh_g.cell(l,1).value, sh_g.cell(l,2).value]
self.listeTypeHorsClasse.append(sh_g.cell(l,0).value)
self.seances.update(self.horsClasse)
# Autres Séances
self.listeTypeSeance = self.listeTypeActivite[:] + self.listeTypeHorsClasse[:]
for l in range(14, 21):
if sh_g.cell(l,0).value != u"":
self.seances[str(sh_g.cell(l,0).value)] = [sh_g.cell(l,1).value, sh_g.cell(l,2).value]
self.listeTypeSeance.append(sh_g.cell(l,0).value)
# print self, self.listeTypeSeance
# Croisement démarche/activité
for l, s in enumerate(self.listeTypeActivite):
l = l + 3
# print l
self.demarcheSeance[str(s)] = [sh_g.cell(2,c).value for c in range(5,8) if sh_g.cell(l,c).value != u""]
#
# effectifs
#
sh_g = wb.sheet_by_name(u"Activité-Effectif")
for l in range(2, 8):
if sh_g.cell(l,0).value != u"":
self.effectifs[str(sh_g.cell(l,0).value)] = [sh_g.cell(l,1).value, sh_g.cell(l,2).value]
self.listeEffectifs.append(sh_g.cell(l,0).value)
for l, s in enumerate(self.listeTypeSeance):
l = l + 3
self.effectifsSeance[str(sh_g.cell(l,4).value)] = [sh_g.cell(2,c).value for c in range(5,11) if sh_g.cell(l,c).value != u""]
#
# Savoirs Math
#
if u"Math" in wb.sheet_names():
sh_va = wb.sheet_by_name(u"Math")
self.nomSavoirs_Math = sh_va.cell(0,0).value
self.dicSavoirs_Math = remplir(sh_va, 0, range(1, sh_va.nrows), debug = False, niveau = 0)
self.objSavoirs_Math = 'O' in sh_va.cell(0,5).value
self.preSavoirs_Math = 'P' in sh_va.cell(0,5).value
#
# Savoirs Physique
#
if u"Phys" in wb.sheet_names():
sh_va = wb.sheet_by_name(u"Phys")
self.nomSavoirs_Phys = sh_va.cell(0,0).value
self.dicSavoirs_Phys = remplir(sh_va, 0, range(1, sh_va.nrows))
self.objSavoirs_Phys = 'O' in sh_va.cell(0,5).value
self.preSavoirs_Phys = 'P' in sh_va.cell(0,5).value
###########################################################
def defPeriode(self):
""" Définit les periodes
(dans le cas ou elles ne sont pas définies dans le référentiel intégré
versions < 5.7)
"""
# print "defPeriode"
self.periode_prj = []
if self.Famille == 'CLG':
return [[u"Année", 6]]
elif self.Famille in ['STI', 'SSI']:
self.periode_prj = [7, 10]
return [[u"1_ère", 5], [u"T_ale", 5]]
return [[u"Année", 6]]
# ###########################################################
# def getNbrPeriodes(self):
# if self.Famille == 'CLG':
# return 5
# elif self.Famille in ['STI', 'SSI']:
# return 10
# return 10
#########################################################################
def postTraiter(self):
""" Complète les données selon que le référentiel ait un tronc commun ou des options
--> le "_" évite que les attributs ne soient sauvegardés dans les XML
"""
# print "postTraiter", self, self.parties
# self._parties = []
# for proj in self.projets.values():
# for part in proj.parties:
# if not part in self._parties:
# self._parties.append(part)
for p in self.projets.values():
p.postTraiter(self)
#########################################################################
def completer(self, forcer = False):
""" Complète les données selon que le référentiel ait un tronc commun ou des options
Exécuté lorsque tous les référentiels sont chargés !
--> le "_" évite que les attributs ne soient sauvegardés dans les XML
"""
# C'est une option (il y a un tronc commun) ==> on complète plus tard
if not forcer and len(self.tr_com) != 0:
return
# print "completer ref :", self, self.options
if len(self.options) != 0:
self.parties = {}
for ro in self.options.keys():
for proj in REFERENTIELS[ro].projets.values():
for part, n in proj.parties.items():
if not part in self.parties.keys():
self.parties[part] = n
# print " ", self.parties
# print " ", self.dicCompetences
self._dicCompetences = self.getArbreProjet(self.dicCompetences, debug = False)
# print " >", self._dicCompetences
self._dicIndicateurs = self.getPremierEtDernierNiveauArbre(self._dicCompetences)
self.normaliserPoids(self._dicIndicateurs, debug = False)
# print " ", self._dicIndicateurs_prj
self._niveau = 0
self._dicIndicateurs_famille = self.getDeuxiemeNiveauArbre(self._dicCompetences)
self._dicIndicateurs_simple = self.getDernierNiveauArbre2(self._dicIndicateurs_famille)
for ro in self.options:
REFERENTIELS[ro].completer(forcer = True)
for p in self.projets.values():
p.completer(self)
#########################################################################
def getNbrRevuesDefaut(self, codePrj):
return self.projets[codePrj].getNbrRevuesDefaut()
#########################################################################
def getPosRevuesDefaut(self, codePrj):
return self.projets[codePrj].getPosRevuesDefaut()
#########################################################################
def getIntituleIndicateur(self, comp):
sep = "\n\t"+constantes.CHAR_POINT
indicateurs = self.getIndicateur(comp)
if type(indicateurs) == list:
return "\t"+constantes.CHAR_POINT + sep.join([i[0] for i in indicateurs])
else:
t = u""
            for k, v in indicateurs.items():
                t += k + u" : " + v[0]
            return t
#########################################################################
def getNbrPeriodes(self):
n = 0
for p in self.periodes:
n += p[1]
return n
#############################################################################
def getPeriodeEval(self, codePrj):
return self.projets[codePrj].getPeriodeEval()
#############################################################################
def getAnnee(self, position):
n = 0
for a, p in enumerate(self.periodes):
if position+n in range(p[1]):
return a
n += p[1]
return
#############################################################################
def getProjetEval(self, position):
""" Renvoie l'épreuve de projet (évaluation)
situé à la position <position>
"""
# print "getProjetEval", position
for k, p in self.projets.items():
# print " ", p.periode
if position in p.periode:
return k
#############################################################################
def getCodeProjetDefaut(self):
""" Renvoie l'épreuve de projet (évaluation)
par défaut (pour les projets d'"entrainement" en cours d'année)
"""
pos = []
prj = []
for k, p in self.projets.items():
prj.append(k)
pos.append(max(p.periode))
return prj[pos.index(max(pos))]
#############################################################################
def getProjetDefaut(self):
""" Renvoie l'épreuve de projet (évaluation)
par défaut (pour les projets d'"entrainement" en cours d'année)
"""
return self.projets[self.getCodeProjetDefaut()]
#############################################################################
def estPeriodeEval(self, position):
pp = self.periode_prj
return position+1 in range(pp[0], pp[1]+1)
#########################################################################
def getIntituleCompetence(self, comp, sousComp = False):
sep = "\n\t"+constantes.CHAR_POINT
competence = self.getCompetence_prj(comp)
if sousComp and type(competence[1]) == dict:
return sep.join([competence[0]] + [v for v in competence[1]])
else:
            return competence
#########################################################################
def getCompetence(self, comp):
# print "getCompetence", comp
# print " ", self.dicCompetences
if comp in self.dicCompetences.keys():
# print " 1>>"
return self.dicCompetences[comp]
else:
for k0, v0 in self.dicCompetences.items():
# print " ", k0, type(v0[1])
if type(v0[1]) == dict:
if comp in v0[1].keys():
# print " 2>>"
return v0[1][comp]
else:
for k1, v1 in v0[1].items():
if type(v1[1]) == dict and comp in v1[1].keys():
# print " 3>>"
return v1[1][comp]
#########################################################################
def calculerLargeurCompetences(self, tailleReference):
t = 1
for k, v in self._dicIndicateurs_prj_simple.items():
t = float(max(t, len(v)))
r = t/5 # 5 = nombre max d'indicateurs à loger dans tailleReference
return r*tailleReference
#########################################################################
def getSavoir(self, code, dic = None, c = 1, gene = None):
# print "getSavoir", code,
if dic == None:
if gene == "M":
if self.tr_com != []:
dic = REFERENTIELS[self.tr_com[0]].dicSavoirs_Math
else:
dic = self.dicSavoirs_Math
elif gene == "P":
if self.tr_com != []:
dic = REFERENTIELS[self.tr_com[0]].dicSavoirs_Phys
else:
dic = self.dicSavoirs_Phys
else:
dic = self.dicSavoirs
# print dic
if dic.has_key(code):
return dic[code][0]
else:
cd = ".".join(code.split(".")[:c])
return self.getSavoir(code, dic[cd][1], c+1)
#########################################################################
def getLogo(self):
if self._bmp == None:
if self.CI_cible:
self._bmp = constantes.images.Cible.GetBitmap()
elif self.Code == "AC":
self._bmp = constantes.images.ImageAC.GetBitmap()
elif self.Code == "SIN":
self._bmp = constantes.images.ImageSIN.GetBitmap()
elif self.Code == "ITEC":
self._bmp = constantes.images.ImageITEC.GetBitmap()
elif self.Code == "EE":
self._bmp = constantes.images.ImageEE.GetBitmap()
elif self.Code == "SSI":
self._bmp = constantes.images.SSI_ASR.GetBitmap()
elif self.FichierLogo != r"":
self._bmp = wx.Bitmap(os.path.join(DOSSIER_REF, constantes.toFileEncoding(self.FichierLogo)))
# try:
# self._bmp = wx.Bitmap(os.path.join(constantes.PATH, r"..", DOSSIER_REF, self.FichierLogo))
# except:
# self._bmp = self._bmp = constantes.images.SSI_ASR.GetBitmap()
else:
self._bmp = constantes.images.SSI_ASR.GetBitmap()
return self._bmp
#########################################################################
def getTypeEtab(self):
if self.Famille in ["STI", "SSI", "STS"]:
return 'L' # Lycée
else:
return 'C' # Collège
# #########################################################################
# def getCompetence(self, code, dic = None, c = None):
# """ Pour obtenir l'intitulé d'une compétence à partir de son code
# fonction recursive
# """
## print "getCompetence", code, dic, c
# if dic == None:
# dic = self.dicCompetences
#
# if dic.has_key(code):
# if type(dic[code]) == list:
# return dic[code][0]
# else:
# return dic[code]
#
# else:
# for c, v in dic.items():
# if type(v) == list:
# co = self.getCompetence(code, v[1])
# if co != None:
# return co
# return
#########################################################################
def findEffectif(self, lst, eff):
continuer = True
i = 0
while continuer:
            if i >= len(lst):   # >= avoids an IndexError when no match is found
continuer = False
else:
if lst[i][:2] == self.effectifs[eff][0][:2]:
continuer = False
else:
i += 1
return i
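    # Illustrative usage sketch (added note, not part of the original source).
    # Once chargerReferentiels() has filled REFERENTIELS, a Referentiel can be
    # queried directly; the code "SSI" and the savoir code format below are
    # assumptions:
    #     ref = REFERENTIELS["SSI"]
    #     ref.getNbrPeriodes()      # total number of periods (sum of ref.periodes)
    #     ref.getProjetDefaut()     # Projet instance evaluated last in the year
    #     ref.getSavoir("A1.2")     # intitule of a savoir, resolved recursively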
#################################################################################################################################
#
# Projet
#
#################################################################################################################################
class Projet(XMLelem):
def __init__(self, code = "", intitule = u"", duree = 0, periode = [], importer = None):
self._codeXML = "Projet"
self.code = code
self.intitule = intitule
self.duree = duree
self.periode = periode
self.parties = {} # Le dictionnaire des parties (code, nom)
self.listeParties = [] # La liste ordonnée des parties
#
# grilles d'évaluation de projet
#
self.grilles = {}
self.cellulesInfo = {}
#
# phases de projet
#
self.phases = {}
self.listPhasesEval = []
self.listPhases = []
self.posRevues = {}
#
# Effectifs
#
self.maxEleves = 5
self.minEleves = 3
#
# Généralités sur le projet
#
self.ficheValid = r""
self.attributs = {}
if importer != None:
self.importer(importer)
##################################################################################################################
def __repr__(self):
return self.code + " : " + self.intitule + u" (" + str(self.duree) + u"h)"
######################################################################################
def corrigerVersion(self, nomerr):
""" Correction d'erreur de lecture de la branche XML
pour cause de changement de version
"""
# print "corrigerVersion", nomerr
if "I_maxEleves" in nomerr:
self.maxEleves = 5
if "I_minEleves" in nomerr:
self.minEleves = 3
#########################################################################
def getNbrRevuesDefaut(self):
return min(self.posRevues.keys())
#########################################################################
def getPosRevuesDefaut(self):
return self.posRevues[self.getNbrRevuesDefaut()]
#############################################################################
def getPeriodeEval(self):
return self.periode[0]-1
#########################################################################
def getIndicateur(self, codeIndic):
if '_' in codeIndic:
code, i = codeIndic.split('_')
i = int(i)
if code in self._dicIndicateurs_simple.keys():
indics = self._dicIndicateurs_simple[code]
if len(indics) >= i:
indic = indics[i-1]
return indic
else:
comp = self.getCompetence(codeIndic)
if type(comp[1]) == dict:
return self.getPremierEtDernierNiveauArbre(comp[1])
else:
return comp[1]
#########################################################################
def getTypeIndicateur(self, codeIndic):
# print "getTypeIndicateur", codeIndic, type(codeIndic)
if type(codeIndic) == str:
indic = self.getIndicateur(codeIndic)
else:
indic = codeIndic
if indic != None:
return indic.getType()
#########################################################################
def getCompetence(self, comp):
if comp in self._dicCompetences.keys():
return self._dicCompetences[comp]
else:
for k0, v0 in self._dicCompetences.items():
if type(v0[1]) == dict:
if comp in v0[1].keys():
return v0[1][comp]
else:
for k1, v1 in v0[1].items():
if type(v1[1]) == dict and comp in v1[1].keys():
return v1[1][comp]
##################################################################################################################
def importer(self, wb):
# print "importer", self.parties.keys()
for part in self.parties.keys():
#
# Grilles d'évaluation projet
#
sh_g = wb.sheet_by_name(u"Grille_"+self.code+"_"+part)
for l in range(2,3):
# print sh_g.cell(l,0).value
if sh_g.cell(l,0).value != u"":
self.grilles[part] = [sh_g.cell(l,0).value, sh_g.cell(l,3).value]
# print "self.grilles", self.grilles
self.cellulesInfo[part] = {}
for l in range(6, sh_g.nrows):
k = str(sh_g.cell(l,0).value)
if k != u"":
i = [sh_g.cell(l,1).value, # Feuille
[int0(sh_g.cell(l,2).value), # Ligne
int0(sh_g.cell(l,3).value), # Colonne
int0(sh_g.cell(l,4).value)], #Période
sh_g.cell(l,5).value] # Préfixe
if k in self.cellulesInfo[part].keys():
self.cellulesInfo[part][k].append(i)
else:
self.cellulesInfo[part][k] = [i]
#
# Phases du projet
#
shp = wb.sheet_by_name(u"Phase_"+self.code)
# print self.Code
for co in range(5, shp.ncols):
if shp.cell(1,co).value != "":
# print " ", shp.cell(1,co).value
self.posRevues[int(shp.cell(1,co).value)] = []
for l in range(2, shp.nrows):
if shp.cell(l,0).value != u"":
if shp.cell(l,1).value != u"":
self.phases[str(shp.cell(l,0).value)] = [shp.cell(l,1).value, shp.cell(l,2).value, shp.cell(l,3).value]
if shp.cell(l,4).value != "":
self.listPhasesEval.append(shp.cell(l,0).value)
self.listPhases.append(shp.cell(l,0).value)
for co in range(len(self.posRevues)):
if shp.cell(l,5+co).value != "":
self.posRevues[int(shp.cell(1,co+5).value)].append(shp.cell(l,0).value)
# if shp.cell(l,6).value != "":
# self.posRevues[3].append(shp.cell(l,0).value)
#
# Généralités sur le projet
#
shp = wb.sheet_by_name(u"Généralités_"+self.code)
if shp.nrows > 16:
self.ficheValid = shp.cell(16,0).value
for l in range(2, 13):
self.attributs[str(shp.cell(l,0).value)] = [shp.cell(l,1).value, shp.cell(l,2).value, shp.cell(l,3).value]
##################################################################################################################
def postTraiter(self, ref):
# print " postTraiter", ref, self
# ###########################################################
# def getArbreProjet(dic, debug = False):
# sdic = {}
# for k0, v0 in dic.items():
# if debug: print k0
# if len(v0) > 1 and type(v0[1]) == dict:
# if debug: print " ", v0[0]
# if len(v0) == 2:
# sdic[k0] = [v0[0], getArbreProjet(v0[1], debug = debug)]
# else:
# if debug: print " prem's", v0[2]
#
# if includeElem(self.parties.keys(), v0[2].keys()):
## if len(v0[2]) > 0 and not v0[2].keys() == ['E']:
## if v0[2][1] != 0 or v0[2][2] != 0: # Conduite ou Soutenance
# sdic[k0] = [v0[0], getArbreProjet(v0[1], debug = debug), v0[2]]
# else:
# lst = []
# for l in v0[1]:
# if debug: print l[1]
# if l.estProjet(): # Conduite ou Soutenance
# lst.append(l)
# if lst != []:
# if len(v0) > 2:
# sdic[k0] = [v0[0], lst, v0[2]]
# else:
# sdic[k0] = [v0[0], lst]
# return sdic
###########################################################
def chercherIndicIdem(dic, debug = False):
ii = None
for k0, v0 in dic.items():
if debug: print k0
if len(v0) > 1 and type(v0[1]) == dict:
if debug: print " ", v0[0]
ii = chercherIndicIdem(v0[1], debug = debug)
if debug: print " ii", ii
if ii != None : return ii
else:
lst = []
for l in v0[1]:
if isinstance(l, Indicateur) and "idem" in l.intitule:
if debug: print l.intitule
if debug: print " idem"
codeindic = str(l.intitule.split(" ")[1])
return l, codeindic, k0
if ii != None:
return ii
###########################################################
def chercherDicIndic(dic, code, debug = False):
if code in dic.keys():
return dic
else:
for k0, v0 in dic.items():
if debug: print k0
if len(v0) > 1 and type(v0[1]) == dict:
if debug: print " ", v0[0]
sdic = chercherDicIndic(v0[1], code, debug = debug)
if sdic != None : return sdic
return
# print "dicCompetences ref", ref.dicCompetences
self._dicCompetences = self.getArbreProjet(ref.dicCompetences, self, debug = False)
# print ">> _dicCompetences prj", self._dicCompetences
# On regroupe les compétences qui ont les mêmes indicateurs dans la grille (cas de STI2D EE !!)
lst_codeindic = chercherIndicIdem(self._dicCompetences, debug = False)
# print "lst_codeindic", lst_codeindic
if type(lst_codeindic) == tuple:
dic = chercherDicIndic(self._dicCompetences, lst_codeindic[2])
# print " >>", dic
new_code = lst_codeindic[1]+"\n"+lst_codeindic[2]
dic[new_code] = [dic[lst_codeindic[1]][0]+"\n"+dic[lst_codeindic[2]][0], dic[lst_codeindic[1]][1]]
del dic[lst_codeindic[2]]
del dic[lst_codeindic[1]]
self._dicIndicateurs = ref.getPremierEtDernierNiveauArbre(self._dicCompetences)
self.normaliserPoids(self._dicIndicateurs, debug = False)
# print " ", self._dicIndicateurs_prj
self._niveau = 0
self._dicIndicateurs_famille = self.getDeuxiemeNiveauArbre(self._dicCompetences)
self._dicIndicateurs_simple = self.getDernierNiveauArbre2(self._dicIndicateurs_famille)
# print "_dicIndicateurs_prj_simple", self._dicIndicateurs_prj_simple
# lst.extend()
#########################################################################
def getClefDic(self, dicattr, nom, num = None):
dic = getattr(self, dicattr)
for k,v in dic.items():
if num != None:
v = v[num]
if v == nom:
return k
return None
##################################################################################################################
def completer(self, ref):
""" Complète le projet
"""
# print " completer", ref, self
# print " ", self._dicCompetences
###########################################################
def aplatir(dic, niv=1):
ddic = {}
for k0, v0 in dic.items():
for k1, v1 in v0[1].items():
if type(v1) == list:
ddic[k1] = [v1[0]]
if type(v1[1]) == dict:
for i in sorted(v1[1].keys()):
ddic[k1].append(v1[1][i])
# ddic[k1].extend(v1[1].values())
else:
ddic[k1] = [v1]
return ddic
###########################################################
def getListeIndic(dic):
# print "getListeIndic"
# print dic
if type(dic) == dict:
l = []
sdic = {}
for k0, v0 in dic.items():
if type(v0) == dict:
sdic.update(getDernierNiveauArbre(v0))
else:
sdic[k0] = v0
for indics in sdic.values():
for indic in indics[1]:
l.append(indic)
# l = [indics[1] for indics in sdic.values()]
else:
l = []
for i, v0 in enumerate(dic):
l.append(v0)
# print " >>>", l
return l
###########################################################
def getDernierNiveauArbre(dic):
sdic = {}
for k0, v0 in dic.items():
if len(v0) > 1 and type(v0[1]) == dict:
sdic.update(getDernierNiveauArbre(v0[1]))
else:
sdic[k0] = v0
return sdic
#
# Ajout des compétences du tronc commun
#
if ref.tr_com != []:
t = ref.tr_com[0]
# print " ++", t, REFERENTIELS.keys()
if t in REFERENTIELS.keys():
# print " ",REFERENTIELS[t]._dicCompetences
# print " ++", self._dicCompetences
self._dicCompetences.update(REFERENTIELS[t]._dicCompetences)
self._dicIndicateurs.update(REFERENTIELS[t]._dicIndicateurs)
self._dicIndicateurs_simple.update(REFERENTIELS[t]._dicIndicateurs_simple)
self._dicGrpIndicateur = {}
for p in self.parties.keys():
self._dicGrpIndicateur[p] = []
for comp, dic in self._dicIndicateurs.items():
for indic in getListeIndic(dic[1]):
for part in indic.poids.keys():
if part in self._dicGrpIndicateur.keys():
self._dicGrpIndicateur[part].append(comp)
for p in self.parties.keys():
self._dicGrpIndicateur[p] = list(set(self._dicGrpIndicateur[p]))
# if ref.tr_com != []:
# self.grilles.update(REFERENTIELS[ref.tr_com[0]].projets[self.code].grilles)
#################################################################################################################################
#
# Indicateur
#
#################################################################################################################################
class Indicateur(XMLelem):
def __init__(self, intitule = u"", poids = {}, ligne = {}, revue = {}):
self._codeXML = "Indicateur"
self.poids = poids
self.ligne = ligne
self.intitule = intitule
self.revue = revue
def estProjet(self):
return self.getType() != 'E'
# return self.poids[1] != 0 or self.poids[2] != 0
def getType(self):
""" E : écrit
C : conduite
S : soutenance
...
"""
for t, p in self.poids.items():
if p !=0:
return t
# if self.poids[0] != 0:
# return "E"
# elif self.poids[1] != 0:
# return "C"
# elif self.poids[2] != 0:
# return "S"
def getRevue(self):
return 'R'+str(self.revue[self.getType()])
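    # Illustrative sketch (added note, not part of the original source): an
    # Indicateur carries per-part weights/lines/revues keyed by part code
    # ('E' ecrit, 'C' conduite, 'S' soutenance), e.g.:
    #     i = Indicateur(u"Exemple", poids={'C': 3}, ligne={'C': 12}, revue={'C': 1})
    #     i.estProjet()   # True  -- its non-zero weight is not on 'E'
    #     i.getType()     # 'C'
    #     i.getRevue()    # 'R1'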
#################################################################################################################################
#
# Compétence
#
#################################################################################################################################
class Competence(XMLelem):
def __init__(self, intitule = u"", indicateurs = []):
self._codeXML = "Competence"
self.intitule = intitule
self.indicateurs = indicateurs
#########################################################################################
def getEnseignementLabel(label):
""" Renvoie le code et la famille d'enseignement
à partir de son label
"""
for r in REFERENTIELS.values():
if r.Enseignement[0] == label:
return r.Code, r.Famille
##########################################################################################
def enregistrer(code, nomFichier):
fichier = file(nomFichier, 'w')
root = REFERENTIELS[code].getBranche()
constantes.indent(root)
ET.ElementTree(root).write(fichier)
fichier.close()
#enregistrer("SSI", "testSauvRef.xml")
##########################################################################################
def ouvrir(nomFichier):
fichier = open(nomFichier,'r')
root = ET.parse(fichier).getroot()
ref = Referentiel()
ref.initParam()
err = ref.setBranche(root)[1]
ref.corrigerVersion(err)
ref.postTraiter()
ref.completer()
fichier.close()
return ref
# print REFERENTIELS["SSI"] == ref
#ouvrir("testSauvRef.xml")
##########################################################################################
SAUVEGARDE = False
#######################################################################################
#import sys
#FILE_ENCODING = sys.getfilesystemencoding()
#DEFAUT_ENCODING = "utf-8"
#def toFileEncoding(path):
# try:
# path = path.decode(DEFAUT_ENCODING)
# return path.encode(FILE_ENCODING)
# except:
# return path
##########################################################################################
def chargerReferentiels():
global REFERENTIELS, ARBRE_REF
#
# Chargement des fichiers .xls
#
# print path_ref
liste = os.listdir(DOSSIER_REF)
for fich_ref in liste:#["Ref_STS-SN_EC-1.xls", "Ref_SSI.xls"]:#, "Ref_STI2D-EE.xls", "Ref_STI2D-ETT.xls"]:#["Ref_6CLG.xls"]:#
if os.path.splitext(fich_ref)[1] == ".xls":
# print
# print fich_ref
ref = Referentiel(os.path.join(DOSSIER_REF, fich_ref))
ref.postTraiter()
REFERENTIELS[ref.Code] = ref
for r in REFERENTIELS.values():
# print r
r.completer()
# if r.Code == "ITEC":
# print r
#
# Vérification intégrité en comparant avec le fichier .xml (s'il existe)
#
if not SAUVEGARDE:
dicOk = {}
for k, r in REFERENTIELS.items():
f = os.path.join(DOSSIER_REF, constantes.toFileEncoding(r"Ref_"+r.Enseignement[0]+r".xml"))
dicOk[k] = False
if os.path.exists(f):
ref = ouvrir(f)
# for p in ref.projets.values():
# print p.grilles
if ref == r:
dicOk[k] = True
else:
enregistrer(r.Code, f)
dicOk[k] = None
print u"Intégrité référentiels :", dicOk
#
# Construction de la structure en arbre
#
# Types d'enseignement qui n'ont pas de tronc commun (parents)
for k, r in REFERENTIELS.items():
if r.tr_com == []:
ARBRE_REF[k] = []
# Types d'enseignement qui ont un tronc commun (enfants)
d = []
for k, r in REFERENTIELS.items():
if r.tr_com != []:
ARBRE_REF[r.tr_com[0]].append(k)
d.append(r.tr_com[0])
for k, r in REFERENTIELS.items():
if "_"+r.Famille in ARBRE_REF.keys():
ARBRE_REF["_"+r.Famille].append(k)
else:
ARBRE_REF["_"+r.Famille] = [k]
for k, r in ARBRE_REF.items():
if k[0] == "_":
if len(r) == 1:
del ARBRE_REF[k]
for k, r in ARBRE_REF.items():
if k[0] == "_":
for kk in ARBRE_REF.keys():
if kk in r:
if ARBRE_REF[kk] == []:
del ARBRE_REF[kk]
else:
del ARBRE_REF[k]
break
r.sort()
r.reverse()
# print ARBRE_REF
chargerReferentiels()
def sauvegarderOriginaux():
global SAUVEGARDE
SAUVEGARDE = True
for r in REFERENTIELS.values():
f = os.path.join(DOSSIER_REF, "Ref_"+r.Enseignement[0]+".xml")
enregistrer(r.Code, f)
#
# Ligne à décommenter pour faire une sauvegarde XML des référentiels "originaux"
# Commenter en parallèle la partie "Vérification" de chargerReferentiels()
#
#sauvegarderOriginaux()
| gpl-3.0 | -7,552,781,065,959,422,000 | 36.609036 | 138 | 0.403817 | false |
stfc/cvmfs-stratum-uploader | uploader/packages/tests.py | 1 | 1390 | import os
import uuid
import shutil
from django.utils.unittest import TestCase
from uploader.projects.models import FileSystem, Project
from uploader.packages.models import Package
from uploader import settings
class PackageTestCase(TestCase):
def setUp(self):
uid = str(uuid.uuid4())[0:8]
self.fs = FileSystem.objects.create(mount_point=('/tmp/cvmfs-%s' % uid))
self.project = Project(file_system=self.fs, directory='project')
os.makedirs(self.fs.mount_point)
settings.MEDIA_ROOT = '/tmp/media-%s' % uid
os.makedirs(settings.MEDIA_ROOT)
self.filepath = '%s/package.tar.gz' % settings.MEDIA_ROOT
open(self.filepath, 'w').close()
def test_new_after_init(self):
package = Package(project=self.project, file=self.filepath)
self.assertEqual(package.status, Package.Status.new)
def test_filepath(self):
pass
# file = open(os.path.join(self.project.full_path(), 'filename.tar.gz'), 'w')
# file.close()
#
# p = Package(project=self.project, file=file)
#
# self.assertEqual(p.filepath(), os.path.join())
def test_filename(self):
pass
def test_get_file_list(self):
pass
def test_deploy(self):
pass
def test_remove(self):
pass
def tearDown(self):
shutil.rmtree(settings.MEDIA_ROOT) | apache-2.0 | -1,488,561,087,601,985,300 | 27.387755 | 85 | 0.63741 | false |
Daksh/sugar-toolkit-gtk3 | src/sugar3/dispatch/dispatcher.py | 10 | 6905 | import weakref
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
from sugar3.dispatch import saferef
WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)
def _make_id(target):
if hasattr(target, 'im_func'):
return (id(target.im_self), id(target.im_func))
return id(target)
class Signal(object):
"""Base class for all signals
Internal attributes:
receivers -- { receriverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None):
"""providing_args -- A list of the arguments
this signal can pass along in
a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""Connect receiver to sender for signal
receiver -- a function or an instance method which is to
receive signals. Receivers must be
hashable objects.
if weak is True, then receiver must be weak-referencable
(more precisely saferef.safeRef() must be able to create
a reference to the receiver).
Receivers must be able to accept keyword arguments.
If receivers have a dispatch_uid attribute, the receiver will
not be added if another receiver already exists with that
dispatch_uid.
sender -- the sender to which the receiver should respond
Must either be of type Signal, or None to receive events
from any sender.
weak -- whether to use weak references to the receiver
By default, the module will attempt to use weak
references to the receiver objects. If this parameter
is false, then strong references will be used.
dispatch_uid -- an identifier used to uniquely identify a particular
instance of a receiver. This will usually be a string, though it
may be anything hashable.
returns None
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
receiver = saferef.safeRef(
receiver, onDelete=self._remove_receiver)
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
def disconnect(self, receiver=None, sender=None, weak=True,
dispatch_uid=None):
"""Disconnect receiver from sender for signal
        receiver -- the registered receiver to disconnect. May be None if
dispatch_uid is specified.
sender -- the registered sender to disconnect
weak -- the weakref state to disconnect
dispatch_uid -- the unique identifier of the receiver to disconnect
disconnect reverses the process of connect.
If weak references are used, disconnect need not be called.
        The receiver will be removed from dispatch automatically.
returns None
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
for idx, (r_key, _) in enumerate(self.receivers):
if r_key == lookup_key:
del self.receivers[idx]
def send(self, sender, **named):
"""Send signal from sender to all connected receivers.
sender -- the sender of the signal
Either a specific object or None.
named -- named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
If any receiver raises an error, the error propagates back
through send, terminating the dispatch loop, so it is quite
possible to not have all receivers called if a raises an
error.
"""
responses = []
if not self.receivers:
return responses
for receiver in self._live_receivers(_make_id(sender)):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_robust(self, sender, **named):
"""Send signal from sender to all connected receivers catching errors
sender -- the sender of the signal
Can be any python object (normally one registered with
a connect if you actually want something to occur).
named -- named arguments which will be passed to receivers.
These arguments must be a subset of the argument names
defined in providing_args.
Return a list of tuple pairs [(receiver, response), ... ],
may raise DispatcherKeyError
if any receiver raises an error (specifically any subclass of
Exception),
the error instance is returned as the result for that receiver.
"""
responses = []
if not self.receivers:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(_make_id(sender)):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception, err:
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _live_receivers(self, senderkey):
"""Filter sequence of receivers to get resolved, live receivers
This checks for weak references
and resolves them, then returning only live
receivers.
"""
none_senderkey = _make_id(None)
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == none_senderkey or r_senderkey == senderkey:
if isinstance(receiver, WEAKREF_TYPES):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
yield receiver
else:
yield receiver
def _remove_receiver(self, receiver):
"""Remove dead receivers from connections."""
to_remove = []
for key, connected_receiver in self.receivers:
if connected_receiver == receiver:
to_remove.append(key)
for key in to_remove:
for idx, (r_key, _) in enumerate(self.receivers):
if r_key == key:
del self.receivers[idx]
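# Illustrative usage sketch (added note, not part of the original module).
# Receivers only need to accept keyword arguments:
#
#     task_done = Signal(providing_args=["result"])
#
#     def on_task_done(sender=None, signal=None, result=None, **kwargs):
#         print result
#
#     task_done.connect(on_task_done)
#     task_done.send(sender=None, result=42)   # -> [(on_task_done, None)]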
| lgpl-2.1 | -2,754,231,332,817,313,000 | 34.050761 | 77 | 0.597683 | false |
andyh616/mne-python | mne/io/compensator.py | 21 | 5199 | import numpy as np
from .constants import FIFF
def get_current_comp(info):
"""Get the current compensation in effect in the data
"""
comp = None
first_comp = -1
for k, chan in enumerate(info['chs']):
if chan['kind'] == FIFF.FIFFV_MEG_CH:
comp = int(chan['coil_type']) >> 16
if first_comp < 0:
first_comp = comp
elif comp != first_comp:
raise ValueError('Compensation is not set equally on '
'all MEG channels')
return comp
def set_current_comp(info, comp):
"""Set the current compensation in effect in the data
"""
comp_now = get_current_comp(info)
for k, chan in enumerate(info['chs']):
if chan['kind'] == FIFF.FIFFV_MEG_CH:
rem = chan['coil_type'] - (comp_now << 16)
chan['coil_type'] = int(rem + (comp << 16))
def _make_compensator(info, kind):
"""Auxiliary function for make_compensator
"""
for k in range(len(info['comps'])):
if info['comps'][k]['kind'] == kind:
this_data = info['comps'][k]['data']
# Create the preselector
presel = np.zeros((this_data['ncol'], info['nchan']))
for col, col_name in enumerate(this_data['col_names']):
ind = [k for k, ch in enumerate(info['ch_names'])
if ch == col_name]
if len(ind) == 0:
raise ValueError('Channel %s is not available in '
'data' % col_name)
elif len(ind) > 1:
raise ValueError('Ambiguous channel %s' % col_name)
presel[col, ind[0]] = 1.0
# Create the postselector
postsel = np.zeros((info['nchan'], this_data['nrow']))
for c, ch_name in enumerate(info['ch_names']):
ind = [k for k, ch in enumerate(this_data['row_names'])
if ch == ch_name]
if len(ind) > 1:
raise ValueError('Ambiguous channel %s' % ch_name)
elif len(ind) == 1:
postsel[c, ind[0]] = 1.0
this_comp = np.dot(postsel, np.dot(this_data['data'], presel))
return this_comp
raise ValueError('Desired compensation matrix (kind = %d) not'
' found' % kind)
def make_compensator(info, from_, to, exclude_comp_chs=False):
"""Returns compensation matrix eg. for CTF system.
Create a compensation matrix to bring the data from one compensation
state to another.
Parameters
----------
info : dict
The measurement info.
from_ : int
Compensation in the input data.
to : int
Desired compensation in the output.
exclude_comp_chs : bool
Exclude compensation channels from the output.
Returns
-------
comp : array | None.
The compensation matrix. Might be None if no compensation
is needed (from == to).
"""
if from_ == to:
return None
if from_ == 0:
C1 = np.zeros((info['nchan'], info['nchan']))
else:
C1 = _make_compensator(info, from_)
if to == 0:
C2 = np.zeros((info['nchan'], info['nchan']))
else:
C2 = _make_compensator(info, to)
# s_orig = s_from + C1*s_from = (I + C1)*s_from
# s_to = s_orig - C2*s_orig = (I - C2)*s_orig
# s_to = (I - C2)*(I + C1)*s_from = (I + C1 - C2 - C2*C1)*s_from
comp = np.eye(info['nchan']) + C1 - C2 - np.dot(C2, C1)
if exclude_comp_chs:
pick = [k for k, c in enumerate(info['chs'])
if c['kind'] != FIFF.FIFFV_REF_MEG_CH]
if len(pick) == 0:
raise ValueError('Nothing remains after excluding the '
'compensation channels')
comp = comp[pick, :]
return comp
# @verbose
# def compensate_to(data, to, verbose=None):
# """
# %
# % [newdata] = mne_compensate_to(data,to)
# %
# % Apply compensation to the data as desired
# %
# """
#
# newdata = data.copy()
# now = get_current_comp(newdata['info'])
#
# # Are we there already?
# if now == to:
# logger.info('Data are already compensated as desired')
#
# # Make the compensator and apply it to all data sets
# comp = make_compensator(newdata['info'], now, to)
# for k in range(len(newdata['evoked'])):
# newdata['evoked'][k]['epochs'] = np.dot(comp,
# newdata['evoked'][k]['epochs'])
#
# # Update the compensation info in the channel descriptors
# newdata['info']['chs'] = set_current_comp(newdata['info']['chs'], to)
# return newdata
# def set_current_comp(chs, value):
# """Set the current compensation value in the channel info structures
# """
# new_chs = chs
#
# lower_half = int('FFFF', 16) # hex2dec('FFFF')
# for k in range(len(chs)):
# if chs[k]['kind'] == FIFF.FIFFV_MEG_CH:
# coil_type = float(chs[k]['coil_type']) & lower_half
# new_chs[k]['coil_type'] = int(coil_type | (value << 16))
#
# return new_chs
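# Illustrative usage sketch (added note, not part of the original module).
# `info` is an assumed measurement info dict from a CTF recording and `data`
# an assumed (nchan, ntimes) array in the same channel order:
#
#     current = get_current_comp(info)              # e.g. 3 for 3rd-order compensation
#     comp = make_compensator(info, from_=current, to=0)
#     if comp is not None:                          # None means nothing to change
#         data_uncompensated = np.dot(comp, data)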
| bsd-3-clause | -3,155,455,671,426,067,500 | 31.49375 | 79 | 0.523562 | false |
adereis/avocado | avocado/utils/process.py | 1 | 41941 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <[email protected]>
"""
Functions dedicated to find and run external commands.
"""
import errno
import fnmatch
import logging
import os
import re
import shlex
import shutil
import signal
import stat
import threading
import time
try:
import subprocess32 as subprocess
SUBPROCESS32_SUPPORT = True
except ImportError:
import subprocess
SUBPROCESS32_SUPPORT = False
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from . import gdb
from . import runtime
from . import path
log = logging.getLogger('avocado.test')
stdout_log = logging.getLogger('avocado.test.stdout')
stderr_log = logging.getLogger('avocado.test.stderr')
#: The active wrapper utility script.
CURRENT_WRAPPER = None
#: The global wrapper.
#: If set, run every process under this wrapper.
WRAP_PROCESS = None
#: Set wrapper per program names.
#: A list of wrappers and program names.
#: Format: [ ('/path/to/wrapper.sh', 'progname'), ... ]
WRAP_PROCESS_NAMES_EXPR = []
#: Exception to be raised when users of this API need to know that the
#: execution of a given process resulted in undefined behavior. One
#: concrete example when a user, in an interactive session, let the
#: inferior process exit before before avocado resumed the debugger
#: session. Since the information is unknown, and the behavior is
#: undefined, this situation will be flagged by an exception.
UNDEFINED_BEHAVIOR_EXCEPTION = None
# variable=value bash assignment
_RE_BASH_SET_VARIABLE = re.compile(r"[a-zA-Z]\w*=.*")
class CmdError(Exception):
def __init__(self, command=None, result=None, additional_text=None):
self.command = command
self.result = result
self.additional_text = additional_text
def __str__(self):
if self.result is not None:
if self.result.interrupted:
msg = "Command '%s' interrupted by %s"
msg %= (self.command, self.result.interrupted)
elif self.result.exit_status is None:
msg = "Command '%s' failed and is not responding to signals"
msg %= self.command
else:
msg = "Command '%s' failed (rc=%d)"
msg %= (self.command, self.result.exit_status)
if self.additional_text:
msg += ", " + self.additional_text
return msg
else:
return "CmdError"
def can_sudo():
"""
:return: True when sudo is available (or is root)
"""
if os.getuid() == 0:
return True
elif system_output("id -u", ignore_status=True, sudo=True).strip() == "0":
return True
else:
return False
def pid_exists(pid):
"""
Return True if a given PID exists.
:param pid: Process ID number.
"""
try:
os.kill(pid, 0)
except OSError as detail:
if detail.errno == errno.ESRCH:
return False
return True
def safe_kill(pid, signal):
"""
Attempt to send a signal to a given process that may or may not exist.
:param signal: Signal number.
"""
try:
os.kill(pid, signal)
return True
except Exception:
return False
def kill_process_tree(pid, sig=signal.SIGKILL, send_sigcont=True):
"""
Signal a process and all of its children.
If the process does not exist -- return.
:param pid: The pid of the process to signal.
:param sig: The signal to send to the processes.
"""
if not safe_kill(pid, signal.SIGSTOP):
return
children = system_output("ps --ppid=%d -o pid=" % pid, ignore_status=True,
verbose=False).split()
for child in children:
kill_process_tree(int(child), sig)
safe_kill(pid, sig)
if send_sigcont:
safe_kill(pid, signal.SIGCONT)
def kill_process_by_pattern(pattern):
"""
Send SIGTERM signal to a process with matched pattern.
:param pattern: normally only matched against the process name
"""
cmd = "pkill -f %s" % pattern
result = run(cmd, ignore_status=True)
if result.exit_status:
logging.error("Failed to run '%s': %s", cmd, result)
else:
logging.info("Succeed to run '%s'.", cmd)
def process_in_ptree_is_defunct(ppid):
"""
Verify if any processes deriving from PPID are in the defunct state.
    Attempt to verify whether the parent process or any of the children of
    PPID are defunct (zombie) or not.
:param ppid: The parent PID of the process to verify.
"""
defunct = False
try:
pids = get_children_pids(ppid)
except CmdError: # Process doesn't exist
return True
for pid in pids:
cmd = "ps --no-headers -o cmd %d" % int(pid)
proc_name = system_output(cmd, ignore_status=True, verbose=False)
if '<defunct>' in proc_name:
defunct = True
break
return defunct
def get_children_pids(ppid):
"""
Get all PIDs of children/threads of parent ppid
    :param ppid: parent PID
    :return: list of PIDs of all children/threads of ppid
"""
return system_output("ps -L --ppid=%d -o lwp" % ppid, verbose=False).split('\n')[1:]
def binary_from_shell_cmd(cmd):
"""
Tries to find the first binary path from a simple shell-like command.
:note: It's a naive implementation, but for commands like:
`VAR=VAL binary -args || true` gives the right result (binary)
:param cmd: simple shell-like binary
:return: first found binary from the cmd
"""
try:
cmds = shlex.split(cmd)
except ValueError:
log.warning("binary_from_shell_cmd: Shlex split of %s failed, using "
"using simple split.", cmd)
cmds = cmd.split(" ")
for item in cmds:
if not _RE_BASH_SET_VARIABLE.match(item):
return item
raise ValueError("Unable to parse first binary from '%s'" % cmd)
class CmdResult(object):
"""
Command execution result.
:param command: String containing the command line itself
:param exit_status: Integer exit code of the process
:param stdout: String containing stdout of the process
:param stderr: String containing stderr of the process
:param duration: Elapsed wall clock time running the process
:param pid: ID of the process
"""
def __init__(self, command="", stdout="", stderr="",
exit_status=None, duration=0, pid=None):
self.command = command
self.exit_status = exit_status
self.stdout = stdout
self.stderr = stderr
self.duration = duration
self.interrupted = False
self.pid = pid
def __repr__(self):
cmd_rep = ("Command: %s\n"
"Exit status: %s\n"
"Duration: %s\n"
"Stdout:\n%s\n"
"Stderr:\n%s\n"
"PID:\n%s\n" % (self.command, self.exit_status,
self.duration, self.stdout, self.stderr,
self.pid))
if self.interrupted:
cmd_rep += "Command interrupted by %s\n" % self.interrupted
return cmd_rep
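# Illustrative sketch (added note): a CmdResult is what this module's run()
# and system_output() helpers hand back, e.g.:
#
#     result = run("ls -l /tmp", ignore_status=True)
#     if result.exit_status != 0:
#         log.error("listing failed:\n%s", result.stderr)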
class SubProcess(object):
"""
Run a subprocess in the background, collecting stdout/stderr streams.
"""
def __init__(self, cmd, verbose=True, allow_output_check='all',
shell=False, env=None, sudo=False):
"""
Creates the subprocess object, stdout/err, reader threads and locks.
:param cmd: Command line to run.
:type cmd: str
:param verbose: Whether to log the command run and stdout/stderr.
:type verbose: bool
:param allow_output_check: Whether to log the command stream outputs
(stdout and stderr) in the test stream
files. Valid values: 'stdout', for
allowing only standard output, 'stderr',
to allow only standard error, 'all',
to allow both standard output and error
(default), and 'none', to allow
none to be recorded.
:type allow_output_check: str
:param shell: Whether to run the subprocess in a subshell.
:type shell: bool
:param env: Use extra environment variables.
:type env: dict
:param sudo: Whether the command requires admin privileges to run,
so that sudo will be prepended to the command.
The assumption here is that the user running the command
has a sudo configuration such that a password won't be
prompted. If that's not the case, the command will
straight out fail.
"""
# Now assemble the final command considering the need for sudo
self.cmd = self._prepend_sudo(cmd, sudo, shell)
self.verbose = verbose
self.allow_output_check = allow_output_check
self.result = CmdResult(self.cmd)
self.shell = shell
if env:
self.env = os.environ.copy()
self.env.update(env)
else:
self.env = None
self._popen = None
def __repr__(self):
if self._popen is None:
rc = '(not started)'
elif self.result.exit_status is None:
rc = '(running)'
else:
rc = self.result.exit_status
return '%s(cmd=%r, rc=%r)' % (self.__class__.__name__, self.cmd, rc)
def __str__(self):
if self._popen is None:
rc = '(not started)'
elif self.result.exit_status is None:
rc = '(running)'
else:
rc = '(finished with exit status=%d)' % self.result.exit_status
return '%s %s' % (self.cmd, rc)
@staticmethod
def _prepend_sudo(cmd, sudo, shell):
if sudo and os.getuid() != 0:
try:
sudo_cmd = '%s -n' % path.find_command('sudo')
except path.CmdNotFoundError as details:
log.error(details)
log.error('Parameter sudo=True provided, but sudo was '
'not found. Please consider adding sudo to '
'your OS image')
return cmd
if shell:
if ' -s' not in sudo_cmd:
sudo_cmd = '%s -s' % sudo_cmd
cmd = '%s %s' % (sudo_cmd, cmd)
return cmd
def _init_subprocess(self):
if self._popen is None:
if self.verbose:
log.info("Running '%s'", self.cmd)
if self.shell is False:
cmd = shlex.split(self.cmd)
else:
cmd = self.cmd
try:
self._popen = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=self.shell,
env=self.env)
except OSError as details:
if details.errno == 2:
exc = OSError("File '%s' not found" % self.cmd.split()[0])
exc.errno = 2
raise exc
else:
raise
self.start_time = time.time()
self.stdout_file = StringIO()
self.stderr_file = StringIO()
self.stdout_lock = threading.Lock()
self.stdout_thread = threading.Thread(target=self._fd_drainer,
name="%s-stdout" % self.cmd,
args=[self._popen.stdout])
self.stdout_thread.daemon = True
self.stderr_lock = threading.Lock()
self.stderr_thread = threading.Thread(target=self._fd_drainer,
name="%s-stderr" % self.cmd,
args=[self._popen.stderr])
self.stderr_thread.daemon = True
self.stdout_thread.start()
self.stderr_thread.start()
def signal_handler(signum, frame):
self.result.interrupted = "signal/ctrl+c"
self.wait()
try:
signal.signal(signal.SIGINT, signal_handler)
except ValueError:
if self.verbose:
log.info("Command %s running on a thread", self.cmd)
def _fd_drainer(self, input_pipe):
"""
Read from input_pipe, storing and logging output.
:param input_pipe: File like object to the stream.
"""
stream_prefix = "%s"
if input_pipe == self._popen.stdout:
prefix = '[stdout] %s'
if self.allow_output_check in ['none', 'stderr']:
stream_logger = None
else:
stream_logger = stdout_log
output_file = self.stdout_file
lock = self.stdout_lock
elif input_pipe == self._popen.stderr:
prefix = '[stderr] %s'
if self.allow_output_check in ['none', 'stdout']:
stream_logger = None
else:
stream_logger = stderr_log
output_file = self.stderr_file
lock = self.stderr_lock
fileno = input_pipe.fileno()
bfr = ''
while True:
tmp = os.read(fileno, 1024)
if tmp == '':
if self.verbose and bfr:
for line in bfr.splitlines():
log.debug(prefix, line)
if stream_logger is not None:
stream_logger.debug(stream_prefix, line)
break
lock.acquire()
try:
output_file.write(tmp)
if self.verbose:
bfr += tmp
if tmp.endswith('\n'):
for line in bfr.splitlines():
log.debug(prefix, line)
if stream_logger is not None:
stream_logger.debug(stream_prefix, line)
bfr = ''
finally:
lock.release()
def _fill_results(self, rc):
self._init_subprocess()
self.result.exit_status = rc
if self.result.duration == 0:
self.result.duration = time.time() - self.start_time
if self.verbose:
log.info("Command '%s' finished with %s after %ss", self.cmd, rc,
self.result.duration)
self.result.pid = self._popen.pid
self._fill_streams()
def _fill_streams(self):
"""
Close subprocess stdout and stderr, and put values into result obj.
"""
# Cleaning up threads
self.stdout_thread.join()
self.stderr_thread.join()
# Clean subprocess pipes and populate stdout/err
self._popen.stdout.close()
self._popen.stderr.close()
self.result.stdout = self.get_stdout()
self.result.stderr = self.get_stderr()
def start(self):
"""
Start running the subprocess.
This method is particularly useful for background processes, since
you can start the subprocess and not block your test flow.
:return: Subprocess PID.
:rtype: int
"""
self._init_subprocess()
return self._popen.pid
def get_stdout(self):
"""
Get the full stdout of the subprocess so far.
:return: Standard output of the process.
:rtype: str
"""
self._init_subprocess()
self.stdout_lock.acquire()
stdout = self.stdout_file.getvalue()
self.stdout_lock.release()
return stdout
def get_stderr(self):
"""
Get the full stderr of the subprocess so far.
:return: Standard error of the process.
:rtype: str
"""
self._init_subprocess()
self.stderr_lock.acquire()
stderr = self.stderr_file.getvalue()
self.stderr_lock.release()
return stderr
def terminate(self):
"""
Send a :attr:`signal.SIGTERM` to the process.
"""
self._init_subprocess()
self.send_signal(signal.SIGTERM)
def kill(self):
"""
Send a :attr:`signal.SIGKILL` to the process.
"""
self._init_subprocess()
self.send_signal(signal.SIGKILL)
def send_signal(self, sig):
"""
Send the specified signal to the process.
:param sig: Signal to send.
"""
self._init_subprocess()
self._popen.send_signal(sig)
def poll(self):
"""
Call the subprocess poll() method, fill results if rc is not None.
"""
self._init_subprocess()
rc = self._popen.poll()
if rc is not None:
self._fill_results(rc)
return rc
def wait(self):
"""
        Call the subprocess wait() method, fill results if rc is not None.
"""
self._init_subprocess()
rc = self._popen.wait()
if rc is not None:
self._fill_results(rc)
return rc
def stop(self):
"""
Stop background subprocess.
Call this method to terminate the background subprocess and
        wait for its results.
"""
self._init_subprocess()
if self.result.exit_status is None:
self.terminate()
return self.wait()
def get_pid(self):
"""
Reports PID of this process
"""
self._init_subprocess()
return self._popen.pid
def run(self, timeout=None, sig=signal.SIGTERM):
"""
Start a process and wait for it to end, returning the result attr.
If the process was already started using .start(), this will simply
wait for it to end.
:param timeout: Time (seconds) we'll wait until the process is
finished. If it's not, we'll try to terminate it
and get a status.
:type timeout: float
:param sig: Signal to send to the process in case it did not end after
the specified timeout.
:type sig: int
:returns: The command result object.
:rtype: A :class:`CmdResult` instance.
"""
def timeout_handler():
self.send_signal(sig)
self.result.interrupted = "timeout after %ss" % timeout
self._init_subprocess()
if timeout is None:
self.wait()
elif timeout > 0.0:
timer = threading.Timer(timeout, timeout_handler)
try:
timer.start()
self.wait()
finally:
timer.cancel()
if self.result.exit_status is None:
stop_time = time.time() + 1
while time.time() < stop_time:
self.poll()
if self.result.exit_status is not None:
break
else:
self.kill()
self.poll()
# If all this work fails, we're dealing with a zombie process.
e_msg = 'Zombie Process %s' % self._popen.pid
assert self.result.exit_status is not None, e_msg
return self.result
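# Example (illustrative only, not part of the original module): a minimal
# sketch of driving SubProcess as a background process. The command string
# below is a placeholder; any long-running command works the same way.
#
#   proc = SubProcess("tail -f /var/log/messages")
#   proc.start()              # returns the PID immediately, does not block
#   ...                       # exercise the code under test here
#   result = proc.stop()      # SIGTERM the process and collect its results
#   print(result.exit_status)
#   print(result.stdout)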
class WrapSubProcess(SubProcess):
"""
    Wrap subprocess inside a utility program.
"""
def __init__(self, cmd, verbose=True, allow_output_check='all',
shell=False, env=None, wrapper=None, sudo=False):
if wrapper is None and CURRENT_WRAPPER is not None:
wrapper = CURRENT_WRAPPER
self.wrapper = wrapper
if self.wrapper:
if not os.path.exists(self.wrapper):
raise IOError("No such wrapper: '%s'" % self.wrapper)
cmd = wrapper + ' ' + cmd
super(WrapSubProcess, self).__init__(cmd, verbose, allow_output_check,
shell, env, sudo)
class GDBSubProcess(object):
"""
Runs a subprocess inside the GNU Debugger
"""
def __init__(self, cmd, verbose=True, allow_output_check='all',
shell=False, env=None, sudo=False):
"""
Creates the subprocess object, stdout/err, reader threads and locks.
:param cmd: Command line to run.
:type cmd: str
:param verbose: Whether to log the command run and stdout/stderr.
Currently unused and provided for compatibility only.
:type verbose: bool
:param allow_output_check: Whether to log the command stream outputs
(stdout and stderr) in the test stream
files. Valid values: 'stdout', for
allowing only standard output, 'stderr',
to allow only standard error, 'all',
to allow both standard output and error
(default), and 'none', to allow
none to be recorded. Currently unused and
provided for compatibility only.
:type allow_output_check: str
:param sudo: This param will be ignored in this implementation,
since the GDB wrapping code does not have support to run
commands under sudo just yet.
"""
self.cmd = cmd
self.args = shlex.split(cmd)
self.binary = self.args[0]
self.binary_path = os.path.abspath(self.cmd)
self.result = CmdResult(cmd)
self.gdb_server = gdb.GDBServer(gdb.GDBSERVER_PATH)
self.gdb = gdb.GDB(gdb.GDB_PATH)
self.gdb.connect(self.gdb_server.port)
self.gdb.set_file(self.binary)
def _get_breakpoints(self):
breakpoints = []
for expr in gdb.GDB_RUN_BINARY_NAMES_EXPR:
expr_binary_name, breakpoint = split_gdb_expr(expr)
binary_name = os.path.basename(self.binary)
if expr_binary_name == binary_name:
breakpoints.append(breakpoint)
if not breakpoints:
breakpoints.append(gdb.GDB.DEFAULT_BREAK)
return breakpoints
def create_and_wait_on_resume_fifo(self, path):
"""
Creates a FIFO file and waits until it's written to
:param path: the path that the file will be created
:type path: str
:returns: first character that was written to the fifo
:rtype: str
"""
os.mkfifo(path)
f = open(path, 'r')
c = f.read(1)
f.close()
os.unlink(path)
return c
def generate_gdb_connect_cmds(self):
current_test = runtime.CURRENT_TEST
if current_test is not None:
binary_name = os.path.basename(self.binary)
script_name = '%s.gdb.connect_commands' % binary_name
path = os.path.join(current_test.outputdir, script_name)
cmds = open(path, 'w')
cmds.write('file %s\n' % os.path.abspath(self.binary))
cmds.write('target extended-remote :%s\n' % self.gdb_server.port)
cmds.close()
return path
def generate_gdb_connect_sh(self):
cmds = self.generate_gdb_connect_cmds()
if not cmds:
return
current_test = runtime.CURRENT_TEST
if current_test is not None:
binary_name = os.path.basename(self.binary)
fifo_name = "%s.gdb.cont.fifo" % os.path.basename(binary_name)
fifo_path = os.path.join(current_test.outputdir, fifo_name)
script_name = '%s.gdb.sh' % binary_name
script_path = os.path.join(current_test.outputdir, script_name)
script = open(script_path, 'w')
script.write("#!/bin/sh\n")
script.write("%s -x %s\n" % (gdb.GDB_PATH, cmds))
script.write("echo -n 'C' > %s\n" % fifo_path)
script.close()
os.chmod(script_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
return (script_path, fifo_path)
def generate_core(self):
core_name = "%s.core" % os.path.basename(self.binary)
core_path = os.path.join(runtime.CURRENT_TEST.outputdir, core_name)
gcore_cmd = 'gcore %s' % core_path
r = self.gdb.cli_cmd(gcore_cmd)
        if r.result.class_ != 'done':
raise gdb.UnexpectedResponseError
# also copy the binary as it's needed with the core
shutil.copy(self.binary, runtime.CURRENT_TEST.outputdir)
return core_path
def handle_break_hit(self, response):
self.gdb.disconnect()
script_path, fifo_path = self.generate_gdb_connect_sh()
msg = ("\n\nTEST PAUSED because of debugger breakpoint. "
"To DEBUG your application run:\n%s\n\n"
"NOTE: please use *disconnect* command in gdb before exiting, "
"or else the debugged process will be KILLED\n" % script_path)
runtime.CURRENT_TEST.paused = True
runtime.CURRENT_TEST.paused_msg = msg
runtime.CURRENT_TEST.report_state()
runtime.CURRENT_TEST.paused_msg = ''
ret = self.create_and_wait_on_resume_fifo(fifo_path)
runtime.CURRENT_TEST.paused_msg = ("\rResuming ...")
runtime.CURRENT_TEST.report_state()
runtime.CURRENT_TEST.paused_msg = ''
return ret
def handle_fatal_signal(self, response):
script_path, fifo_path = self.generate_gdb_connect_sh()
msg = ("\n\nTEST PAUSED because inferior process received a FATAL SIGNAL. "
"To DEBUG your application run:\n%s\n\n" % script_path)
if gdb.GDB_ENABLE_CORE:
core = self.generate_core()
msg += ("\nAs requested, a core dump has been generated "
"automatically at the following location:\n%s\n") % core
self.gdb.disconnect()
runtime.CURRENT_TEST.paused = True
runtime.CURRENT_TEST.paused_msg = msg
runtime.CURRENT_TEST.report_state()
runtime.CURRENT_TEST.paused_msg = ''
ret = self.create_and_wait_on_resume_fifo(fifo_path)
runtime.CURRENT_TEST.paused_msg = ("\rResuming ...")
runtime.CURRENT_TEST.report_state()
runtime.CURRENT_TEST.paused_msg = ''
return ret
def _is_thread_stopped(self):
result = False
thread_info_result = self.gdb.cmd("-thread-info")
thread_info_mi_result = thread_info_result.result
if hasattr(thread_info_mi_result, 'result'):
thread_info = thread_info_mi_result.result
current_thread = thread_info.current_thread_id
for thread in thread_info.threads:
if current_thread == thread.id and thread.state == "stopped":
result = True
break
return result
@staticmethod
def _get_exit_status(parsed_msg):
"""
Returns the exit code converted to an integer
"""
code = parsed_msg.result.exit_code
if (code.startswith('0x') and len(code) > 2):
return int(code[2:], 16)
elif (code.startswith('0') and len(code) > 1):
return int(code[1:], 8)
else:
return int(code)
def wait_for_exit(self):
"""
Waits until debugger receives a message about the binary exit
"""
result = False
messages = []
while True:
try:
msgs = self.gdb.read_until_break()
messages += msgs
except Exception:
pass
try:
msg = messages.pop(0)
parsed_msg = gdb.parse_mi(msg)
if gdb.is_exit(parsed_msg):
self.result.exit_status = self._get_exit_status(parsed_msg)
result = True
break
elif gdb.is_break_hit(parsed_msg):
# waits on fifo read() until end of debug session is notified
r = self.handle_break_hit(parsed_msg)
if r == 'C':
self.gdb.connect(self.gdb_server.port)
if self._is_thread_stopped():
r = self.gdb.cli_cmd("continue")
else:
                        log.warn('Binary "%s" terminated inside the '
                                 'debugger before avocado was resumed. '
                                 'Because important information about the '
                                 'process was lost, the results are '
                                 'undefined. The test is going to be '
                                 'skipped. Please let avocado finish the '
                                 'execution of your binary to have '
                                 'dependable results.', self.binary)
# pylint: disable=E0702
if UNDEFINED_BEHAVIOR_EXCEPTION is not None:
raise UNDEFINED_BEHAVIOR_EXCEPTION
elif gdb.is_fatal_signal(parsed_msg):
# waits on fifo read() until end of debug session is notified
r = self.handle_fatal_signal(parsed_msg)
log.warn('Because "%s" received a fatal signal, this test '
'is going to be skipped.', self.binary)
# pylint: disable=E0702
if UNDEFINED_BEHAVIOR_EXCEPTION is not None:
raise UNDEFINED_BEHAVIOR_EXCEPTION
except IndexError:
continue
return result
def _run_pre_commands(self):
"""
Run commands if user passed a commands file with --gdb-prerun-commands
"""
binary_name = os.path.basename(self.binary)
# The commands file can be specific to a given binary or universal,
# start checking for specific ones first
prerun_commands_path = gdb.GDB_PRERUN_COMMANDS.get(
binary_name,
gdb.GDB_PRERUN_COMMANDS.get('', None))
if prerun_commands_path is not None:
prerun_commands = open(prerun_commands_path).readlines()
for command in prerun_commands:
self.gdb.cmd(command)
def run(self, timeout=None):
for b in self._get_breakpoints():
self.gdb.set_break(b, ignore_error=True)
self._run_pre_commands()
result = self.gdb.run(self.args[1:])
# Collect gdbserver stdout and stderr file information for debugging
# based on its process ID and stream (stdout or stderr)
current_test = runtime.CURRENT_TEST
if current_test is not None:
stdout_name = 'gdbserver.%s.stdout' % self.gdb_server.process.pid
stdout_path = os.path.join(current_test.logdir, stdout_name)
stderr_name = 'gdbserver.%s.stderr' % self.gdb_server.process.pid
stderr_path = os.path.join(current_test.logdir, stderr_name)
while True:
r = self.wait_for_exit()
if r:
self.gdb.disconnect()
# Now collect the gdbserver stdout and stderr file themselves
# and populate the CommandResult stdout and stderr
if current_test is not None:
if os.path.exists(self.gdb_server.stdout_path):
shutil.copy(self.gdb_server.stdout_path, stdout_path)
self.result.stdout = open(stdout_path, 'r').read()
if os.path.exists(self.gdb_server.stderr_path):
shutil.copy(self.gdb_server.stderr_path, stderr_path)
self.result.stderr = open(stderr_path, 'r').read()
self.gdb_server.exit()
return self.result
def split_gdb_expr(expr):
"""
Splits a GDB expr into (binary_name, breakpoint_location)
Returns :attr:`avocado.gdb.GDB.DEFAULT_BREAK` as the default breakpoint
if one is not given.
:param expr: an expression of the form <binary_name>[:<breakpoint>]
:type expr: str
:returns: a (binary_name, breakpoint_location) tuple
:rtype: tuple
"""
expr_split = expr.split(':', 1)
if len(expr_split) == 2:
r = tuple(expr_split)
else:
r = (expr_split[0], gdb.GDB.DEFAULT_BREAK)
return r
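# Example (illustrative, not in the original source): the two accepted forms
# of a GDB run expression and what split_gdb_expr returns for each.
#
#   split_gdb_expr('foo:main')  ->  ('foo', 'main')
#   split_gdb_expr('foo')       ->  ('foo', gdb.GDB.DEFAULT_BREAK)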
def should_run_inside_gdb(cmd):
"""
    Whether the given command should be run inside the GNU debugger
:param cmd: the command arguments, from where we extract the binary name
"""
if not gdb.GDB_RUN_BINARY_NAMES_EXPR:
return False
try:
args = shlex.split(cmd)
except ValueError:
log.warning("Unable to check whether command '%s' should run inside "
"GDB, fallback to simplified method...", cmd)
args = cmd.split()
cmd_binary_name = os.path.basename(args[0])
for expr in gdb.GDB_RUN_BINARY_NAMES_EXPR:
binary_name = os.path.basename(expr.split(':', 1)[0])
if cmd_binary_name == binary_name:
return True
return False
def should_run_inside_wrapper(cmd):
"""
    Whether the given command should be run inside the wrapper utility.
:param cmd: the command arguments, from where we extract the binary name
"""
global CURRENT_WRAPPER
CURRENT_WRAPPER = None
args = shlex.split(cmd)
cmd_binary_name = args[0]
for script, cmd_expr in WRAP_PROCESS_NAMES_EXPR:
if fnmatch.fnmatch(cmd_binary_name, cmd_expr):
CURRENT_WRAPPER = script
if WRAP_PROCESS is not None and CURRENT_WRAPPER is None:
CURRENT_WRAPPER = WRAP_PROCESS
if CURRENT_WRAPPER is None:
return False
else:
return True
def get_sub_process_klass(cmd):
"""
Which sub process implementation should be used
Either the regular one, or the GNU Debugger version
:param cmd: the command arguments, from where we extract the binary name
"""
if should_run_inside_gdb(cmd):
return GDBSubProcess
elif should_run_inside_wrapper(cmd):
return WrapSubProcess
else:
return SubProcess
def run(cmd, timeout=None, verbose=True, ignore_status=False,
allow_output_check='all', shell=False, env=None, sudo=False):
"""
Run a subprocess, returning a CmdResult object.
:param cmd: Command line to run.
:type cmd: str
:param timeout: Time limit in seconds before attempting to kill the
running process. This function will take a few seconds
longer than 'timeout' to complete if it has to kill the
process.
:type timeout: float
:param verbose: Whether to log the command run and stdout/stderr.
:type verbose: bool
:param ignore_status: Whether to raise an exception when command returns
                          != 0 (False), or not (True).
:type ignore_status: bool
:param allow_output_check: Whether to log the command stream outputs
(stdout and stderr) in the test stream
files. Valid values: 'stdout', for
allowing only standard output, 'stderr',
to allow only standard error, 'all',
to allow both standard output and error
(default), and 'none', to allow
none to be recorded.
:type allow_output_check: str
:param shell: Whether to run the command on a subshell
:type shell: bool
:param env: Use extra environment variables
:type env: dict
:param sudo: Whether the command requires admin privileges to run,
so that sudo will be prepended to the command.
The assumption here is that the user running the command
has a sudo configuration such that a password won't be
prompted. If that's not the case, the command will
straight out fail.
    :return: A :class:`CmdResult` object.
:raise: :class:`CmdError`, if ``ignore_status=False``.
"""
klass = get_sub_process_klass(cmd)
sp = klass(cmd=cmd, verbose=verbose,
allow_output_check=allow_output_check, shell=shell, env=env,
sudo=sudo)
cmd_result = sp.run(timeout=timeout)
fail_condition = cmd_result.exit_status != 0 or cmd_result.interrupted
if fail_condition and not ignore_status:
raise CmdError(cmd, sp.result)
return cmd_result
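# Example (illustrative only): typical use of run(). The command shown is a
# placeholder; any command available on the system would do.
#
#   result = run('ls -l /tmp', ignore_status=True)
#   print(result.exit_status, result.duration)
#   print(result.stdout)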
def system(cmd, timeout=None, verbose=True, ignore_status=False,
allow_output_check='all', shell=False, env=None, sudo=False):
"""
Run a subprocess, returning its exit code.
:param cmd: Command line to run.
:type cmd: str
:param timeout: Time limit in seconds before attempting to kill the
running process. This function will take a few seconds
longer than 'timeout' to complete if it has to kill the
process.
:type timeout: float
:param verbose: Whether to log the command run and stdout/stderr.
:type verbose: bool
:param ignore_status: Whether to raise an exception when command returns
                          != 0 (False), or not (True).
:type ignore_status: bool
:param allow_output_check: Whether to log the command stream outputs
(stdout and stderr) in the test stream
files. Valid values: 'stdout', for
allowing only standard output, 'stderr',
to allow only standard error, 'all',
to allow both standard output and error
(default), and 'none', to allow
none to be recorded.
:type allow_output_check: str
:param shell: Whether to run the command on a subshell
:type shell: bool
:param env: Use extra environment variables.
:type env: dict
:param sudo: Whether the command requires admin privileges to run,
so that sudo will be prepended to the command.
The assumption here is that the user running the command
has a sudo configuration such that a password won't be
prompted. If that's not the case, the command will
straight out fail.
:return: Exit code.
:rtype: int
:raise: :class:`CmdError`, if ``ignore_status=False``.
"""
cmd_result = run(cmd=cmd, timeout=timeout, verbose=verbose, ignore_status=ignore_status,
allow_output_check=allow_output_check, shell=shell, env=env,
sudo=sudo)
return cmd_result.exit_status
def system_output(cmd, timeout=None, verbose=True, ignore_status=False,
allow_output_check='all', shell=False, env=None, sudo=False):
"""
Run a subprocess, returning its output.
:param cmd: Command line to run.
:type cmd: str
:param timeout: Time limit in seconds before attempting to kill the
running process. This function will take a few seconds
longer than 'timeout' to complete if it has to kill the
process.
:type timeout: float
:param verbose: Whether to log the command run and stdout/stderr.
:type verbose: bool
:param ignore_status: Whether to raise an exception when command returns
                          != 0 (False), or not (True).
:param allow_output_check: Whether to log the command stream outputs
(stdout and stderr) in the test stream
files. Valid values: 'stdout', for
allowing only standard output, 'stderr',
to allow only standard error, 'all',
to allow both standard output and error
(default), and 'none', to allow
none to be recorded.
:type allow_output_check: str
:param shell: Whether to run the command on a subshell
:type shell: bool
:param env: Use extra environment variables
:type env: dict
:param sudo: Whether the command requires admin privileges to run,
so that sudo will be prepended to the command.
The assumption here is that the user running the command
has a sudo configuration such that a password won't be
prompted. If that's not the case, the command will
straight out fail.
:return: Command output.
:rtype: str
:raise: :class:`CmdError`, if ``ignore_status=False``.
"""
cmd_result = run(cmd=cmd, timeout=timeout, verbose=verbose, ignore_status=ignore_status,
allow_output_check=allow_output_check, shell=shell, env=env,
sudo=sudo)
return cmd_result.stdout
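# Example (illustrative only, not part of the original module): the two thin
# wrappers above in one place. Commands are placeholders.
#
#   rc = system('mkdir -p /tmp/scratch')      # just the exit code
#   kernel = system_output('uname -r')        # just the command stdout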
| gpl-2.0 | -5,677,319,964,287,682,000 | 35.249784 | 92 | 0.559548 | false |
Mr-Linus/geekcloud | jumpserver/settings.py | 3 | 5025 | """
Django settings for jumpserver project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import ConfigParser
import getpass
config = ConfigParser.ConfigParser()
BASE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
config.read(os.path.join(BASE_DIR, 'jumpserver.conf'))
KEY_DIR = os.path.join(BASE_DIR, 'keys')
AUTH_USER_MODEL = 'juser.User'
# mail config
MAIL_ENABLE = config.get('mail', 'mail_enable')
EMAIL_HOST = config.get('mail', 'email_host')
EMAIL_PORT = config.get('mail', 'email_port')
EMAIL_HOST_USER = config.get('mail', 'email_host_user')
EMAIL_HOST_PASSWORD = config.get('mail', 'email_host_password')
EMAIL_USE_TLS = config.getboolean('mail', 'email_use_tls')
try:
EMAIL_USE_SSL = config.getboolean('mail', 'email_use_ssl')
except ConfigParser.NoOptionError:
EMAIL_USE_SSL = False
EMAIL_BACKEND = 'django_smtp_ssl.SSLEmailBackend' if EMAIL_USE_SSL else 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_TIMEOUT = 5
# ======== Log ==========
LOG_DIR = os.path.join(BASE_DIR, 'logs')
SSH_KEY_DIR = os.path.join(BASE_DIR, 'keys/role_keys')
KEY = config.get('base', 'key')
URL = config.get('base', 'url')
LOG_LEVEL = config.get('base', 'log')
IP = config.get('base', 'ip')
PORT = config.get('base', 'port')
# ======== Connect ==========
try:
NAV_SORT_BY = config.get('connect', 'nav_sort_by')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
NAV_SORT_BY = 'ip'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!%=t81uof5rhmtpi&(zr=q^fah#$enny-c@mswz49l42j0o49-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['0.0.0.0/8']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django_crontab',
'bootstrapform',
'jumpserver',
'juser',
'jasset',
'jperm',
'jlog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'jumpserver.urls'
WSGI_APPLICATION = 'jumpserver.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {}
if config.get('db', 'engine') == 'mysql':
DB_HOST = config.get('db', 'host')
DB_PORT = config.getint('db', 'port')
DB_USER = config.get('db', 'user')
DB_PASSWORD = config.get('db', 'password')
DB_DATABASE = config.get('db', 'database')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': DB_DATABASE,
'USER': DB_USER,
'PASSWORD': DB_PASSWORD,
'HOST': DB_HOST,
'PORT': DB_PORT,
}
}
elif config.get('db', 'engine') == 'sqlite':
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': config.get('db', 'database'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
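# For reference, the [db] block read above expects a jumpserver.conf section
# shaped like the following. The option names come from the config.get()
# calls above; the values are placeholders only, not project defaults.
#
#   [db]
#   engine = mysql
#   host = 127.0.0.1
#   port = 3306
#   user = jumpserver
#   password = secret
#   database = jumpserver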
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'jumpserver.context_processors.name_proc',
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
BOOTSTRAP_COLUMN_COUNT = 10
CRONJOBS = [
('0 1 * * *', 'jasset.asset_api.asset_ansible_update_all'),
('*/10 * * * *', 'jlog.log_api.kill_invalid_connection'),
]
| gpl-2.0 | -8,093,519,214,317,810,000 | 27.551136 | 117 | 0.663682 | false |
RobinCPC/algorithm-practice | IntegerArray/searchRange.py | 1 | 1030 | """
#: 34
Title: Search Range
Description:
------
Given a sorted array of integers, find the starting and ending position of a given target value.
Your algorithm's runtime complexity must be in the order of O(log n).
If the target is not found in the array, return `[-1, -1]`.
For example,
Given `[5, 7, 7, 8, 8, 10]` and target value 8,
return `[3, 4]`.
------
Time: O(n)
Space: O(1)
Difficulty: Medium
"""
class Solution(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
        result = [-1, -1]
        if target in nums:
            start = nums.index(target)
            end = nums[::-1].index(target)
            end = len(nums) - end - 1
            return [start, end]
        return result
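    # Alternative sketch (not part of the submitted solution): the problem
    # statement asks for O(log n); two binary searches via the bisect module
    # give the same range. Method name and structure here are hypothetical.
    def searchRangeLogN(self, nums, target):
        """Hypothetical O(log n) variant using bisect; same return contract."""
        import bisect
        left = bisect.bisect_left(nums, target)
        if left == len(nums) or nums[left] != target:
            return [-1, -1]
        right = bisect.bisect_right(nums, target) - 1
        return [left, right]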
if __name__ == '__main__':
sol = Solution()
nums = [5, 7, 7, 8, 8, 9, 10]
target = 7
result = sol.searchRange(nums, target)
print 'Given\n', nums
print 'Target', target
print 'Return ', result
| mit | -3,248,157,489,963,977,700 | 21.888889 | 96 | 0.562136 | false |
erikr/django | tests/queries/models.py | 15 | 17755 | """
Various complex queries that have been problematic in the past.
"""
from __future__ import unicode_literals
import threading
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class DumbCategory(models.Model):
pass
class ProxyCategory(DumbCategory):
class Meta:
proxy = True
@python_2_unicode_compatible
class NamedCategory(DumbCategory):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey(
'self',
models.SET_NULL,
blank=True, null=True,
related_name='children',
)
category = models.ForeignKey(NamedCategory, models.SET_NULL, null=True, default=None)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
tag = models.ForeignKey(Tag, models.SET_NULL, blank=True, null=True)
class Meta:
ordering = ['note']
def __str__(self):
return self.note
def __init__(self, *args, **kwargs):
super(Note, self).__init__(*args, **kwargs)
# Regression for #13227 -- having an attribute that
# is unpicklable doesn't stop you from cloning queries
# that use objects of that type as an argument.
self.lock = threading.Lock()
@python_2_unicode_compatible
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag, models.CASCADE)
notes = models.ManyToManyField(Note)
def __str__(self):
return self.name
@python_2_unicode_compatible
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note, models.CASCADE)
value = models.IntegerField(null=True)
class Meta:
ordering = ['info']
def __str__(self):
return self.info
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo, models.CASCADE)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
modified = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag, blank=True)
creator = models.ForeignKey(Author, models.CASCADE)
note = models.ForeignKey(Note, models.CASCADE)
class Meta:
ordering = ['-note', 'name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, models.SET_NULL, to_field='num', null=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author, models.CASCADE)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __str__(self):
return '%d: %s' % (self.rank, self.author.name)
@python_2_unicode_compatible
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item, models.CASCADE)
class Meta:
ordering = ['item']
def __str__(self):
return self.title
@python_2_unicode_compatible
class Number(models.Model):
num = models.IntegerField()
def __str__(self):
return six.text_type(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField('self')
class Meta:
ordering = ['valid']
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y', models.CASCADE)
class Y(models.Model):
x1 = models.ForeignKey(X, models.CASCADE, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY', models.CASCADE)
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX, models.CASCADE)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self', models.CASCADE)
class Meta:
ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_queryset(self):
qs = super(CustomManager, self).get_queryset()
return qs.filter(public=True, tag__name='t1')
@python_2_unicode_compatible
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag, models.CASCADE)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __str__(self):
return self.data
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
data = models.CharField(max_length=10)
class MemberManager(models.Manager):
def get_queryset(self):
return super(MemberManager, self).get_queryset().select_related("details")
class Member(models.Model):
name = models.CharField(max_length=10)
details = models.OneToOneField(Detail, models.CASCADE, primary_key=True)
objects = MemberManager()
class Child(models.Model):
person = models.OneToOneField(Member, models.CASCADE, primary_key=True)
parent = models.ForeignKey(Member, models.CASCADE, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ['name', 'extra']
class Related(models.Model):
custom = models.ForeignKey(CustomPk, models.CASCADE, null=True)
class CustomPkTag(models.Model):
id = models.CharField(max_length=20, primary_key=True)
custom_pk = models.ManyToManyField(CustomPk)
tag = models.CharField(max_length=20)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
@python_2_unicode_compatible
class Celebrity(models.Model):
name = models.CharField("Name", max_length=20)
greatest_fan = models.ForeignKey("Fan", models.SET_NULL, null=True, unique=True)
def __str__(self):
return self.name
class TvChef(Celebrity):
pass
class Fan(models.Model):
fan_of = models.ForeignKey(Celebrity, models.CASCADE)
# Multiple foreign keys
@python_2_unicode_compatible
class LeafA(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class LeafB(models.Model):
data = models.CharField(max_length=10)
class Join(models.Model):
a = models.ForeignKey(LeafA, models.CASCADE)
b = models.ForeignKey(LeafB, models.CASCADE)
@python_2_unicode_compatible
class ReservedName(models.Model):
name = models.CharField(max_length=20)
order = models.IntegerField()
def __str__(self):
return self.name
# A simpler shared-foreign-key setup that can expose some problems.
@python_2_unicode_compatible
class SharedConnection(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class PointerA(models.Model):
connection = models.ForeignKey(SharedConnection, models.CASCADE)
class PointerB(models.Model):
connection = models.ForeignKey(SharedConnection, models.CASCADE)
# Multi-layer ordering
@python_2_unicode_compatible
class SingleObject(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class RelatedObject(models.Model):
single = models.ForeignKey(SingleObject, models.SET_NULL, null=True)
f = models.IntegerField(null=True)
class Meta:
ordering = ['single']
@python_2_unicode_compatible
class Plaything(models.Model):
name = models.CharField(max_length=10)
others = models.ForeignKey(RelatedObject, models.SET_NULL, null=True)
class Meta:
ordering = ['others']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
name = models.CharField(max_length=20)
created = models.DateTimeField()
def __str__(self):
return self.name
@python_2_unicode_compatible
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Eaten(models.Model):
food = models.ForeignKey(Food, models.SET_NULL, to_field="name", null=True)
meal = models.CharField(max_length=20)
def __str__(self):
return "%s at %s" % (self.food, self.meal)
@python_2_unicode_compatible
class Node(models.Model):
num = models.IntegerField(unique=True)
parent = models.ForeignKey("self", models.SET_NULL, to_field="num", null=True)
def __str__(self):
return "%s" % self.num
# Bug #12252
@python_2_unicode_compatible
class ObjectA(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
def __iter__(self):
# Ticket #23721
assert False, 'type checking should happen without calling model __iter__'
class ProxyObjectA(ObjectA):
class Meta:
proxy = True
class ChildObjectA(ObjectA):
pass
@python_2_unicode_compatible
class ObjectB(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, models.CASCADE)
num = models.PositiveSmallIntegerField()
def __str__(self):
return self.name
class ProxyObjectB(ObjectB):
class Meta:
proxy = True
@python_2_unicode_compatible
class ObjectC(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, models.SET_NULL, null=True)
objectb = models.ForeignKey(ObjectB, models.SET_NULL, null=True)
childobjecta = models.ForeignKey(ChildObjectA, models.SET_NULL, null=True, related_name='ca_pk')
def __str__(self):
return self.name
@python_2_unicode_compatible
class SimpleCategory(models.Model):
name = models.CharField(max_length=15)
def __str__(self):
return self.name
@python_2_unicode_compatible
class SpecialCategory(SimpleCategory):
special_name = models.CharField(max_length=15)
def __str__(self):
return self.name + " " + self.special_name
@python_2_unicode_compatible
class CategoryItem(models.Model):
category = models.ForeignKey(SimpleCategory, models.CASCADE)
def __str__(self):
return "category item: " + str(self.category)
@python_2_unicode_compatible
class OneToOneCategory(models.Model):
new_name = models.CharField(max_length=15)
category = models.OneToOneField(SimpleCategory, models.CASCADE)
def __str__(self):
return "one2one " + self.new_name
class CategoryRelationship(models.Model):
first = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='first_rel')
second = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='second_rel')
class NullableName(models.Model):
name = models.CharField(max_length=20, null=True)
class Meta:
ordering = ['id']
class ModelD(models.Model):
name = models.TextField()
class ModelC(models.Model):
name = models.TextField()
class ModelB(models.Model):
name = models.TextField()
c = models.ForeignKey(ModelC, models.CASCADE)
class ModelA(models.Model):
name = models.TextField()
b = models.ForeignKey(ModelB, models.SET_NULL, null=True)
d = models.ForeignKey(ModelD, models.CASCADE)
@python_2_unicode_compatible
class Job(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
class JobResponsibilities(models.Model):
job = models.ForeignKey(Job, models.CASCADE, to_field='name')
responsibility = models.ForeignKey('Responsibility', models.CASCADE, to_field='description')
@python_2_unicode_compatible
class Responsibility(models.Model):
description = models.CharField(max_length=20, unique=True)
jobs = models.ManyToManyField(Job, through=JobResponsibilities,
related_name='responsibilities')
def __str__(self):
return self.description
# Models for disjunction join promotion low level testing.
class FK1(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK2(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK3(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class BaseA(models.Model):
a = models.ForeignKey(FK1, models.SET_NULL, null=True)
b = models.ForeignKey(FK2, models.SET_NULL, null=True)
c = models.ForeignKey(FK3, models.SET_NULL, null=True)
@python_2_unicode_compatible
class Identifier(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Program(models.Model):
identifier = models.OneToOneField(Identifier, models.CASCADE)
class Channel(models.Model):
programs = models.ManyToManyField(Program)
identifier = models.OneToOneField(Identifier, models.CASCADE)
class Book(models.Model):
title = models.TextField()
chapter = models.ForeignKey('Chapter', models.CASCADE)
class Chapter(models.Model):
title = models.TextField()
paragraph = models.ForeignKey('Paragraph', models.CASCADE)
class Paragraph(models.Model):
text = models.TextField()
page = models.ManyToManyField('Page')
class Page(models.Model):
text = models.TextField()
class MyObject(models.Model):
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True, related_name='children')
data = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
# Models for #17600 regressions
@python_2_unicode_compatible
class Order(models.Model):
id = models.IntegerField(primary_key=True)
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
@python_2_unicode_compatible
class OrderItem(models.Model):
order = models.ForeignKey(Order, models.CASCADE, related_name='items')
status = models.IntegerField()
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
class BaseUser(models.Model):
pass
@python_2_unicode_compatible
class Task(models.Model):
title = models.CharField(max_length=10)
owner = models.ForeignKey(BaseUser, models.CASCADE, related_name='owner')
creator = models.ForeignKey(BaseUser, models.CASCADE, related_name='creator')
def __str__(self):
return self.title
@python_2_unicode_compatible
class Staff(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class StaffUser(BaseUser):
staff = models.OneToOneField(Staff, models.CASCADE, related_name='user')
def __str__(self):
return self.staff
class Ticket21203Parent(models.Model):
parentid = models.AutoField(primary_key=True)
parent_bool = models.BooleanField(default=True)
created = models.DateTimeField(auto_now=True)
class Ticket21203Child(models.Model):
childid = models.AutoField(primary_key=True)
parent = models.ForeignKey(Ticket21203Parent, models.CASCADE)
class Person(models.Model):
name = models.CharField(max_length=128)
@python_2_unicode_compatible
class Company(models.Model):
name = models.CharField(max_length=128)
employees = models.ManyToManyField(Person, related_name='employers', through='Employment')
def __str__(self):
return self.name
class Employment(models.Model):
employer = models.ForeignKey(Company, models.CASCADE)
employee = models.ForeignKey(Person, models.CASCADE)
title = models.CharField(max_length=128)
# Bug #22429
class School(models.Model):
pass
class Student(models.Model):
school = models.ForeignKey(School, models.CASCADE)
class Classroom(models.Model):
school = models.ForeignKey(School, models.CASCADE)
students = models.ManyToManyField(Student, related_name='classroom')
class Ticket23605AParent(models.Model):
pass
class Ticket23605A(Ticket23605AParent):
pass
class Ticket23605B(models.Model):
modela_fk = models.ForeignKey(Ticket23605A, models.CASCADE)
modelc_fk = models.ForeignKey("Ticket23605C", models.CASCADE)
field_b0 = models.IntegerField(null=True)
field_b1 = models.BooleanField(default=False)
class Ticket23605C(models.Model):
field_c0 = models.FloatField()
# db_table names have capital letters to ensure they are quoted in queries.
class Individual(models.Model):
alive = models.BooleanField()
class Meta:
db_table = 'Individual'
class RelatedIndividual(models.Model):
related = models.ForeignKey(Individual, models.CASCADE, related_name='related_individual')
class Meta:
db_table = 'RelatedIndividual'
| bsd-3-clause | -8,513,429,399,777,632,000 | 23.02571 | 103 | 0.689271 | false |
ravwojdyla/incubator-beam | sdks/python/apache_beam/examples/cookbook/bigshuffle.py | 10 | 3561 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A BigShuffle workflow."""
from __future__ import absolute_import
import argparse
import binascii
import logging
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.utils.pipeline_options import PipelineOptions
from apache_beam.utils.pipeline_options import SetupOptions
def crc32line(line):
return binascii.crc32(line) & 0xffffffff
def run(argv=None):
# pylint: disable=expression-not-assigned
parser = argparse.ArgumentParser()
parser.add_argument('--input',
required=True,
help='Input file pattern to process.')
parser.add_argument('--output',
required=True,
help='Output file pattern to write results to.')
parser.add_argument('--checksum_output',
help='Checksum output file pattern.')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
p = beam.Pipeline(options=pipeline_options)
# Read the text file[pattern] into a PCollection.
lines = p | ReadFromText(known_args.input, coder=beam.coders.BytesCoder())
# Count the occurrences of each word.
output = (lines
| 'split' >> beam.Map(
lambda x: (x[:10], x[10:99]))
.with_output_types(beam.typehints.KV[str, str])
| 'group' >> beam.GroupByKey()
| 'format' >> beam.FlatMap(
lambda (key, vals): ['%s%s' % (key, val) for val in vals]))
# Write the output using a "Write" transform that has side effects.
output | WriteToText(known_args.output)
# Optionally write the input and output checksums.
if known_args.checksum_output:
input_csum = (lines
| 'input-csum' >> beam.Map(crc32line)
| 'combine-input-csum' >> beam.CombineGlobally(sum)
| 'hex-format' >> beam.Map(lambda x: '%x' % x))
input_csum | 'write-input-csum' >> WriteToText(
known_args.checksum_output + '-input')
output_csum = (output
| 'output-csum' >> beam.Map(crc32line)
| 'combine-output-csum' >> beam.CombineGlobally(sum)
| 'hex-format-output' >> beam.Map(lambda x: '%x' % x))
output_csum | 'write-output-csum' >> WriteToText(
known_args.checksum_output + '-output')
# Actually run the pipeline (all operations above are deferred).
return p.run()
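# Illustrative invocation (bucket paths below are placeholders, not values
# mandated by the project):
#
#   python bigshuffle.py \
#       --input gs://my-bucket/input* \
#       --output gs://my-bucket/output \
#       --checksum_output gs://my-bucket/checksums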
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
| apache-2.0 | -2,634,683,038,395,595,000 | 36.882979 | 78 | 0.664701 | false |
Samnsparky/cdibase | prog_code/util/report_util_test.py | 1 | 9480 | """Tests for utility functions used in generating CSV downloads.
Copyright (C) 2014 A. Samuel Pottinger ("Sam Pottinger", gleap.org)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
import unittest.mock
from ..struct import models
from ..util import constants
import prog_code.util.db_util as db_util
import prog_code.util.report_util as report_util
TEST_SNAPSHOT_ID = 789
TEST_DB_ID = 123
TEST_STUDY_ID = 456
TEST_STUDY = 'test study'
TEST_BIRTHDAY = '2011/09/12'
TEST_ITEMS_EXCLUDED = 3
TEST_EXTRA_CATEGORIES = 4
TEST_NUM_LANGUAGES = 2
TEST_HARD_OF_HEARING = constants.EXPLICIT_FALSE
TEST_SNAPSHOT = models.SnapshotMetadata(
TEST_SNAPSHOT_ID,
TEST_DB_ID,
TEST_STUDY_ID,
TEST_STUDY,
constants.MALE,
24,
TEST_BIRTHDAY,
'2013/10/12',
20,
25,
100,
TEST_ITEMS_EXCLUDED,
50,
TEST_EXTRA_CATEGORIES,
0,
'english,spanish',
TEST_NUM_LANGUAGES,
'standard',
TEST_HARD_OF_HEARING,
False
)
class TestCDIFormat:
def __init__(self, details):
self.details = details
class ReportUtilTest(unittest.TestCase):
def test_sort_by_study_order(self):
test_rows = [[0]]* 20 + [['word1'], ['word3'], ['word2'], ['word4']]
test_format = TestCDIFormat(
{'categories': [
{'words': ['word1', 'word2']},
{'words': ['word3', 'word4']}
]}
)
sorted_rows = report_util.sort_by_study_order(test_rows, test_format)
self.assertEqual(sorted_rows[20][0], 'word1')
self.assertEqual(sorted_rows[21][0], 'word2')
self.assertEqual(sorted_rows[22][0], 'word3')
self.assertEqual(sorted_rows[23][0], 'word4')
def test_summarize_snapshots(self):
with unittest.mock.patch('prog_code.util.db_util.load_cdi_model') as mock_cdi:
with unittest.mock.patch('prog_code.util.db_util.load_snapshot_contents') as mock_snapshot:
test_snap_1 = TEST_SNAPSHOT.clone()
test_snap_1.cdi_type = 'cdi_type_1'
test_snap_1.session_date = '2015/01/01'
test_snap_2 = TEST_SNAPSHOT.clone()
test_snap_2.cdi_type = 'cdi_type_1'
test_snap_2.session_date = '2015/02/01'
test_snap_3 = TEST_SNAPSHOT.clone()
test_snap_3.cdi_type = 'cdi_type_2'
test_snap_3.session_date = '2015/03/01'
test_metadata = [test_snap_1, test_snap_2, test_snap_3]
test_contents_1 = [
models.SnapshotContent(0, 'word1', 1, 1),
models.SnapshotContent(0, 'word2', 0, 1),
models.SnapshotContent(0, 'word3', 0, 1)
]
test_contents_2 = [
models.SnapshotContent(0, 'word1', 1, 1),
models.SnapshotContent(0, 'word2', 2, 1),
models.SnapshotContent(0, 'word3', 0, 1)
]
test_contents_3 = [
models.SnapshotContent(0, 'word1', 1, 1),
models.SnapshotContent(0, 'word2', 1, 1),
models.SnapshotContent(0, 'word3', 1, 1),
models.SnapshotContent(0, 'word4', 2, 1)
]
mock_cdi.side_effect = [
models.CDIFormat('', '', '', {'count_as_spoken': [1, 2]}),
models.CDIFormat('', '', '', {'count_as_spoken': [1]})
]
mock_snapshot.side_effect = [
test_contents_1,
test_contents_2,
test_contents_3
]
serialization = report_util.summarize_snapshots(test_metadata)
self.assertEqual(serialization['word1'], '2015/01/01')
self.assertEqual(serialization['word2'], '2015/02/01')
self.assertEqual(serialization['word3'], '2015/03/01')
self.assertEqual(serialization['word4'], None)
self.assertEqual(len(mock_cdi.mock_calls), 2)
mock_cdi.assert_any_call('cdi_type_1')
mock_cdi.assert_any_call('cdi_type_2')
self.assertEqual(len(mock_snapshot.mock_calls), 3)
mock_snapshot.assert_any_call(test_metadata[0])
mock_snapshot.assert_any_call(test_metadata[1])
mock_snapshot.assert_any_call(test_metadata[2])
def test_generate_study_report_csv(self):
with unittest.mock.patch('prog_code.util.db_util.load_cdi_model') as mock_cdi:
with unittest.mock.patch('prog_code.util.db_util.load_snapshot_contents') as mock_snapshot:
test_snap_1 = TEST_SNAPSHOT.clone()
test_snap_1.cdi_type = 'cdi_type_1'
test_snap_1.session_date = '2015/01/01'
test_snap_2 = TEST_SNAPSHOT.clone()
test_snap_2.cdi_type = 'cdi_type_1'
test_snap_2.session_date = '2015/02/01'
test_snap_3 = TEST_SNAPSHOT.clone()
test_snap_3.cdi_type = 'cdi_type_1'
test_snap_3.session_date = '2015/03/01'
test_metadata = [test_snap_1, test_snap_2, test_snap_3]
test_contents_1 = [
models.SnapshotContent(0, 'word1', 1, 1),
models.SnapshotContent(0, 'word2', 0, 1),
models.SnapshotContent(0, 'word3', 0, 1)
]
test_contents_2 = [
models.SnapshotContent(0, 'word1', 1, 1),
models.SnapshotContent(0, 'word2', 2, 1),
models.SnapshotContent(0, 'word3', 0, 1)
]
test_contents_3 = [
models.SnapshotContent(0, 'word1', 1, 1),
models.SnapshotContent(0, 'word2', 1, 1),
models.SnapshotContent(0, 'word3', 1, 1)
]
categories = [{
'words': ['word1', 'word2', 'word3']
}]
mock_cdi.side_effect = [
models.CDIFormat('', '', '', {'count_as_spoken': [1, 2], 'categories': categories}),
]
mock_snapshot.side_effect = [
test_contents_1,
test_contents_2,
test_contents_3
]
results = report_util.generate_study_report_csv(
test_metadata,
models.CDIFormat('', '', '', {'count_as_spoken': [1, 2], 'categories': categories})
)
self.assertTrue(results != None)
def test_generate_study_report_zip(self):
with unittest.mock.patch('prog_code.util.db_util.load_cdi_model') as mock_cdi:
with unittest.mock.patch('prog_code.util.db_util.load_snapshot_contents') as mock_snapshot:
test_snap_1 = TEST_SNAPSHOT.clone()
test_snap_1.cdi_type = 'cdi_type_1'
test_snap_1.session_date = '2015/01/01'
test_snap_2 = TEST_SNAPSHOT.clone()
test_snap_2.cdi_type = 'cdi_type_1'
test_snap_2.session_date = '2015/02/01'
test_snap_3 = TEST_SNAPSHOT.clone()
test_snap_3.cdi_type = 'cdi_type_1'
test_snap_3.session_date = '2015/03/01'
test_metadata = [test_snap_1, test_snap_2, test_snap_3]
test_contents_1 = [
models.SnapshotContent(0, 'word1', 1, 1),
models.SnapshotContent(0, 'word2', 0, 1),
models.SnapshotContent(0, 'word3', 0, 1)
]
test_contents_2 = [
models.SnapshotContent(0, 'word1', 1, 1),
models.SnapshotContent(0, 'word2', 2, 1),
models.SnapshotContent(0, 'word3', 0, 1)
]
test_contents_3 = [
models.SnapshotContent(0, 'word1', 1, 1),
models.SnapshotContent(0, 'word2', 1, 1),
models.SnapshotContent(0, 'word3', 1, 1)
]
categories = [{
'words': ['word1', 'word2', 'word3']
}]
mock_cdi.side_effect = [
models.CDIFormat('', '', '', {'count_as_spoken': [1, 2], 'categories': categories}),
]
mock_snapshot.side_effect = [
test_contents_1,
test_contents_2,
test_contents_3
]
results = report_util.generate_study_report(
test_metadata,
models.CDIFormat('', '', '', {'count_as_spoken': [1, 2], 'categories': categories})
)
self.assertTrue(results != None)
| gpl-3.0 | 2,576,852,700,520,960,000 | 37.225806 | 104 | 0.51962 | false |
klahnakoski/JsonSchemaToMarkdown | vendor/jx_python/expression_compiler.py | 3 | 1448 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from pyLibrary import convert
from mo_logs import Log
from mo_dots import coalesce, Data, listwrap, wrap_leaves
from mo_times.dates import Date
true = True
false = False
null = None
EMPTY_DICT = {}
def compile_expression(source):
"""
THIS FUNCTION IS ON ITS OWN FOR MINIMAL GLOBAL NAMESPACE
:param source: PYTHON SOURCE CODE
:return: PYTHON FUNCTION
"""
# FORCE MODULES TO BE IN NAMESPACE
_ = coalesce
_ = listwrap
_ = Date
_ = convert
_ = Log
_ = Data
_ = EMPTY_DICT
_ = re
_ = wrap_leaves
fake_locals = {}
try:
exec(
"""
def output(row, rownum=None, rows=None):
_source = """ + convert.value2quote(source) + """
try:
return """ + source + """
except Exception as e:
Log.error("Problem with dynamic function {{func|quote}}", func=_source, cause=e)
""",
globals(),
fake_locals
)
except Exception as e:
Log.error("Bad source: {{source}}", source=source, cause=e)
return fake_locals['output']
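# Example (illustrative only, not part of the original module); assumes the
# usual first-non-None behaviour of coalesce() imported above:
#
#   f = compile_expression("coalesce(row['a'], 0) + 1")
#   f({'a': 41})    # -> 42
#   f({'a': None})  # -> 1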
| mpl-2.0 | 816,323,967,807,150,700 | 22.354839 | 89 | 0.627072 | false |
natanielruiz/android-yolo | jni-build/jni/include/tensorflow/contrib/learn/python/learn/tests/dataframe/reader_source_test.py | 19 | 3162 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for learn.dataframe.transforms.reader_source."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.learn.python.learn.dataframe.transforms.reader_source as rs
class ReaderSourceTestCase(tf.test.TestCase):
"""Test class for ReaderSource."""
def setUp(self):
super(ReaderSourceTestCase, self).setUp()
self.work_units = [str(x) for x in range(1000)]
def testNoShuffle(self):
id_source = rs.ReaderSource(reader_cls=tf.IdentityReader,
work_units=self.work_units,
batch_size=1,
shuffle=False,
num_threads=1)
index_column, value_column = id_source()
index_tensor = index_column.build()
value_tensor = value_column.build()
self.assertEqual([1], index_tensor.get_shape().as_list())
self.assertEqual([1], value_tensor.get_shape().as_list())
with self.test_session() as sess:
tf.initialize_all_variables().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(50):
index, value = sess.run([index_tensor, value_tensor])
self.assertEqual(i, int(index[0]))
self.assertEqual(i, int(value[0]))
coord.request_stop()
coord.join(threads)
def testYesShuffle(self):
id_source = rs.ReaderSource(reader_cls=tf.IdentityReader,
work_units=self.work_units,
batch_size=1,
shuffle=True,
num_threads=10,
seed=1234)
index_column, value_column = id_source()
cache = {}
index_tensor = index_column.build(cache)
value_tensor = value_column.build(cache)
self.assertEqual([1], index_tensor.get_shape().as_list())
self.assertEqual([1], value_tensor.get_shape().as_list())
seen = set([])
with self.test_session() as sess:
tf.initialize_all_variables().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for _ in range(500):
index, value = sess.run([index_tensor, value_tensor])
self.assertEqual(index, value)
self.assertNotIn(int(value[0]), seen)
seen.add(int(value[0]))
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 821,125,180,497,613,000 | 37.096386 | 85 | 0.629349 | false |
nozuono/calibre-webserver | src/calibre/gui2/tweak_book/spell.py | 2 | 53002 | #!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import cPickle, os, sys
from collections import defaultdict, OrderedDict
from threading import Thread
from functools import partial
from PyQt4.Qt import (
QGridLayout, QApplication, QTreeWidget, QTreeWidgetItem, Qt, QFont, QSize,
QStackedLayout, QLabel, QVBoxLayout, QWidget, QPushButton, QIcon, QMenu,
QDialogButtonBox, QLineEdit, QDialog, QToolButton, QFormLayout, QHBoxLayout,
pyqtSignal, QAbstractTableModel, QModelIndex, QTimer, QTableView, QCheckBox,
QComboBox, QListWidget, QListWidgetItem, QInputDialog, QPlainTextEdit, QKeySequence)
from calibre.constants import __appname__, plugins
from calibre.ebooks.oeb.polish.spell import replace_word, get_all_words, merge_locations, get_checkable_file_names
from calibre.gui2 import choose_files, error_dialog
from calibre.gui2.complete2 import LineEdit
from calibre.gui2.languages import LanguagesEdit
from calibre.gui2.progress_indicator import ProgressIndicator
from calibre.gui2.tweak_book import dictionaries, current_container, set_book_locale, tprefs, editors
from calibre.gui2.tweak_book.widgets import Dialog
from calibre.spell.dictionary import (
builtin_dictionaries, custom_dictionaries, best_locale_for_language,
get_dictionary, DictionaryLocale, dprefs, remove_dictionary, rename_dictionary)
from calibre.spell.import_from import import_from_oxt
from calibre.utils.localization import calibre_langcode_to_name, get_language, get_lang, canonicalize_lang
from calibre.utils.icu import sort_key, primary_sort_key, primary_contains, contains
LANG = 0
COUNTRY = 1
DICTIONARY = 2
_country_map = None
def country_map():
global _country_map
if _country_map is None:
_country_map = cPickle.loads(P('localization/iso3166.pickle', data=True, allow_user_override=False))
return _country_map
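# Example lookup (illustrative, not in the original file), matching how the
# map is consumed when the dictionaries tree is built below:
#   country_map()['names'].get('us', 'us')  # -> display name for an ISO 3166 code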
class AddDictionary(QDialog): # {{{
def __init__(self, parent=None):
QDialog.__init__(self, parent)
self.setWindowTitle(_('Add a dictionary'))
self.l = l = QFormLayout(self)
self.setLayout(l)
self.la = la = QLabel('<p>' + _(
'''{0} supports the use of OpenOffice dictionaries for spell checking. You can
download more dictionaries from <a href="{1}">the OpenOffice extensions repository</a>.
The dictionary will download as an .oxt file. Simply specify the path to the
downloaded .oxt file here to add the dictionary to {0}.'''.format(
__appname__, 'http://extensions.openoffice.org'))+'<p>')
la.setWordWrap(True)
la.setOpenExternalLinks(True)
la.setMinimumWidth(450)
l.addRow(la)
self.h = h = QHBoxLayout()
self.path = p = QLineEdit(self)
p.setPlaceholderText(_('Path to OXT file'))
h.addWidget(p)
self.b = b = QToolButton(self)
b.setIcon(QIcon(I('document_open.png')))
b.setToolTip(_('Browse for an OXT file'))
b.clicked.connect(self.choose_file)
h.addWidget(b)
l.addRow(_('&Path to OXT file:'), h)
l.labelForField(h).setBuddy(p)
self.nick = n = QLineEdit(self)
n.setPlaceholderText(_('Choose a nickname for this dictionary'))
l.addRow(_('&Nickname:'), n)
self.bb = bb = QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
bb.accepted.connect(self.accept)
bb.rejected.connect(self.reject)
l.addRow(bb)
b.setFocus(Qt.OtherFocusReason)
def choose_file(self):
path = choose_files(self, 'choose-dict-for-import', _('Choose OXT Dictionary'), filters=[
(_('Dictionaries'), ['oxt'])], all_files=False, select_only_single_file=True)
if path is not None:
self.path.setText(path[0])
if not self.nickname:
n = os.path.basename(path[0])
self.nick.setText(n.rpartition('.')[0])
@property
def nickname(self):
return unicode(self.nick.text()).strip()
def accept(self):
nick = self.nickname
if not nick:
return error_dialog(self, _('Must specify nickname'), _(
'You must specify a nickname for this dictionary'), show=True)
if nick in {d.name for d in custom_dictionaries()}:
return error_dialog(self, _('Nickname already used'), _(
'A dictionary with the nick name "%s" already exists.') % nick, show=True)
oxt = unicode(self.path.text())
try:
num = import_from_oxt(oxt, nick)
except:
import traceback
return error_dialog(self, _('Failed to import dictionaries'), _(
'Failed to import dictionaries from %s. Click "Show Details" for more information') % oxt,
det_msg=traceback.format_exc(), show=True)
if num == 0:
return error_dialog(self, _('No dictionaries'), _(
'No dictionaries were found in %s') % oxt, show=True)
QDialog.accept(self)
# }}}
# User Dictionaries {{{
class UserWordList(QListWidget):
def __init__(self, parent=None):
QListWidget.__init__(self, parent)
def contextMenuEvent(self, ev):
m = QMenu(self)
m.addAction(_('Copy selected words to clipboard'), self.copy_to_clipboard)
m.addAction(_('Select all words'), self.select_all)
m.exec_(ev.globalPos())
def select_all(self):
for item in (self.item(i) for i in xrange(self.count())):
item.setSelected(True)
def copy_to_clipboard(self):
words = []
for item in (self.item(i) for i in xrange(self.count())):
if item.isSelected():
words.append(item.data(Qt.UserRole).toPyObject()[0])
if words:
QApplication.clipboard().setText('\n'.join(words))
def keyPressEvent(self, ev):
if ev == QKeySequence.Copy:
self.copy_to_clipboard()
ev.accept()
return
return QListWidget.keyPressEvent(self, ev)
class ManageUserDictionaries(Dialog):
def __init__(self, parent=None):
self.dictionaries_changed = False
Dialog.__init__(self, _('Manage user dictionaries'), 'manage-user-dictionaries', parent=parent)
def setup_ui(self):
self.l = l = QVBoxLayout(self)
self.h = h = QHBoxLayout()
l.addLayout(h)
l.addWidget(self.bb)
self.bb.clear(), self.bb.addButton(self.bb.Close)
b = self.bb.addButton(_('&New dictionary'), self.bb.ActionRole)
b.setIcon(QIcon(I('spell-check.png')))
b.clicked.connect(self.new_dictionary)
self.dictionaries = d = QListWidget(self)
self.emph_font = f = QFont(self.font())
f.setBold(True)
self.build_dictionaries()
d.currentItemChanged.connect(self.show_current_dictionary)
h.addWidget(d)
l = QVBoxLayout()
h.addLayout(l)
h = QHBoxLayout()
self.remove_button = b = QPushButton(QIcon(I('trash.png')), _('&Remove dictionary'), self)
b.clicked.connect(self.remove_dictionary)
h.addWidget(b)
self.rename_button = b = QPushButton(QIcon(I('modified.png')), _('Re&name dictionary'), self)
b.clicked.connect(self.rename_dictionary)
h.addWidget(b)
self.dlabel = la = QLabel('')
l.addWidget(la)
l.addLayout(h)
self.is_active = a = QCheckBox(_('Mark this dictionary as active'))
self.is_active.stateChanged.connect(self.active_toggled)
l.addWidget(a)
self.la = la = QLabel(_('Words in this dictionary:'))
l.addWidget(la)
self.words = w = UserWordList(self)
w.setSelectionMode(w.ExtendedSelection)
l.addWidget(w)
self.add_word_button = b = QPushButton(_('&Add word'), self)
b.clicked.connect(self.add_word)
b.setIcon(QIcon(I('plus.png')))
l.h = h = QHBoxLayout()
l.addLayout(h)
h.addWidget(b)
self.remove_word_button = b = QPushButton(_('&Remove selected words'), self)
b.clicked.connect(self.remove_word)
b.setIcon(QIcon(I('minus.png')))
h.addWidget(b)
self.import_words_button = b = QPushButton(_('&Import list of words'), self)
b.clicked.connect(self.import_words)
l.addWidget(b)
self.show_current_dictionary()
def sizeHint(self):
return Dialog.sizeHint(self) + QSize(30, 100)
def build_dictionaries(self, current=None):
self.dictionaries.clear()
for dic in sorted(dictionaries.all_user_dictionaries, key=lambda d:sort_key(d.name)):
i = QListWidgetItem(dic.name, self.dictionaries)
i.setData(Qt.UserRole, dic)
if dic.is_active:
i.setData(Qt.FontRole, self.emph_font)
if current == dic.name:
self.dictionaries.setCurrentItem(i)
if current is None and self.dictionaries.count() > 0:
self.dictionaries.setCurrentRow(0)
def new_dictionary(self):
name, ok = QInputDialog.getText(self, _('New dictionary'), _(
'Name of the new dictionary'))
if ok:
name = unicode(name)
if name in {d.name for d in dictionaries.all_user_dictionaries}:
return error_dialog(self, _('Already used'), _(
'A dictionary with the name %s already exists') % name, show=True)
dictionaries.create_user_dictionary(name)
self.dictionaries_changed = True
self.build_dictionaries(name)
self.show_current_dictionary()
def remove_dictionary(self):
d = self.current_dictionary
if d is None:
return
if dictionaries.remove_user_dictionary(d.name):
self.build_dictionaries()
self.dictionaries_changed = True
self.show_current_dictionary()
def rename_dictionary(self):
d = self.current_dictionary
if d is None:
return
name, ok = QInputDialog.getText(self, _('New name'), _(
'New name for the dictionary'))
if ok:
name = unicode(name)
if name == d.name:
return
if name in {d.name for d in dictionaries.all_user_dictionaries}:
return error_dialog(self, _('Already used'), _(
'A dictionary with the name %s already exists') % name, show=True)
if dictionaries.rename_user_dictionary(d.name, name):
self.build_dictionaries(name)
self.dictionaries_changed = True
self.show_current_dictionary()
@property
def current_dictionary(self):
d = self.dictionaries.currentItem()
if d is None:
return
return d.data(Qt.UserRole).toPyObject()
def active_toggled(self):
d = self.current_dictionary
if d is not None:
dictionaries.mark_user_dictionary_as_active(d.name, self.is_active.isChecked())
self.dictionaries_changed = True
for item in (self.dictionaries.item(i) for i in xrange(self.dictionaries.count())):
d = item.data(Qt.UserRole).toPyObject()
item.setData(Qt.FontRole, self.emph_font if d.is_active else None)
def show_current_dictionary(self, *args):
d = self.current_dictionary
if d is None:
return
self.dlabel.setText(_('Configure the dictionary: <b>%s') % d.name)
self.is_active.blockSignals(True)
self.is_active.setChecked(d.is_active)
self.is_active.blockSignals(False)
self.words.clear()
for word, lang in sorted(d.words, key=lambda x:sort_key(x[0])):
i = QListWidgetItem('%s [%s]' % (word, get_language(lang)), self.words)
i.setData(Qt.UserRole, (word, lang))
def add_word(self):
d = QDialog(self)
d.l = l = QFormLayout(d)
d.setWindowTitle(_('Add a word'))
d.w = w = QLineEdit(d)
w.setPlaceholderText(_('Word to add'))
l.addRow(_('&Word:'), w)
d.loc = loc = LanguagesEdit(parent=d)
l.addRow(_('&Language:'), d.loc)
loc.lang_codes = [canonicalize_lang(get_lang())]
d.bb = bb = QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
bb.accepted.connect(d.accept), bb.rejected.connect(d.reject)
l.addRow(bb)
if d.exec_() != d.Accepted:
return
word = unicode(w.text())
lang = (loc.lang_codes or [canonicalize_lang(get_lang())])[0]
if not word:
return
if (word, lang) not in self.current_dictionary.words:
dictionaries.add_to_user_dictionary(self.current_dictionary.name, word, DictionaryLocale(lang, None))
dictionaries.clear_caches()
self.show_current_dictionary()
self.dictionaries_changed = True
idx = self.find_word(word, lang)
if idx > -1:
self.words.scrollToItem(self.words.item(idx))
def import_words(self):
d = QDialog(self)
d.l = l = QFormLayout(d)
d.setWindowTitle(_('Import list of words'))
d.w = w = QPlainTextEdit(d)
l.addRow(QLabel(_('Enter a list of words, one per line')))
l.addRow(w)
d.b = b = QPushButton(_('Paste from clipboard'))
l.addRow(b)
b.clicked.connect(w.paste)
d.la = la = QLabel(_('Words in the user dictionary must have an associated language. Choose the language below:'))
la.setWordWrap(True)
l.addRow(la)
d.le = le = LanguagesEdit(d)
lc = canonicalize_lang(get_lang())
if lc:
le.lang_codes = [lc]
l.addRow(_('&Language:'), le)
d.bb = bb = QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
l.addRow(bb)
bb.accepted.connect(d.accept), bb.rejected.connect(d.reject)
if d.exec_() != d.Accepted:
return
lc = le.lang_codes
if not lc:
return error_dialog(self, _('Must specify language'), _(
'You must specify a language to import words'), show=True)
words = set(filter(None, [x.strip() for x in unicode(w.toPlainText()).splitlines()]))
lang = lc[0]
words = {(w, lang) for w in words} - self.current_dictionary.words
if dictionaries.add_to_user_dictionary(self.current_dictionary.name, words, None):
dictionaries.clear_caches()
self.show_current_dictionary()
self.dictionaries_changed = True
def remove_word(self):
words = {i.data(Qt.UserRole).toPyObject() for i in self.words.selectedItems()}
if words:
kwords = [(w, DictionaryLocale(l, None)) for w, l in words]
d = self.current_dictionary
if dictionaries.remove_from_user_dictionary(d.name, kwords):
dictionaries.clear_caches()
self.show_current_dictionary()
self.dictionaries_changed = True
def find_word(self, word, lang):
key = (word, lang)
for i in xrange(self.words.count()):
if self.words.item(i).data(Qt.UserRole).toPyObject() == key:
return i
return -1
@classmethod
def test(cls):
d = cls()
d.exec_()
# }}}
class ManageDictionaries(Dialog): # {{{
def __init__(self, parent=None):
Dialog.__init__(self, _('Manage dictionaries'), 'manage-dictionaries', parent=parent)
def sizeHint(self):
ans = Dialog.sizeHint(self)
ans.setWidth(ans.width() + 250)
ans.setHeight(ans.height() + 200)
return ans
def setup_ui(self):
self.l = l = QGridLayout(self)
self.setLayout(l)
self.stack = s = QStackedLayout()
self.helpl = la = QLabel('<p>')
la.setWordWrap(True)
self.pcb = pc = QPushButton(self)
pc.clicked.connect(self.set_preferred_country)
self.lw = w = QWidget(self)
self.ll = ll = QVBoxLayout(w)
ll.addWidget(pc)
self.dw = w = QWidget(self)
self.dl = dl = QVBoxLayout(w)
self.fb = b = QPushButton(self)
b.clicked.connect(self.set_favorite)
self.remove_dictionary_button = rd = QPushButton(_('&Remove this dictionary'), w)
rd.clicked.connect(self.remove_dictionary)
dl.addWidget(b), dl.addWidget(rd)
w.setLayout(dl)
s.addWidget(la)
s.addWidget(self.lw)
s.addWidget(w)
self.dictionaries = d = QTreeWidget(self)
d.itemChanged.connect(self.data_changed, type=Qt.QueuedConnection)
self.build_dictionaries()
d.setCurrentIndex(d.model().index(0, 0))
d.header().close()
d.currentItemChanged.connect(self.current_item_changed)
self.current_item_changed()
l.addWidget(d)
l.addLayout(s, 0, 1)
self.bb.clear()
self.bb.addButton(self.bb.Close)
b = self.bb.addButton(_('Manage &user dictionaries'), self.bb.ActionRole)
b.setIcon(QIcon(I('user_profile.png')))
b.setToolTip(_(
            'Manage the list of user dictionaries (dictionaries to which you can add words)'))
b.clicked.connect(self.manage_user_dictionaries)
b = self.bb.addButton(_('&Add dictionary'), self.bb.ActionRole)
b.setToolTip(_(
'Add a new dictionary that you downloaded from the internet'))
b.setIcon(QIcon(I('plus.png')))
b.clicked.connect(self.add_dictionary)
l.addWidget(self.bb, l.rowCount(), 0, 1, l.columnCount())
def manage_user_dictionaries(self):
d = ManageUserDictionaries(self)
d.exec_()
if d.dictionaries_changed:
self.dictionaries_changed = True
def data_changed(self, item, column):
if column == 0 and item.type() == DICTIONARY:
d = item.data(0, Qt.UserRole).toPyObject()
if not d.builtin and unicode(item.text(0)) != d.name:
rename_dictionary(d, unicode(item.text(0)))
def build_dictionaries(self, reread=False):
all_dictionaries = builtin_dictionaries() | custom_dictionaries(reread=reread)
languages = defaultdict(lambda : defaultdict(set))
for d in all_dictionaries:
for locale in d.locales | {d.primary_locale}:
languages[locale.langcode][locale.countrycode].add(d)
bf = QFont(self.dictionaries.font())
bf.setBold(True)
itf = QFont(self.dictionaries.font())
itf.setItalic(True)
self.dictionaries.clear()
for lc in sorted(languages, key=lambda x:sort_key(calibre_langcode_to_name(x))):
i = QTreeWidgetItem(self.dictionaries, LANG)
i.setText(0, calibre_langcode_to_name(lc))
i.setData(0, Qt.UserRole, lc)
best_country = getattr(best_locale_for_language(lc), 'countrycode', None)
for countrycode in sorted(languages[lc], key=lambda x: country_map()['names'].get(x, x)):
j = QTreeWidgetItem(i, COUNTRY)
j.setText(0, country_map()['names'].get(countrycode, countrycode))
j.setData(0, Qt.UserRole, countrycode)
if countrycode == best_country:
j.setData(0, Qt.FontRole, bf)
pd = get_dictionary(DictionaryLocale(lc, countrycode))
for dictionary in sorted(languages[lc][countrycode], key=lambda d:d.name):
k = QTreeWidgetItem(j, DICTIONARY)
pl = calibre_langcode_to_name(dictionary.primary_locale.langcode)
if dictionary.primary_locale.countrycode:
pl += '-' + dictionary.primary_locale.countrycode.upper()
k.setText(0, dictionary.name or (_('<Builtin dictionary for {0}>').format(pl)))
k.setData(0, Qt.UserRole, dictionary)
if dictionary.name:
k.setFlags(k.flags() | Qt.ItemIsEditable)
if pd == dictionary:
k.setData(0, Qt.FontRole, itf)
self.dictionaries.expandAll()
def add_dictionary(self):
d = AddDictionary(self)
if d.exec_() == d.Accepted:
self.build_dictionaries(reread=True)
def remove_dictionary(self):
item = self.dictionaries.currentItem()
if item is not None and item.type() == DICTIONARY:
dic = item.data(0, Qt.UserRole).toPyObject()
if not dic.builtin:
remove_dictionary(dic)
self.build_dictionaries(reread=True)
def current_item_changed(self):
item = self.dictionaries.currentItem()
if item is not None:
self.stack.setCurrentIndex(item.type())
if item.type() == LANG:
self.init_language(item)
elif item.type() == COUNTRY:
self.init_country(item)
elif item.type() == DICTIONARY:
self.init_dictionary(item)
def init_language(self, item):
self.helpl.setText(_(
'''<p>You can change the dictionaries used for any specified language.</p>
<p>A language can have many country specific variants. Each of these variants
can have one or more dictionaries assigned to it. The default variant for each language
is shown in bold to the left.</p>
<p>You can change the default country variant as well as changing the dictionaries used for
every variant.</p>
<p>When a book specifies its language as a plain language, without any country variant,
the default variant you choose here will be used.</p>
'''))
def init_country(self, item):
pc = self.pcb
font = item.data(0, Qt.FontRole).toPyObject()
preferred = bool(font and font.bold())
pc.setText((_(
'This is already the preferred variant for the {1} language') if preferred else _(
'Use this as the preferred variant for the {1} language')).format(
unicode(item.text(0)), unicode(item.parent().text(0))))
pc.setEnabled(not preferred)
def set_preferred_country(self):
item = self.dictionaries.currentItem()
bf = QFont(self.dictionaries.font())
bf.setBold(True)
for x in (item.parent().child(i) for i in xrange(item.parent().childCount())):
x.setData(0, Qt.FontRole, bf if x is item else None)
lc = unicode(item.parent().data(0, Qt.UserRole).toPyObject())
pl = dprefs['preferred_locales']
pl[lc] = '%s-%s' % (lc, unicode(item.data(0, Qt.UserRole).toPyObject()))
dprefs['preferred_locales'] = pl
def init_dictionary(self, item):
saf = self.fb
font = item.data(0, Qt.FontRole).toPyObject()
preferred = bool(font and font.italic())
saf.setText((_(
'This is already the preferred dictionary') if preferred else
_('Use this as the preferred dictionary')))
saf.setEnabled(not preferred)
self.remove_dictionary_button.setEnabled(not item.data(0, Qt.UserRole).toPyObject().builtin)
def set_favorite(self):
item = self.dictionaries.currentItem()
bf = QFont(self.dictionaries.font())
bf.setItalic(True)
for x in (item.parent().child(i) for i in xrange(item.parent().childCount())):
x.setData(0, Qt.FontRole, bf if x is item else None)
cc = unicode(item.parent().data(0, Qt.UserRole).toPyObject())
lc = unicode(item.parent().parent().data(0, Qt.UserRole).toPyObject())
d = item.data(0, Qt.UserRole).toPyObject()
locale = '%s-%s' % (lc, cc)
pl = dprefs['preferred_dictionaries']
pl[locale] = d.id
dprefs['preferred_dictionaries'] = pl
@classmethod
def test(cls):
d = cls()
d.exec_()
# }}}
# Spell Check Dialog {{{
class WordsModel(QAbstractTableModel):
word_ignored = pyqtSignal(object, object)
def __init__(self, parent=None):
QAbstractTableModel.__init__(self, parent)
self.counts = (0, 0)
self.words = {} # Map of (word, locale) to location data for the word
self.spell_map = {} # Map of (word, locale) to dictionaries.recognized(word, locale)
self.sort_on = (0, False)
self.items = [] # The currently displayed items
self.filter_expression = None
self.show_only_misspelt = True
self.headers = (_('Word'), _('Count'), _('Language'), _('Misspelled?'))
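        # Illustrative shape of the two maps above (an assumption based on how
        # they are consumed in this model, not data from the original source):
        #   self.words[(word, locale)]     -> list of locations of that word in the book
        #   self.spell_map[(word, locale)] -> truthy if the word is recognized, falsy if misspelt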
def rowCount(self, parent=QModelIndex()):
return len(self.items)
def columnCount(self, parent=QModelIndex()):
return len(self.headers)
def clear(self):
self.beginResetModel()
self.words = {}
self.spell_map = {}
        self.items = []
self.endResetModel()
def headerData(self, section, orientation, role=Qt.DisplayRole):
if orientation == Qt.Horizontal:
if role == Qt.DisplayRole:
try:
return self.headers[section]
except IndexError:
pass
elif role == Qt.InitialSortOrderRole:
return Qt.DescendingOrder if section == 1 else Qt.AscendingOrder
def misspelled_text(self, w):
if self.spell_map[w]:
return _('Ignored') if dictionaries.is_word_ignored(*w) else ''
return '✓'
def data(self, index, role=Qt.DisplayRole):
try:
word, locale = self.items[index.row()]
except IndexError:
return
if role == Qt.DisplayRole:
col = index.column()
if col == 0:
return word
if col == 1:
return '%d' % len(self.words[(word, locale)])
if col == 2:
pl = calibre_langcode_to_name(locale.langcode)
countrycode = locale.countrycode
if countrycode:
pl = '%s (%s)' % (pl, countrycode)
return pl
if col == 3:
return self.misspelled_text((word, locale))
if role == Qt.TextAlignmentRole:
return Qt.AlignVCenter | (Qt.AlignLeft if index.column() == 0 else Qt.AlignHCenter)
def sort(self, column, order=Qt.AscendingOrder):
reverse = order != Qt.AscendingOrder
self.sort_on = (column, reverse)
self.beginResetModel()
self.do_sort()
self.endResetModel()
def filter(self, filter_text):
self.filter_expression = filter_text or None
self.beginResetModel()
self.do_filter()
self.do_sort()
self.endResetModel()
def sort_key(self, col):
if col == 0:
f = (lambda x: x) if tprefs['spell_check_case_sensitive_sort'] else primary_sort_key
def key(w):
return f(w[0])
elif col == 1:
def key(w):
return len(self.words[w])
elif col == 2:
def key(w):
locale = w[1]
return (calibre_langcode_to_name(locale.langcode), locale.countrycode)
else:
key = self.misspelled_text
return key
def do_sort(self):
col, reverse = self.sort_on
self.items.sort(key=self.sort_key(col), reverse=reverse)
def set_data(self, words, spell_map):
self.words, self.spell_map = words, spell_map
self.beginResetModel()
self.do_filter()
self.do_sort()
self.counts = (len([None for w, recognized in spell_map.iteritems() if not recognized]), len(self.words))
self.endResetModel()
def filter_item(self, x):
if self.show_only_misspelt and self.spell_map[x]:
return False
func = contains if tprefs['spell_check_case_sensitive_search'] else primary_contains
if self.filter_expression is not None and not func(self.filter_expression, x[0]):
return False
return True
def do_filter(self):
self.items = filter(self.filter_item, self.words)
def toggle_ignored(self, row):
w = self.word_for_row(row)
if w is not None:
ignored = dictionaries.is_word_ignored(*w)
(dictionaries.unignore_word if ignored else dictionaries.ignore_word)(*w)
self.spell_map[w] = dictionaries.recognized(*w)
self.update_word(w)
self.word_ignored.emit(*w)
def ignore_words(self, rows):
words = {self.word_for_row(r) for r in rows}
words.discard(None)
for w in words:
ignored = dictionaries.is_word_ignored(*w)
(dictionaries.unignore_word if ignored else dictionaries.ignore_word)(*w)
self.spell_map[w] = dictionaries.recognized(*w)
self.update_word(w)
self.word_ignored.emit(*w)
def add_word(self, row, udname):
w = self.word_for_row(row)
if w is not None:
if dictionaries.add_to_user_dictionary(udname, *w):
self.spell_map[w] = dictionaries.recognized(*w)
self.update_word(w)
self.word_ignored.emit(*w)
def add_words(self, dicname, rows):
words = {self.word_for_row(r) for r in rows}
words.discard(None)
for w in words:
if not dictionaries.add_to_user_dictionary(dicname, *w):
dictionaries.remove_from_user_dictionary(dicname, [w])
self.spell_map[w] = dictionaries.recognized(*w)
self.update_word(w)
self.word_ignored.emit(*w)
def remove_word(self, row):
w = self.word_for_row(row)
if w is not None:
if dictionaries.remove_from_user_dictionaries(*w):
self.spell_map[w] = dictionaries.recognized(*w)
self.update_word(w)
def replace_word(self, w, new_word):
for location in self.words[w]:
location.replace(new_word)
if w[0] == new_word:
return w
new_key = (new_word, w[1])
if new_key in self.words:
self.words[new_key] = merge_locations(self.words[new_key], self.words[w])
row = self.row_for_word(w)
self.dataChanged.emit(self.index(row, 1), self.index(row, 1))
else:
self.words[new_key] = self.words[w]
self.spell_map[new_key] = dictionaries.recognized(*new_key)
self.update_word(new_key)
row = self.row_for_word(w)
if row > -1:
self.beginRemoveRows(QModelIndex(), row, row)
del self.items[row]
self.endRemoveRows()
return new_key
def update_word(self, w):
should_be_filtered = not self.filter_item(w)
row = self.row_for_word(w)
if should_be_filtered and row != -1:
self.beginRemoveRows(QModelIndex(), row, row)
del self.items[row]
self.endRemoveRows()
elif not should_be_filtered and row == -1:
self.items.append(w)
self.do_sort()
row = self.row_for_word(w)
self.beginInsertRows(QModelIndex(), row, row)
self.endInsertRows()
self.dataChanged.emit(self.index(row, 3), self.index(row, 3))
def word_for_row(self, row):
try:
return self.items[row]
except IndexError:
pass
def row_for_word(self, word):
try:
return self.items.index(word)
except ValueError:
return -1
class WordsView(QTableView):
ignore_all = pyqtSignal()
add_all = pyqtSignal(object)
change_to = pyqtSignal(object, object)
def __init__(self, parent=None):
QTableView.__init__(self, parent)
self.setSortingEnabled(True), self.setShowGrid(False), self.setAlternatingRowColors(True)
self.setSelectionBehavior(self.SelectRows)
self.setTabKeyNavigation(False)
self.verticalHeader().close()
def keyPressEvent(self, ev):
if ev == QKeySequence.Copy:
self.copy_to_clipboard()
ev.accept()
return
ret = QTableView.keyPressEvent(self, ev)
if ev.key() in (Qt.Key_PageUp, Qt.Key_PageDown, Qt.Key_Up, Qt.Key_Down):
idx = self.currentIndex()
if idx.isValid():
self.scrollTo(idx)
return ret
def highlight_row(self, row):
idx = self.model().index(row, 0)
if idx.isValid():
self.selectRow(row)
self.setCurrentIndex(idx)
self.scrollTo(idx)
def contextMenuEvent(self, ev):
m = QMenu(self)
w = self.model().word_for_row(self.currentIndex().row())
if w is not None:
a = m.addAction(_('Change %s to') % w[0])
cm = QMenu()
a.setMenu(cm)
cm.addAction(_('Specify replacement manually'), partial(self.change_to.emit, w, None))
cm.addSeparator()
for s in dictionaries.suggestions(*w):
cm.addAction(s, partial(self.change_to.emit, w, s))
m.addAction(_('Ignore/Unignore all selected words'), self.ignore_all)
a = m.addAction(_('Add/Remove all selected words'))
am = QMenu()
a.setMenu(am)
for dic in sorted(dictionaries.active_user_dictionaries, key=lambda x:sort_key(x.name)):
am.addAction(dic.name, partial(self.add_all.emit, dic.name))
m.addSeparator()
m.addAction(_('Copy selected words to clipboard'), self.copy_to_clipboard)
m.exec_(ev.globalPos())
def copy_to_clipboard(self):
rows = {i.row() for i in self.selectedIndexes()}
words = {self.model().word_for_row(r) for r in rows}
words.discard(None)
words = sorted({w[0] for w in words}, key=sort_key)
if words:
QApplication.clipboard().setText('\n'.join(words))
class SpellCheck(Dialog):
work_finished = pyqtSignal(object, object, object)
find_word = pyqtSignal(object, object)
refresh_requested = pyqtSignal()
word_replaced = pyqtSignal(object)
word_ignored = pyqtSignal(object, object)
change_requested = pyqtSignal(object, object)
def __init__(self, parent=None):
self.__current_word = None
self.thread = None
self.cancel = False
dictionaries.initialize()
self.current_word_changed_timer = t = QTimer()
t.timeout.connect(self.do_current_word_changed)
t.setSingleShot(True), t.setInterval(100)
Dialog.__init__(self, _('Check spelling'), 'spell-check', parent)
self.work_finished.connect(self.work_done, type=Qt.QueuedConnection)
self.setAttribute(Qt.WA_DeleteOnClose, False)
def setup_ui(self):
set_no_activate_on_click = plugins['progress_indicator'][0].set_no_activate_on_click
self.setWindowIcon(QIcon(I('spell-check.png')))
self.l = l = QVBoxLayout(self)
self.setLayout(l)
self.stack = s = QStackedLayout()
l.addLayout(s)
l.addWidget(self.bb)
self.bb.clear()
self.bb.addButton(self.bb.Close)
b = self.bb.addButton(_('&Refresh'), self.bb.ActionRole)
b.setToolTip('<p>' + _('Re-scan the book for words, useful if you have edited the book since opening this dialog'))
b.setIcon(QIcon(I('view-refresh.png')))
b.clicked.connect(partial(self.refresh, change_request=None))
self.progress = p = QWidget(self)
s.addWidget(p)
p.l = l = QVBoxLayout(p)
l.setAlignment(Qt.AlignCenter)
self.progress_indicator = pi = ProgressIndicator(self, 256)
l.addWidget(pi, alignment=Qt.AlignHCenter), l.addSpacing(10)
p.la = la = QLabel(_('Checking, please wait...'))
la.setStyleSheet('QLabel { font-size: 30pt; font-weight: bold }')
l.addWidget(la, alignment=Qt.AlignHCenter)
self.main = m = QWidget(self)
s.addWidget(m)
m.l = l = QVBoxLayout(m)
m.h1 = h = QHBoxLayout()
l.addLayout(h)
self.filter_text = t = QLineEdit(self)
t.setPlaceholderText(_('Filter the list of words'))
t.textChanged.connect(self.do_filter)
m.fc = b = QToolButton(m)
b.setIcon(QIcon(I('clear_left.png'))), b.setToolTip(_('Clear filter'))
b.clicked.connect(t.clear)
h.addWidget(t), h.addWidget(b)
m.h2 = h = QHBoxLayout()
l.addLayout(h)
self.words_view = w = WordsView(m)
set_no_activate_on_click(w)
w.ignore_all.connect(self.ignore_all)
w.add_all.connect(self.add_all)
w.activated.connect(self.word_activated)
w.change_to.connect(self.change_to)
w.currentChanged = self.current_word_changed
state = tprefs.get('spell-check-table-state', None)
hh = self.words_view.horizontalHeader()
h.addWidget(w)
self.words_model = m = WordsModel(self)
w.setModel(m)
m.dataChanged.connect(self.current_word_changed)
m.modelReset.connect(self.current_word_changed)
m.word_ignored.connect(self.word_ignored)
if state is not None:
hh.restoreState(state)
# Sort by the restored state, if any
w.sortByColumn(hh.sortIndicatorSection(), hh.sortIndicatorOrder())
m.show_only_misspelt = hh.isSectionHidden(3)
self.ignore_button = b = QPushButton(_('&Ignore'))
b.ign_text, b.unign_text = unicode(b.text()), _('Un&ignore')
b.ign_tt = _('Ignore the current word for the rest of this session')
b.unign_tt = _('Stop ignoring the current word')
b.clicked.connect(self.toggle_ignore)
l = QVBoxLayout()
h.addLayout(l)
h.setStretch(0, 1)
l.addWidget(b), l.addSpacing(20)
self.add_button = b = QPushButton(_('Add word to &dictionary:'))
b.add_text, b.remove_text = unicode(b.text()), _('Remove from &dictionaries')
b.add_tt = _('Add the current word to the specified user dictionary')
b.remove_tt = _('Remove the current word from all active user dictionaries')
b.clicked.connect(self.add_remove)
self.user_dictionaries = d = QComboBox(self)
self.user_dictionaries_missing_label = la = QLabel(_(
'You have no active user dictionaries. You must'
' choose at least one active user dictionary via'
' Preferences->Editor->Manage spelling dictionaries'))
la.setWordWrap(True)
self.initialize_user_dictionaries()
d.setMinimumContentsLength(25)
l.addWidget(b), l.addWidget(d), l.addWidget(la)
self.next_occurrence = b = QPushButton(_('Show &next occurrence'), self)
b.setToolTip('<p>' + _(
'Show the next occurrence of the selected word in the editor, so you can edit it manually'))
b.clicked.connect(self.show_next_occurrence)
l.addSpacing(20), l.addWidget(b)
l.addStretch(1)
self.change_button = b = QPushButton(_('&Change selected word to:'), self)
b.clicked.connect(self.change_word)
l.addWidget(b)
self.suggested_word = sw = LineEdit(self)
sw.set_separator(None)
sw.setPlaceholderText(_('The replacement word'))
sw.returnPressed.connect(self.change_word)
l.addWidget(sw)
self.suggested_list = sl = QListWidget(self)
sl.currentItemChanged.connect(self.current_suggestion_changed)
sl.itemActivated.connect(self.change_word)
set_no_activate_on_click(sl)
l.addWidget(sl)
hh.setSectionHidden(3, m.show_only_misspelt)
self.show_only_misspelled = om = QCheckBox(_('Show &only misspelled words'))
om.setChecked(m.show_only_misspelt)
om.stateChanged.connect(self.update_show_only_misspelt)
self.case_sensitive_sort = cs = QCheckBox(_('Case &sensitive sort'))
cs.setChecked(tprefs['spell_check_case_sensitive_sort'])
cs.setToolTip(_('When sorting the list of words, be case sensitive'))
cs.stateChanged.connect(self.sort_type_changed)
self.case_sensitive_search = cs2 = QCheckBox(_('Case sensitive sea&rch'))
cs2.setToolTip(_('When filtering the list of words, be case sensitive'))
cs2.setChecked(tprefs['spell_check_case_sensitive_search'])
cs2.stateChanged.connect(self.search_type_changed)
self.hb = h = QHBoxLayout()
self.summary = s = QLabel('')
self.main.l.addLayout(h), h.addWidget(s), h.addWidget(om), h.addWidget(cs), h.addWidget(cs2), h.addStretch(1)
def keyPressEvent(self, ev):
if ev.key() in (Qt.Key_Enter, Qt.Key_Return):
ev.accept()
return
return Dialog.keyPressEvent(self, ev)
def sort_type_changed(self):
tprefs['spell_check_case_sensitive_sort'] = bool(self.case_sensitive_sort.isChecked())
if self.words_model.sort_on[0] == 0:
with self:
hh = self.words_view.horizontalHeader()
self.words_view.sortByColumn(hh.sortIndicatorSection(), hh.sortIndicatorOrder())
def search_type_changed(self):
tprefs['spell_check_case_sensitive_search'] = bool(self.case_sensitive_search.isChecked())
if unicode(self.filter_text.text()).strip():
self.do_filter()
def show_next_occurrence(self):
self.word_activated(self.words_view.currentIndex())
def word_activated(self, index):
w = self.words_model.word_for_row(index.row())
if w is None:
return
self.find_word.emit(w, self.words_model.words[w])
def initialize_user_dictionaries(self):
ct = unicode(self.user_dictionaries.currentText())
self.user_dictionaries.clear()
self.user_dictionaries.addItems([d.name for d in dictionaries.active_user_dictionaries])
if ct:
idx = self.user_dictionaries.findText(ct)
if idx > -1:
self.user_dictionaries.setCurrentIndex(idx)
self.user_dictionaries.setVisible(self.user_dictionaries.count() > 0)
self.user_dictionaries_missing_label.setVisible(not self.user_dictionaries.isVisible())
def current_word_changed(self, *args):
self.current_word_changed_timer.start(self.current_word_changed_timer.interval())
def do_current_word_changed(self):
try:
b = self.ignore_button
except AttributeError:
return
ignored = recognized = in_user_dictionary = False
current = self.words_view.currentIndex()
current_word = ''
if current.isValid():
row = current.row()
w = self.words_model.word_for_row(row)
if w is not None:
ignored = dictionaries.is_word_ignored(*w)
recognized = self.words_model.spell_map[w]
current_word = w[0]
if recognized:
in_user_dictionary = dictionaries.word_in_user_dictionary(*w)
suggestions = dictionaries.suggestions(*w)
self.suggested_list.clear()
for i, s in enumerate(suggestions):
item = QListWidgetItem(s, self.suggested_list)
if i == 0:
self.suggested_list.setCurrentItem(item)
self.suggested_word.setText(s)
prefix = b.unign_text if ignored else b.ign_text
b.setText(prefix + ' ' + current_word)
b.setToolTip(b.unign_tt if ignored else b.ign_tt)
b.setEnabled(current.isValid() and (ignored or not recognized))
if not self.user_dictionaries_missing_label.isVisible():
b = self.add_button
b.setText(b.remove_text if in_user_dictionary else b.add_text)
b.setToolTip(b.remove_tt if in_user_dictionary else b.add_tt)
self.user_dictionaries.setVisible(not in_user_dictionary)
def current_suggestion_changed(self, item):
try:
self.suggested_word.setText(item.text())
except AttributeError:
pass # item is None
def change_word(self):
current = self.words_view.currentIndex()
if not current.isValid():
return
row = current.row()
w = self.words_model.word_for_row(row)
if w is None:
return
new_word = unicode(self.suggested_word.text())
self.change_requested.emit(w, new_word)
def change_word_after_update(self, w, new_word):
self.refresh(change_request=(w, new_word))
def change_to(self, w, new_word):
if new_word is None:
self.suggested_word.setFocus(Qt.OtherFocusReason)
self.suggested_word.clear()
return
self.change_requested.emit(w, new_word)
def do_change_word(self, w, new_word):
changed_files = replace_word(current_container(), new_word, self.words_model.words[w], w[1])
if changed_files:
self.word_replaced.emit(changed_files)
w = self.words_model.replace_word(w, new_word)
row = self.words_model.row_for_word(w)
if row > -1:
self.words_view.highlight_row(row)
def toggle_ignore(self):
current = self.words_view.currentIndex()
if current.isValid():
self.words_model.toggle_ignored(current.row())
def ignore_all(self):
rows = {i.row() for i in self.words_view.selectionModel().selectedRows()}
rows.discard(-1)
if rows:
self.words_model.ignore_words(rows)
def add_all(self, dicname):
rows = {i.row() for i in self.words_view.selectionModel().selectedRows()}
rows.discard(-1)
if rows:
self.words_model.add_words(dicname, rows)
def add_remove(self):
current = self.words_view.currentIndex()
if current.isValid():
if self.user_dictionaries.isVisible(): # add
udname = unicode(self.user_dictionaries.currentText())
self.words_model.add_word(current.row(), udname)
else:
self.words_model.remove_word(current.row())
def update_show_only_misspelt(self):
m = self.words_model
m.show_only_misspelt = self.show_only_misspelled.isChecked()
self.words_view.horizontalHeader().setSectionHidden(3, m.show_only_misspelt)
self.do_filter()
def __enter__(self):
idx = self.words_view.currentIndex().row()
self.__current_word = self.words_model.word_for_row(idx)
def __exit__(self, *args):
if self.__current_word is not None:
row = self.words_model.row_for_word(self.__current_word)
self.words_view.highlight_row(max(0, row))
self.__current_word = None
def do_filter(self):
text = unicode(self.filter_text.text()).strip()
with self:
self.words_model.filter(text)
def refresh(self, change_request=None):
if not self.isVisible():
return
self.cancel = True
if self.thread is not None:
self.thread.join()
self.stack.setCurrentIndex(0)
self.progress_indicator.startAnimation()
self.refresh_requested.emit()
self.thread = Thread(target=partial(self.get_words, change_request=change_request))
self.thread.daemon = True
self.cancel = False
self.thread.start()
def get_words(self, change_request=None):
try:
words = get_all_words(current_container(), dictionaries.default_locale)
spell_map = {w:dictionaries.recognized(*w) for w in words}
except:
import traceback
traceback.print_exc()
words = traceback.format_exc()
spell_map = {}
if self.cancel:
self.end_work()
else:
self.work_finished.emit(words, spell_map, change_request)
def end_work(self):
self.stack.setCurrentIndex(1)
self.progress_indicator.stopAnimation()
self.words_model.clear()
def work_done(self, words, spell_map, change_request):
self.end_work()
if not isinstance(words, dict):
return error_dialog(self, _('Failed to check spelling'), _(
'Failed to check spelling, click "Show details" for the full error information.'),
det_msg=words, show=True)
if not self.isVisible():
return
self.words_model.set_data(words, spell_map)
col, reverse = self.words_model.sort_on
self.words_view.horizontalHeader().setSortIndicator(
col, Qt.DescendingOrder if reverse else Qt.AscendingOrder)
self.words_view.highlight_row(0)
self.update_summary()
self.initialize_user_dictionaries()
if self.words_model.rowCount() > 0:
self.words_view.resizeRowToContents(0)
self.words_view.verticalHeader().setDefaultSectionSize(self.words_view.rowHeight(0))
if change_request is not None:
w, new_word = change_request
if w in self.words_model.words:
self.do_change_word(w, new_word)
else:
error_dialog(self, _('Files edited'), _(
'The files in the editor were edited outside the spell check dialog,'
' and the word %s no longer exists.') % w[0], show=True)
def update_summary(self):
self.summary.setText(_('Misspelled words: {0} Total words: {1}').format(*self.words_model.counts))
def sizeHint(self):
return QSize(1000, 650)
def show(self):
Dialog.show(self)
QTimer.singleShot(0, self.refresh)
def accept(self):
tprefs['spell-check-table-state'] = bytearray(self.words_view.horizontalHeader().saveState())
Dialog.accept(self)
def reject(self):
tprefs['spell-check-table-state'] = bytearray(self.words_view.horizontalHeader().saveState())
Dialog.reject(self)
@classmethod
def test(cls):
from calibre.ebooks.oeb.polish.container import get_container
from calibre.gui2.tweak_book import set_current_container
set_current_container(get_container(sys.argv[-1], tweak_mode=True))
set_book_locale(current_container().mi.language)
d = cls()
QTimer.singleShot(0, d.refresh)
d.exec_()
# }}}
# Find next occurrence {{{
def find_next(word, locations, current_editor, current_editor_name,
gui_parent, show_editor, edit_file):
files = OrderedDict()
for l in locations:
try:
files[l.file_name].append(l)
except KeyError:
files[l.file_name] = [l]
if current_editor_name not in files:
current_editor_name = None
locations = [(fname, {l.original_word for l in _locations}, False) for fname, _locations in files.iteritems()]
else:
# Re-order the list of locations to search so that we search in the
# current editor first
lfiles = list(files)
idx = lfiles.index(current_editor_name)
before, after = lfiles[:idx], lfiles[idx+1:]
lfiles = after + before + [current_editor_name]
locations = [(current_editor_name, {l.original_word for l in files[current_editor_name]}, True)]
for fname in lfiles:
locations.append((fname, {l.original_word for l in files[fname]}, False))
for file_name, original_words, from_cursor in locations:
ed = editors.get(file_name, None)
if ed is None:
edit_file(file_name)
ed = editors[file_name]
if ed.find_spell_word(original_words, word[1].langcode, from_cursor=from_cursor):
show_editor(file_name)
return True
return False
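# Illustrative walk-through of the ordering above (assumed example, not from
# the original source): with spine files [a.html, b.html, c.html, d.html] and
# the current editor on c.html, the scan order is c.html (from the cursor),
# then d.html, a.html, b.html, and finally c.html again from the top, so the
# whole book is covered while starting at the user's position.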
def find_next_error(current_editor, current_editor_name, gui_parent, show_editor, edit_file):
files = get_checkable_file_names(current_container())[0]
if current_editor_name not in files:
current_editor_name = None
else:
idx = files.index(current_editor_name)
before, after = files[:idx], files[idx+1:]
files = [current_editor_name] + after + before + [current_editor_name]
for file_name in files:
from_cursor = False
if file_name == current_editor_name:
from_cursor = True
current_editor_name = None
ed = editors.get(file_name, None)
if ed is None:
edit_file(file_name)
ed = editors[file_name]
if ed.editor.find_next_spell_error(from_cursor=from_cursor):
show_editor(file_name)
return True
return False
# }}}
if __name__ == '__main__':
app = QApplication([])
dictionaries.initialize()
ManageUserDictionaries.test()
del app
| gpl-3.0 | 1,952,062,430,409,724,200 | 39.365575 | 123 | 0.598528 | false |
peercoin/peercoin | test/functional/p2p_leak.py | 3 | 5513 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test message sending before handshake completion.
A node should never send anything other than VERSION/VERACK/REJECT until it's
received a VERACK.
This test connects to a node and sends it a few messages, trying to entice it
into sending us something it shouldn't."""
import time
from test_framework.messages import msg_getaddr, msg_ping, msg_verack
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until
banscore = 10
class CLazyNode(P2PInterface):
def __init__(self):
super().__init__()
self.unexpected_msg = False
self.ever_connected = False
def bad_message(self, message):
self.unexpected_msg = True
self.log.info("should not have received message: %s" % message.command)
def on_open(self):
self.ever_connected = True
def on_version(self, message): self.bad_message(message)
def on_verack(self, message): self.bad_message(message)
def on_reject(self, message): self.bad_message(message)
def on_inv(self, message): self.bad_message(message)
def on_addr(self, message): self.bad_message(message)
def on_getdata(self, message): self.bad_message(message)
def on_getblocks(self, message): self.bad_message(message)
def on_tx(self, message): self.bad_message(message)
def on_block(self, message): self.bad_message(message)
def on_getaddr(self, message): self.bad_message(message)
def on_headers(self, message): self.bad_message(message)
def on_getheaders(self, message): self.bad_message(message)
def on_ping(self, message): self.bad_message(message)
def on_mempool(self, message): self.bad_message(message)
def on_pong(self, message): self.bad_message(message)
def on_feefilter(self, message): self.bad_message(message)
def on_sendheaders(self, message): self.bad_message(message)
def on_sendcmpct(self, message): self.bad_message(message)
def on_cmpctblock(self, message): self.bad_message(message)
def on_getblocktxn(self, message): self.bad_message(message)
def on_blocktxn(self, message): self.bad_message(message)
# Node that never sends a version. We'll use this to send a bunch of messages
# anyway, and eventually get disconnected.
class CNodeNoVersionBan(CLazyNode):
    # Send a bunch of veracks without ever sending a version. This should get us disconnected.
# NOTE: implementation-specific check here. Remove if bitcoind ban behavior changes
def on_open(self):
super().on_open()
for i in range(banscore):
self.send_message(msg_verack())
def on_reject(self, message): pass
# Node that never sends a version. This one just sits idle and hopes to receive
# any message (it shouldn't!)
class CNodeNoVersionIdle(CLazyNode):
def __init__(self):
super().__init__()
# Node that sends a version but not a verack.
class CNodeNoVerackIdle(CLazyNode):
def __init__(self):
self.version_received = False
super().__init__()
def on_reject(self, message): pass
def on_verack(self, message): pass
# When version is received, don't reply with a verack. Instead, see if the
# node will give us a message that it shouldn't. This is not an exhaustive
# list!
def on_version(self, message):
self.version_received = True
self.send_message(msg_ping())
self.send_message(msg_getaddr())
class P2PLeakTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-banscore=' + str(banscore)]]
def run_test(self):
no_version_bannode = self.nodes[0].add_p2p_connection(CNodeNoVersionBan(), send_version=False, wait_for_verack=False)
no_version_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVersionIdle(), send_version=False, wait_for_verack=False)
no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle(), wait_for_verack=False)
        # Wait until we have received the verack in response to the version. Don't wait
        # for the other node to receive our verack, since we never sent one
no_verack_idlenode.wait_for_verack()
wait_until(lambda: no_version_bannode.ever_connected, timeout=10, lock=mininode_lock)
wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=mininode_lock)
wait_until(lambda: no_verack_idlenode.version_received, timeout=10, lock=mininode_lock)
# Mine a block and make sure that it's not sent to the connected nodes
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
#Give the node enough time to possibly leak out a message
time.sleep(5)
#This node should have been banned
assert not no_version_bannode.is_connected
self.nodes[0].disconnect_p2ps()
# Wait until all connections are closed
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0)
# Make sure no unexpected messages came in
assert no_version_bannode.unexpected_msg == False
assert no_version_idlenode.unexpected_msg == False
assert no_verack_idlenode.unexpected_msg == False
if __name__ == '__main__':
P2PLeakTest().main()
| mit | -7,404,786,810,299,223,000 | 40.765152 | 127 | 0.699982 | false |
sounay/flaminggo-test | onadata/apps/logger/migrations/0038_auto__del_field_instance_is_deleted.py | 13 | 12143 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Instance.is_deleted'
db.delete_column(u'odk_logger_instance', 'is_deleted')
def backwards(self, orm):
# Adding field 'Instance.is_deleted'
db.add_column(u'odk_logger_instance', 'is_deleted',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.attachment': {
'Meta': {'object_name': 'Attachment'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['odk_logger.Instance']"}),
'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'mimetype': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'})
},
'odk_logger.instance': {
'Meta': {'object_name': 'Instance'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'json': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '249'}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.instancehistory': {
'Meta': {'object_name': 'InstanceHistory'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '249'}),
'xform_instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submission_history'", 'to': "orm['odk_logger.Instance']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.note': {
'Meta': {'object_name': 'Note'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': "orm['odk_logger.Instance']"}),
'note': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.surveytype': {
'Meta': {'object_name': 'SurveyType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'odk_logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'), ('user', 'sms_id_string'))", 'object_name': 'XForm'},
'allows_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bamboo_dataset': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '60'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'encrypted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'is_crowd_form': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'last_submission_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'num_of_submissions': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sms_id_string': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100'}),
'surveys_with_geopoints': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.ziggyinstance': {
'Meta': {'object_name': 'ZiggyInstance'},
'client_version': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.CharField', [], {'max_length': '249'}),
'form_instance': ('django.db.models.fields.TextField', [], {}),
'form_version': ('django.db.models.fields.CharField', [], {'default': "u'1.0'", 'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '249'}),
'reporter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ziggys'", 'to': u"orm['auth.User']"}),
'server_version': ('django.db.models.fields.BigIntegerField', [], {}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ziggy_submissions'", 'null': 'True', 'to': "orm['odk_logger.XForm']"})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['logger']
| bsd-2-clause | -6,523,173,707,022,303,000 | 75.85443 | 187 | 0.548546 | false |
fffonion/you-get | src/you_get/extractors/songtaste.py | 20 | 1473 | #!/usr/bin/env python
__all__ = ['songtaste_download']
from ..common import *
import urllib.error
def songtaste_download(url, output_dir = '.', merge = True, info_only = False):
if re.match(r'http://www.songtaste.com/song/\d+', url):
old_fake_headers = fake_headers
id = r1(r'http://www.songtaste.com/song/(\d+)', url)
player_url = 'http://www.songtaste.com/playmusic.php?song_id='+str(id)
fake_headers['Referer'] = player_url
html = get_response(player_url).data
r = '''^WrtSongLine\((.*)\)'''
reg = re.compile(r , re.M)
m = reg.findall(html.decode('gbk'))
l = m[0].replace('"', '').replace(' ', '').split(',')
title = l[2] + '-' + l[1]
for i in range(0, 10):
real_url = l[5].replace('http://mg', 'http://m%d' % i)
try:
type, ext, size = url_info(real_url, True)
except urllib.error.HTTPError as e:
if 403 == e.code:
continue
else:
raise e
break
print_info(site_info, title, type, size)
if not info_only:
download_urls([real_url], title, ext, size, output_dir, refer = url, merge = merge, faker = True)
        fake_headers = old_fake_headers
site_info = "SongTaste.com"
download = songtaste_download
download_playlist = playlist_not_supported('songtaste')
| mit | -19,731,022,208,494,020 | 33.255814 | 109 | 0.521385 | false |
famz/patchew | tests/test_mbox.py | 1 | 1637 | #!/usr/bin/env python3
#
# Copyright 2017 Red Hat, Inc.
#
# Authors:
# Fam Zheng <[email protected]>
#
# This work is licensed under the MIT License. Please see the LICENSE file or
# http://opensource.org/licenses/MIT.
import os
import sys
import mbox
sys.path.append(os.path.dirname(__file__))
from patchewtest import PatchewTestCase, main
class MboxTest(PatchewTestCase):
def test_multipart_in_multipart(self):
expected = """
On 07/25/2017 10:57 AM, Jeff Cody wrote:
> Signed-off-by: Jeff Cody <[email protected]>
> ---
> redhat/build_configure.sh | 3 +++
> redhat/qemu-kvm.spec.template | 7 +++++++
> 2 files changed, 10 insertions(+)
>
ACK
--
Eric Blake, Principal Software Engineer
Red Hat, Inc. +1-919-301-3266
Virtualization: qemu.org | libvirt.org
""".strip()
dp = self.get_data_path("0016-nested-multipart.mbox.gz")
with open(dp, "r") as f:
msg = mbox.MboxMessage(f.read())
self.assertEqual(msg.get_body().strip(), expected)
def test_mime_word_recipient(self):
dp = self.get_data_path("0018-mime-word-recipient.mbox.gz")
with open(dp, "r") as f:
msg = mbox.MboxMessage(f.read())
utf8_recipient = msg.get_cc()[1]
self.assertEqual(utf8_recipient[0], "Philippe Mathieu-Daudé")
self.assertEqual(utf8_recipient[1], "[email protected]")
def test_mode_only_patch(self):
dp = self.get_data_path("0021-mode-only-patch.mbox.gz")
with open(dp, "r") as f:
msg = mbox.MboxMessage(f.read())
self.assertTrue(msg.is_patch())
if __name__ == '__main__':
main()
| mit | -7,637,856,859,237,374,000 | 28.214286 | 78 | 0.627139 | false |
EarthSystemCoG/COG | cog/views/utils.py | 2 | 8411 | from django.shortcuts import render
from django.db.models import Q
from django.template import RequestContext
from django.contrib.auth.models import User
import datetime
from cog.models import UserProfile, Project
from cog.utils import getJson, str2bool
from cog.models.peer_site import getPeerSites
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist
import urllib
from collections import OrderedDict
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from cog.plugins.esgf.registry import LocalKnownProvidersDict
from cog.views.constants import MAX_COUNTS_PER_PAGE
# module-scope object that holds list of known ESGF Identity Providers
# included here because login view is part of django-openid-auth module
esgf_known_providers = LocalKnownProvidersDict()
def paginate(objects, request, max_counts_per_page=MAX_COUNTS_PER_PAGE):
'''Utility method to paginate a list of objects before they are rendered in a template.'''
page = getQueryDict(request).get('page')
paginator = Paginator(objects, max_counts_per_page) # show at most 'max_counts_per_page'
try:
_objects = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
_objects = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
_objects = paginator.page(paginator.num_pages)
return _objects
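# Usage sketch (not part of the original module; the queryset and page size are illustrative):
# a view could paginate a queryset before rendering, e.g.
#   projects_page = paginate(Project.objects.all(), request, max_counts_per_page=20)
# and then pass ``projects_page`` to its template for display.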
def getKnownIdentityProviders():
# sort dictionary by key
return OrderedDict(sorted(esgf_known_providers.idpDict().items()))
#return esgf_known_providers.idpDict()
# function to return an error message if a project is not active
def getProjectNotActiveRedirect(request, project):
messages = ['Access to all pages of project %s is currently forbidden.' % project.short_name,
'Please contact <a href="/projects/cog/contactus/">support</a> with any questions.']
return render(request,
'cog/common/message.html',
{'mytitle': 'Project Access Not Enabled',
'project': project,
'messages': messages})
# function to return an error message if a project is not public
def getProjectNotVisibleRedirect(request, project):
messages = ['Access to all pages of project %s is restricted to members only.' % project.short_name,
'Please contact <a href="/projects/cog/contactus/">support</a> with any questions.']
return render(request,
'cog/common/message.html',
{'mytitle': 'Project Access Restricted',
'project': project,
'messages': messages})
def set_openid_cookie(response, openid):
"""Utility method to consistently set the openid cookie."""
print 'SETTING openid cookie to: %s' % openid
response.set_cookie('openid', openid,
expires=(datetime.datetime.now() + datetime.timedelta(days=3650)), # expires in 10 years
httponly=True)
def getUsersThatMatch(match, sortby='last_name'):
"""
    Returns the list of users (e.g. "list all pending/current/node users") that match a given expression.
By default it sorts by last_name.
"""
return User.objects.filter((Q(username__icontains=match) | Q(first_name__icontains=match) |
Q(last_name__icontains=match) | Q(email__icontains=match))
).order_by(sortby)
def getAdminUsersThatMatch(match, sortby='username'):
"""
    Returns the list of admin users (e.g. "list all system users") that match a given expression.
By default it sorts by username.
"""
return User.objects.filter((Q(username__icontains=match) | Q(first_name__icontains=match) |
Q(last_name__icontains=match) | Q(email__icontains=match)) |
Q(date_joined__icontains=match) |
Q(profile__site__name__icontains=match)
).order_by(sortby)
def get_projects_by_name(match):
"""Returns the list of users that match a given expression."""
return Project.objects.filter((Q(short_name__icontains=match)))
def get_all_shared_user_info(user, includeCurrentSite=True):
"""Queries all nodes (including local node) for projects and groups the user belongs to.
Returns two lists of dictionaries but does NOT update the local database.
Example of JSON data retrieved from each node:
{
"users": {
"https://hydra.fsl.noaa.gov/esgf-idp/openid/rootAdmin": {
"home_site_domain": "cog-esgf.esrl.noaa.gov",
"openid": "https://hydra.fsl.noaa.gov/esgf-idp/openid/rootAdmin",
"datacart": {
"size": 0
},
"home_site_name": "NOAA ESRL ESGF-CoG",
"groups": {
"HIWPP": {},
"NCPP DIP": {
"admin": true,
"publisher": true,
"super": true,
"user": true
},
"NOAA ESRL": {
"super": true
}
},
"projects": {
"AlaskaSummerSchool": [
"admin",
"user"
],
"CF-Grids": [
"admin"
],
"CFSS": [
"admin",
"user"
],
.....
"""
# dictionary of information retrieved from each node, including current node
userDict = {} # node --> dictionary of user information
try:
if user.profile.openid() is not None:
openid = user.profile.openid()
print 'Retrieving projects, groups for user with openid=%s' % openid
# loop over remote (enabled) nodes, possibly add current node
sites = list(getPeerSites())
if includeCurrentSite:
sites = sites + [Site.objects.get_current()]
for site in sites:
url = "http://%s/share/user/?openid=%s" % (site.domain, openid)
print 'Retrieving user projects and groups from URL=%s' % url
jobj = getJson(url)
if jobj is not None and openid in jobj['users']:
userDict[site] = jobj['users'][openid]
else:
print 'Openid=%s not found at site=%s' % (openid, site)
except UserProfile.DoesNotExist:
pass # user profile not yet created
# restructure information as list of (project object, user roles) and (group name, group roles) tuples
projects = []
groups = []
for usite, udict in userDict.items():
if udict.get('projects', None):
for pname, proles in udict["projects"].items():
try:
proj = Project.objects.get(short_name__iexact=pname)
projects.append((proj, proles))
except ObjectDoesNotExist:
pass
if udict.get('groups', None):
for gname, gdict in udict["groups"].items():
groles = []
for grole, approved in gdict.items():
if approved:
groles.append(grole)
groups.append((gname,groles))
# sort by project short name
return (projects, groups)
def add_get_parameter(url, key, value):
"""
Utility method to add an HTTP request parameter to a GET request
"""
if '?' in url:
return url + "&%s" % urllib.urlencode([(key, value)])
else:
return url + "?%s" % urllib.urlencode([(key, value)])
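# Illustrative behaviour (hypothetical URLs, added for clarity):
#   add_get_parameter('http://example.com/page', 'tab', 'news')      -> 'http://example.com/page?tab=news'
#   add_get_parameter('http://example.com/page?a=1', 'tab', 'news')  -> 'http://example.com/page?a=1&tab=news'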
def getQueryDict(request):
'''Utiity method to return the query dictionary for a GET or POST request.'''
if request.method == 'POST':
return request.POST
else:
return request.GET
| bsd-3-clause | -1,279,384,678,244,559,400 | 38.674528 | 113 | 0.562597 | false |
jj0hns0n/geonode | geonode/groups/admin.py | 5 | 1470 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.contrib import admin
from modeltranslation.admin import TranslationAdmin
from geonode.groups.models import (GroupMember, GroupProfile,
GroupInvitation, GroupCategory)
@admin.register(GroupCategory)
class GroupCategoryAdmin(TranslationAdmin):
list_display = ('name', 'slug',)
readonly_fields = ('slug',)
class GroupMemberInline(admin.TabularInline):
model = GroupMember
class GroupAdmin(admin.ModelAdmin):
inlines = [
GroupMemberInline
]
exclude = ['group', ]
admin.site.register(GroupProfile, GroupAdmin)
admin.site.register(GroupInvitation)
| gpl-3.0 | -7,623,183,077,955,132,000 | 29.625 | 73 | 0.656463 | false |
tanzquotient/tq_website | groups/services/update_groups.py | 2 | 1088 | import logging
from django.contrib.auth.models import Group
from courses.models import UserProfile
from ..definitions import GroupDefinitions
log = logging.getLogger('update_groups')
def update_groups(queryset=None):
log.info("Updating groups")
# All groups
if queryset is None:
for group_definition in GroupDefinitions.DEFINITIONS:
Group.objects.get_or_create(name=group_definition.name)
queryset = Group.objects
for group_definition in GroupDefinitions.DEFINITIONS:
if group_definition.is_manual():
continue
group = queryset.filter(name=group_definition.name)
if not group.exists():
continue
log.info("Updating group " + group_definition.name)
group = group.get()
group.user_set.clear()
for profile in UserProfile.objects.all():
user = profile.user
if group_definition.matches(user):
group.user_set.add(user)
log.info("Updating group finished. Number of users in group " + str(group.user_set.count()))
| gpl-2.0 | -1,408,957,220,435,228,700 | 27.631579 | 100 | 0.659007 | false |
JioCloud/horizon | openstack_dashboard/test/integration_tests/tests/test_user_settings.py | 14 | 2337 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
class TestUserSettings(helpers.TestCase):
def verify_user_settings_change(self, changed_settings):
language = self.settings_page.modal.language_selection.\
get_attribute("value")
timezone = self.settings_page.modal.timezone_selection.\
get_attribute("value")
pagesize = self.settings_page.modal.pagesize.\
get_attribute("value")
user_settings = (("Language", changed_settings["language"], language),
("Timezone", changed_settings["timezone"], timezone),
("Pagesize", changed_settings["pagesize"], pagesize))
for (setting, expected, observed) in user_settings:
self.assertEqual(expected, observed,
"expected %s: %s, instead found: %s"
% (setting, expected, observed))
def test_user_settings_change(self):
"""tests the user's settings options:
* changes the system's language
* changes the timezone
* changes the number of items per page (page size)
* verifies all changes were successfully executed
"""
self.settings_page = self.home_pg.go_to_settings_page()
self.settings_page.change_language("es")
self.settings_page.change_timezone("Asia/Jerusalem")
self.settings_page.change_pagesize("30")
changed_settings = {"language": "es", "timezone": "Asia/Jerusalem",
"pagesize": "30"}
self.verify_user_settings_change(changed_settings)
self.settings_page.return_to_default_settings()
self.verify_user_settings_change(self.settings_page.DEFAULT_SETTINGS)
| apache-2.0 | -8,152,542,669,743,697,000 | 43.09434 | 78 | 0.643988 | false |
lino-framework/lino | lino/modlib/publisher/mixins.py | 1 | 2653 | # -*- coding: UTF-8 -*-
# Copyright 2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from lino.api import dd, rt, _
from django import http
from django.conf import settings
from django.utils.translation import get_language
from lino.modlib.printing.mixins import Printable
from lino.modlib.printing.choicelists import BuildMethods
from inspect import isclass
class PreviewPublication(dd.Action):
label = _("Preview")
select_rows = False
def run_from_ui(self, ar, **kw):
sr_selected = not isclass(self)
if sr_selected:
ar.success(open_url=self.publisher_url())
else:
ar.success(open_url=self.publisher_url(self, not sr_selected))
def get_view_permission(self, user_type):
if not dd.is_installed('publisher'):
return False
return super(PreviewPublication, self).get_view_permission(user_type)
class Publishable(Printable):
class Meta:
abstract = True
app_label = 'publisher'
publisher_location = None
publisher_page_template = "publisher/page.pub.html"
publisher_item_template = "publisher/item.pub.html"
preview_publication = PreviewPublication()
# @dd.action(select_rows=False)
# def preview_publication(self, ar):
# sr_selected = not isclass(self)
# if sr_selected:
# ar.success(open_url=self.publisher_url())
# else:
# ar.success(open_url=self.publisher_url(self, not sr_selected))
def publisher_url(self):
return "/{}/{}".format(self.publisher_location, self.pk)
# def publisher_url(self, list=False):
# if list:
# return "/{}/".format(self.publisher_location)
# return "/{}/{}".format(self.publisher_location, self.pk)
def render_from(self, tplname, ar):
env = settings.SITE.plugins.jinja.renderer.jinja_env
context = ar.get_printable_context(obj=self)
template = env.get_template(tplname)
# print("20210112 publish {} {} using {}".format(cls, obj, template))
# context = dict(obj=self, request=request, language=get_language())
return template.render(**context)
def get_publisher_response(self, ar):
html = self.render_from(self.publisher_page_template, ar)
return http.HttpResponse(html, content_type='text/html;charset="utf-8"')
@classmethod
def render_dashboard_items(cls, ar):
for obj in cls.get_dashboard_objects(ar):
yield obj.render_from(obj.publisher_item_template, ar)
@classmethod
def get_dashboard_objects(cls, ar):
return []
| bsd-2-clause | -1,319,844,503,397,587,500 | 33.012821 | 80 | 0.6536 | false |
betterlife/flask-psi | psi/app/views/components/image_field.py | 2 | 2447 | from wtforms import StringField
class ImageInput(object):
"""
    Image upload controller. It supports:
    1. Multiple file uploads at one time.
    2. Previewing existing image files on the server.
    3. Previewing to-be-uploaded image files on the fly, before uploading.
Template file components/image_field.html is needed for this controller
to work correctly.
"""
def __call__(self, field, **kwargs):
# Use field.data to get current data.
from flask import render_template
associated_images = []
if ((field.data is not None and hasattr(field.data, 'filename') and len(field.data.filename) > 0)
or (field.data is not None and hasattr(field.data, '__len__') and len(field.data) > 0)):
for p_i in field.data:
associated_images.append(p_i)
else:
associated_images = []
return render_template('components/images_input.html',
associated_images=associated_images)
class ImageField(StringField):
widget = ImageInput()
def __call__(self, **kwargs):
return super(ImageField, self).__call__(**kwargs)
def set_object_type(self, object_type):
self.object_type = object_type
def populate_obj(self, obj, name):
from flask import request
from psi.app.service import Info
from psi.app.utils import db_util
from psi.app.utils import file_util
images_to_del = request.form.get('images-to-delete')
if len(images_to_del) > 0:
to_del_ids = images_to_del.split(',')
for to_del_id in to_del_ids:
db_util.delete_by_id(self.object_type, to_del_id, commit=False)
files = request.files.getlist('images_placeholder')
images = getattr(obj, name)
for f in files:
if len(f.filename) > 0:
image_owner = self.object_type()
image = file_util.save_image(image_owner, f)
Info.get_db().session.add(image)
Info.get_db().session.add(image_owner)
images.append(image_owner)
setattr(obj, name, images)
def images_formatter(view, context, model, name):
from flask import render_template
from wtforms.widgets import HTMLString
val = getattr(model, name)
return HTMLString(render_template("components/images_display.html",
associated_images=val))
| mit | -6,832,428,777,246,576,000 | 37.234375 | 105 | 0.6085 | false |
asyncee/python-obscene-words-filter | obscene_words_filter/words_filter.py | 1 | 1150 | # coding: utf-8
from __future__ import unicode_literals
import re
from functools import partial
class ObsceneWordsFilter(object):
def __init__(self, bad_regexp, good_regexp):
self.bad_regexp = bad_regexp
self.good_regexp = good_regexp
def find_bad_word_matches(self, text):
return self.bad_regexp.finditer(text)
def find_bad_word_matches_without_good_words(self, text):
for match in self.find_bad_word_matches(text):
if not self.is_word_good(match.group()):
yield match
def is_word_good(self, word):
return bool(self.good_regexp.match(word))
def is_word_bad(self, word):
if self.is_word_good(word):
return False
return bool(self.bad_regexp.match(word))
def mask_bad_words(self, text):
for match in self.find_bad_word_matches_without_good_words(text):
start, end = match.span()
text = self.mask_text_range(text, start, end)
return text
@staticmethod
def mask_text_range(text, start, stop, symbol='*'):
return text[:start] + (symbol * (stop - start)) + text[stop:]
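# Minimal demo (not part of the original module): the two patterns below are
# illustrative stand-ins for the package's real obscene/exception word regexps.
if __name__ == '__main__':
    demo_bad_re = re.compile(r'\bdarn\w*\b', re.IGNORECASE | re.UNICODE)
    demo_good_re = re.compile(r'\bdarning\b', re.IGNORECASE | re.UNICODE)
    demo_filter = ObsceneWordsFilter(demo_bad_re, demo_good_re)
    # masks "darn" but keeps the whitelisted word "darning"
    print(demo_filter.mask_bad_words('darn it, I was darning socks'))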
| mit | 351,474,502,604,419,460 | 28.487179 | 73 | 0.623478 | false |
labordoc/labordoc-next | modules/docextract/lib/refextract_config.py | 7 | 5828 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""RefExtract configuration"""
from invenio.config import CFG_VERSION, CFG_ETCDIR
# pylint: disable=C0301
# Version number:
CFG_REFEXTRACT_VERSION = "Invenio/%s refextract/%s" % (CFG_VERSION, '1.4')
# Module config directory
CFG_CONF_DIR = '%s/docextract' % CFG_ETCDIR
CFG_REFEXTRACT_KBS = {
'journals' : "%s/journal-titles.kb" % CFG_CONF_DIR,
'journals-re' : "%s/journal-titles-re.kb" % CFG_CONF_DIR,
'report-numbers' : "%s/report-numbers.kb" % CFG_CONF_DIR,
'authors' : "%s/authors.kb" % CFG_CONF_DIR,
'collaborations' : "%s/collaborations.kb" % CFG_CONF_DIR,
'books' : "%s/books.kb" % CFG_CONF_DIR,
'conferences' : "%s/conferences.kb" % CFG_CONF_DIR,
'publishers' : "%s/publishers.kb" % CFG_CONF_DIR,
'special-journals': "%s/special-journals.kb" % CFG_CONF_DIR,
}
# Prefix for temp files
CFG_REFEXTRACT_FILENAME = "refextract"
## MARC Fields and subfields used by refextract:
# Reference fields:
CFG_REFEXTRACT_CTRL_FIELD_RECID = "001" # control-field recid
CFG_REFEXTRACT_TAG_ID_REFERENCE = "999" # ref field tag
CFG_REFEXTRACT_IND1_REFERENCE = "C" # ref field ind1
CFG_REFEXTRACT_IND2_REFERENCE = "5" # ref field ind2
CFG_REFEXTRACT_SUBFIELD_MARKER = "o" # ref marker subfield
CFG_REFEXTRACT_SUBFIELD_MISC = "m" # ref misc subfield
CFG_REFEXTRACT_SUBFIELD_DOI = "a" # ref DOI subfield (NEW)
CFG_REFEXTRACT_SUBFIELD_REPORT_NUM = "r" # ref reportnum subfield
CFG_REFEXTRACT_SUBFIELD_TITLE = "s" # ref journal subfield
CFG_REFEXTRACT_SUBFIELD_URL = "u" # ref url subfield
CFG_REFEXTRACT_SUBFIELD_URL_DESCR = "z" # ref url-text subfield
CFG_REFEXTRACT_SUBFIELD_AUTH = "h" # ref author subfield
CFG_REFEXTRACT_SUBFIELD_QUOTED = "t" # ref title subfield
CFG_REFEXTRACT_SUBFIELD_ISBN = "i" # ref isbn subfield
CFG_REFEXTRACT_SUBFIELD_PUBLISHER = "p" # ref publisher subfield
CFG_REFEXTRACT_SUBFIELD_YEAR = "y" # ref publisher subfield
CFG_REFEXTRACT_SUBFIELD_BOOK = "xbook" # ref book subfield
## refextract statistics fields:
CFG_REFEXTRACT_TAG_ID_EXTRACTION_STATS = "999" # ref-stats tag
CFG_REFEXTRACT_IND1_EXTRACTION_STATS = "C" # ref-stats ind1
CFG_REFEXTRACT_IND2_EXTRACTION_STATS = "6" # ref-stats ind2
CFG_REFEXTRACT_SUBFIELD_EXTRACTION_STATS = "a" # ref-stats subfield
CFG_REFEXTRACT_SUBFIELD_EXTRACTION_TIME = "t" # ref-stats time subfield
CFG_REFEXTRACT_SUBFIELD_EXTRACTION_VERSION = "v" # ref-stats version subfield
## Internal tags are used by refextract to mark-up recognised citation
## information. These are the "closing tags:
CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM = r"</cds.REPORTNUMBER>"
CFG_REFEXTRACT_MARKER_CLOSING_TITLE = r"</cds.JOURNAL>"
CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID = r"</cds.JOURNALibid>"
CFG_REFEXTRACT_MARKER_CLOSING_SERIES = r"</cds.SER>"
CFG_REFEXTRACT_MARKER_CLOSING_VOLUME = r"</cds.VOL>"
CFG_REFEXTRACT_MARKER_CLOSING_YEAR = r"</cds.YR>"
CFG_REFEXTRACT_MARKER_CLOSING_PAGE = r"</cds.PG>"
CFG_REFEXTRACT_MARKER_CLOSING_QUOTED = r"</cds.QUOTED>"
CFG_REFEXTRACT_MARKER_CLOSING_ISBN = r"</cds.ISBN>"
CFG_REFEXTRACT_MARKER_CLOSING_PUBLISHER = r"</cds.PUBLISHER>"
## Of the form '</cds.AUTHxxxx>' only
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_STND = r"</cds.AUTHstnd>"
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_ETAL = r"</cds.AUTHetal>"
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_INCL = r"</cds.AUTHincl>"
## XML Record and collection opening/closing tags:
CFG_REFEXTRACT_XML_VERSION = u"""<?xml version="1.0" encoding="UTF-8"?>"""
CFG_REFEXTRACT_XML_COLLECTION_OPEN = u"""<collection xmlns="http://www.loc.gov/MARC21/slim">"""
CFG_REFEXTRACT_XML_COLLECTION_CLOSE = u"""</collection>"""
CFG_REFEXTRACT_XML_RECORD_OPEN = u"<record>"
CFG_REFEXTRACT_XML_RECORD_CLOSE = u"</record>"
## The minimum length of a reference's misc text to be deemed insignificant.
## when comparing misc text with semi-colon defined sub-references.
## Values higher than this value reflect meaningful misc text.
## Hence, upon finding a correct semi-colon, but having current misc text
## length less than this value (without other meaningful reference objects:
## report numbers, titles...) then no split will occur.
## (A higher value will increase splitting strictness. i.e. Fewer splits)
CGF_REFEXTRACT_SEMI_COLON_MISC_TEXT_SENSITIVITY = 60
## The length of misc text between two adjacent authors which is
## deemed as insignificant. As such, when misc text of a length less
## than this value is found, then the latter author group is dumped into misc.
## (A higher value will increase splitting strictness. i.e. Fewer splits)
CGF_REFEXTRACT_ADJACENT_AUTH_MISC_SEPARATION = 10
## Maximum number of lines for a citation before it is considered invalid
CFG_REFEXTRACT_MAX_LINES = 25
| gpl-2.0 | 3,909,561,055,325,194,000 | 49.241379 | 96 | 0.687886 | false |
imatge-upc/saliency-salgan-2017 | scripts/utils.py | 1 | 1858 | import os
import numpy as np
import cv2
import theano
import lasagne
from constants import HOME_DIR
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i + n]
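# Illustrative behaviour (example added for clarity, not part of the original script):
#   list(chunks(list(range(7)), 3)) -> [[0, 1, 2], [3, 4, 5], [6]]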
def load_weights(net, path, epochtoload):
"""
Load a pretrained model
:param epochtoload: epoch to load
:param net: model object
:param path: path of the weights to be set
"""
with np.load(HOME_DIR + path + "modelWeights{:04d}.npz".format(epochtoload)) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
lasagne.layers.set_all_param_values(net, param_values)
def predict(model, image_stimuli, num_epoch=None, name=None, path_output_maps=None):
size = (image_stimuli.shape[1], image_stimuli.shape[0])
blur_size = 5
if image_stimuli.shape[:2] != (model.inputHeight, model.inputWidth):
image_stimuli = cv2.resize(image_stimuli, (model.inputWidth, model.inputHeight), interpolation=cv2.INTER_AREA)
blob = np.zeros((1, 3, model.inputHeight, model.inputWidth), theano.config.floatX)
blob[0, ...] = (image_stimuli.astype(theano.config.floatX).transpose(2, 0, 1))
result = np.squeeze(model.predictFunction(blob))
saliency_map = (result * 255).astype(np.uint8)
# resize back to original size
saliency_map = cv2.resize(saliency_map, size, interpolation=cv2.INTER_CUBIC)
# blur
saliency_map = cv2.GaussianBlur(saliency_map, (blur_size, blur_size), 0)
# clip again
saliency_map = np.clip(saliency_map, 0, 255)
if name is None:
# When we use for testing, there is no file name provided.
cv2.imwrite('./' + path_output_maps + '/validationRandomSaliencyPred_{:04d}.png'.format(num_epoch), saliency_map)
else:
cv2.imwrite(os.path.join(path_output_maps, name + '.jpg'), saliency_map)
| mit | 1,986,471,042,272,540,400 | 33.407407 | 121 | 0.668999 | false |
eNuvol/s3cmd | S3/AccessLog.py | 15 | 2970 | ## Amazon S3 - Access Control List representation
## Author: Michal Ludvig <[email protected]>
## http://www.logix.cz/michal
## License: GPL Version 2
import S3Uri
from Exceptions import ParameterError
from Utils import getTreeFromXml
from ACL import GranteeAnonRead
try:
import xml.etree.ElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
__all__ = []
class AccessLog(object):
LOG_DISABLED = "<BucketLoggingStatus></BucketLoggingStatus>"
LOG_TEMPLATE = "<LoggingEnabled><TargetBucket></TargetBucket><TargetPrefix></TargetPrefix></LoggingEnabled>"
def __init__(self, xml = None):
if not xml:
xml = self.LOG_DISABLED
self.tree = getTreeFromXml(xml)
self.tree.attrib['xmlns'] = "http://doc.s3.amazonaws.com/2006-03-01"
def isLoggingEnabled(self):
return bool(self.tree.find(".//LoggingEnabled"))
def disableLogging(self):
el = self.tree.find(".//LoggingEnabled")
if el:
self.tree.remove(el)
def enableLogging(self, target_prefix_uri):
el = self.tree.find(".//LoggingEnabled")
if not el:
el = getTreeFromXml(self.LOG_TEMPLATE)
self.tree.append(el)
el.find(".//TargetBucket").text = target_prefix_uri.bucket()
el.find(".//TargetPrefix").text = target_prefix_uri.object()
def targetPrefix(self):
if self.isLoggingEnabled():
el = self.tree.find(".//LoggingEnabled")
target_prefix = "s3://%s/%s" % (
self.tree.find(".//LoggingEnabled//TargetBucket").text,
self.tree.find(".//LoggingEnabled//TargetPrefix").text)
return S3Uri.S3Uri(target_prefix)
else:
return ""
def setAclPublic(self, acl_public):
le = self.tree.find(".//LoggingEnabled")
if not le:
raise ParameterError("Logging not enabled, can't set default ACL for logs")
tg = le.find(".//TargetGrants")
if not acl_public:
if not tg:
## All good, it's not been there
return
else:
le.remove(tg)
else: # acl_public == True
anon_read = GranteeAnonRead().getElement()
if not tg:
tg = ET.SubElement(le, "TargetGrants")
## What if TargetGrants already exists? We should check if
## AnonRead is there before appending a new one. Later...
tg.append(anon_read)
def isAclPublic(self):
raise NotImplementedError()
def __str__(self):
return ET.tostring(self.tree)
__all__.append("AccessLog")
if __name__ == "__main__":
from S3Uri import S3Uri
log = AccessLog()
print log
log.enableLogging(S3Uri("s3://targetbucket/prefix/log-"))
print log
log.setAclPublic(True)
print log
log.setAclPublic(False)
print log
log.disableLogging()
print log
# vim:et:ts=4:sts=4:ai
| gpl-2.0 | -1,506,036,610,399,320,600 | 31.282609 | 112 | 0.602694 | false |
plilja/project-euler | problem_17/test_english_numbers.py | 1 | 4741 | import unittest
from english_numbers import *
class TestEnglishNumbers(unittest.TestCase):
def test_digits(self):
self.assertEqual('zero', english_number(0))
self.assertEqual('one', english_number(1))
self.assertEqual('two', english_number(2))
self.assertEqual('three', english_number(3))
self.assertEqual('four', english_number(4))
self.assertEqual('five', english_number(5))
self.assertEqual('six', english_number(6))
self.assertEqual('seven', english_number(7))
self.assertEqual('eight', english_number(8))
self.assertEqual('nine', english_number(9))
def test_10_through_19(self):
self.assertEqual('ten', english_number(10))
self.assertEqual('eleven', english_number(11))
self.assertEqual('twelve', english_number(12))
self.assertEqual('thirteen', english_number(13))
self.assertEqual('fourteen', english_number(14))
self.assertEqual('fifteen', english_number(15))
self.assertEqual('sixteen', english_number(16))
self.assertEqual('seventeen', english_number(17))
self.assertEqual('eighteen', english_number(18))
self.assertEqual('nineteen', english_number(19))
def test_20_through_29(self):
self.assertEqual('twenty', english_number(20))
self.assertEqual('twenty-one', english_number(21))
self.assertEqual('twenty-two', english_number(22))
self.assertEqual('twenty-three', english_number(23))
self.assertEqual('twenty-four', english_number(24))
self.assertEqual('twenty-five', english_number(25))
self.assertEqual('twenty-six', english_number(26))
self.assertEqual('twenty-seven', english_number(27))
self.assertEqual('twenty-eight', english_number(28))
self.assertEqual('twenty-nine', english_number(29))
def test_between_30_and_39(self):
self.assertEqual('thirty', english_number(30))
self.assertEqual('thirty-five', english_number(35))
self.assertEqual('thirty-nine', english_number(39))
def test_between_100_and_199(self):
self.assertEqual('one hundred', english_number(100))
self.assertEqual('one hundred and one', english_number(101))
self.assertEqual('one hundred and three', english_number(103))
self.assertEqual('one hundred and eight', english_number(108))
self.assertEqual('one hundred and nine', english_number(109))
self.assertEqual('one hundred and twenty-nine', english_number(129))
self.assertEqual('one hundred and eighty-eight', english_number(188))
self.assertEqual('one hundred and ninety-nine', english_number(199))
def test_between_200_and_299(self):
self.assertEqual('two hundred', english_number(200))
self.assertEqual('two hundred and one', english_number(201))
self.assertEqual('two hundred and twenty-three', english_number(223))
self.assertEqual('two hundred and forty-five', english_number(245))
self.assertEqual('two hundred and sixty-five', english_number(265))
self.assertEqual('two hundred and seventy-two', english_number(272))
def test_between_300_and_999(self):
self.assertEqual('three hundred', english_number(300))
self.assertEqual('four hundred and forty', english_number(440))
self.assertEqual('five hundred and forty-three', english_number(543))
self.assertEqual('six hundred and thirty-one', english_number(631))
self.assertEqual('seven hundred and nine', english_number(709))
self.assertEqual('eight hundred and ninety-four', english_number(894))
self.assertEqual('nine hundred and ninety-nine', english_number(999))
def test_larger_than_1000(self):
self.assertEqual('one thousand', english_number(1000))
self.assertEqual('one thousand and one', english_number(1001))
self.assertEqual('one thousand and twenty', english_number(1020))
self.assertEqual('one thousand one hundred and twenty', english_number(1120))
self.assertEqual('two thousand', english_number(2000))
self.assertEqual('eight thousand seven hundred and thirty-four', english_number(8734))
self.assertEqual('nine thousand nine hundred and ninety-nine', english_number(9999))
class TestProjectEuler(unittest.TestCase):
def test_1_through_5(self):
self.assertEqual(19, len_numbers(range(1, 6)))
def test_342(self):
self.assertEqual(23, len_numbers([342]))
def test_115(self):
self.assertEqual(20, len_numbers([115]))
def test_1_through_1000(self):
self.assertEqual(21124, len_numbers(range(1, 1001)))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 1,017,037,745,907,725,300 | 46.41 | 94 | 0.673698 | false |
XXLRay/libreshot | build/lib.linux-x86_64-2.7/libreshot/uploads/youtube/gdata/sample_util.py | 106 | 10714 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides utility functions used with command line samples."""
# This module is used for version 2 of the Google Data APIs.
import sys
import getpass
import urllib
import gdata.gauth
__author__ = '[email protected] (Jeff Scudder)'
CLIENT_LOGIN = 1
AUTHSUB = 2
OAUTH = 3
HMAC = 1
RSA = 2
class SettingsUtil(object):
"""Gather's user preferences from flags or command prompts.
An instance of this object stores the choices made by the user. At some
point it might be useful to save the user's preferences so that they do
not need to always set flags or answer preference prompts.
"""
def __init__(self, prefs=None):
self.prefs = prefs or {}
def get_param(self, name, prompt='', secret=False, ask=True, reuse=False):
# First, check in this objects stored preferences.
if name in self.prefs:
return self.prefs[name]
# Second, check for a command line parameter.
value = None
for i in xrange(len(sys.argv)):
if sys.argv[i].startswith('--%s=' % name):
value = sys.argv[i].split('=')[1]
elif sys.argv[i] == '--%s' % name:
value = sys.argv[i + 1]
# Third, if it was not on the command line, ask the user to input the
# value.
if value is None and ask:
prompt = '%s: ' % prompt
if secret:
value = getpass.getpass(prompt)
else:
value = raw_input(prompt)
# If we want to save the preference for reuse in future requests, add it
# to this object's prefs.
if value is not None and reuse:
self.prefs[name] = value
return value
def authorize_client(self, client, auth_type=None, service=None,
source=None, scopes=None, oauth_type=None,
consumer_key=None, consumer_secret=None):
"""Uses command line arguments, or prompts user for token values."""
if 'client_auth_token' in self.prefs:
return
if auth_type is None:
auth_type = int(self.get_param(
'auth_type', 'Please choose the authorization mechanism you want'
' to use.\n'
'1. to use your email address and password (ClientLogin)\n'
'2. to use a web browser to visit an auth web page (AuthSub)\n'
'3. if you have registed to use OAuth\n', reuse=True))
# Get the scopes for the services we want to access.
if auth_type == AUTHSUB or auth_type == OAUTH:
if scopes is None:
scopes = self.get_param(
'scopes', 'Enter the URL prefixes (scopes) for the resources you '
'would like to access.\nFor multiple scope URLs, place a comma '
'between each URL.\n'
'Example: http://www.google.com/calendar/feeds/,'
'http://www.google.com/m8/feeds/\n', reuse=True).split(',')
elif isinstance(scopes, (str, unicode)):
scopes = scopes.split(',')
if auth_type == CLIENT_LOGIN:
email = self.get_param('email', 'Please enter your username',
reuse=False)
password = self.get_param('password', 'Password', True, reuse=False)
if service is None:
service = self.get_param(
'service', 'What is the name of the service you wish to access?'
'\n(See list:'
' http://code.google.com/apis/gdata/faq.html#clientlogin)',
reuse=True)
if source is None:
source = self.get_param('source', ask=False, reuse=True)
client.client_login(email, password, source=source, service=service)
elif auth_type == AUTHSUB:
auth_sub_token = self.get_param('auth_sub_token', ask=False, reuse=True)
session_token = self.get_param('session_token', ask=False, reuse=True)
private_key = None
auth_url = None
single_use_token = None
rsa_private_key = self.get_param(
'rsa_private_key',
'If you want to use secure mode AuthSub, please provide the\n'
' location of your RSA private key which corresponds to the\n'
' certificate you have uploaded for your domain. If you do not\n'
' have an RSA key, simply press enter', reuse=True)
if rsa_private_key:
try:
private_key_file = open(rsa_private_key, 'rb')
private_key = private_key_file.read()
private_key_file.close()
except IOError:
print 'Unable to read private key from file'
if private_key is not None:
if client.auth_token is None:
if session_token:
client.auth_token = gdata.gauth.SecureAuthSubToken(
session_token, private_key, scopes)
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
return
elif auth_sub_token:
client.auth_token = gdata.gauth.SecureAuthSubToken(
auth_sub_token, private_key, scopes)
client.upgrade_token()
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
return
auth_url = gdata.gauth.generate_auth_sub_url(
'http://gauthmachine.appspot.com/authsub', scopes, True)
print 'with a private key, get ready for this URL', auth_url
else:
if client.auth_token is None:
if session_token:
client.auth_token = gdata.gauth.AuthSubToken(session_token,
scopes)
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
return
elif auth_sub_token:
client.auth_token = gdata.gauth.AuthSubToken(auth_sub_token,
scopes)
client.upgrade_token()
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
return
auth_url = gdata.gauth.generate_auth_sub_url(
'http://gauthmachine.appspot.com/authsub', scopes)
print 'Visit the following URL in your browser to authorize this app:'
print str(auth_url)
print 'After agreeing to authorize the app, copy the token value from'
print ' the URL. Example: "www.google.com/?token=ab12" token value is'
print ' ab12'
token_value = raw_input('Please enter the token value: ')
if private_key is not None:
single_use_token = gdata.gauth.SecureAuthSubToken(
token_value, private_key, scopes)
else:
single_use_token = gdata.gauth.AuthSubToken(token_value, scopes)
client.auth_token = single_use_token
client.upgrade_token()
elif auth_type == OAUTH:
if oauth_type is None:
oauth_type = int(self.get_param(
'oauth_type', 'Please choose the authorization mechanism you want'
' to use.\n'
'1. use an HMAC signature using your consumer key and secret\n'
'2. use RSA with your private key to sign requests\n',
reuse=True))
consumer_key = self.get_param(
          'consumer_key', 'Please enter your OAuth consumer key '
'which identifies your app', reuse=True)
if oauth_type == HMAC:
consumer_secret = self.get_param(
            'consumer_secret', 'Please enter your OAuth consumer secret '
'which you share with the OAuth provider', True, reuse=False)
# Swap out this code once the client supports requesting an oauth
# token.
# Get a request token.
request_token = client.get_oauth_token(
scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
consumer_secret=consumer_secret)
elif oauth_type == RSA:
rsa_private_key = self.get_param(
'rsa_private_key',
'Please provide the location of your RSA private key which\n'
' corresponds to the certificate you have uploaded for your'
' domain.',
reuse=True)
try:
private_key_file = open(rsa_private_key, 'rb')
private_key = private_key_file.read()
private_key_file.close()
except IOError:
print 'Unable to read private key from file'
request_token = client.get_oauth_token(
scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
rsa_private_key=private_key)
else:
print 'Invalid OAuth signature type'
return None
# Authorize the request token in the browser.
print 'Visit the following URL in your browser to authorize this app:'
print str(request_token.generate_authorization_url())
print 'After agreeing to authorize the app, copy URL from the browser\'s'
print ' address bar.'
url = raw_input('Please enter the url: ')
gdata.gauth.authorize_request_token(request_token, url)
# Exchange for an access token.
client.auth_token = client.get_access_token(request_token)
else:
print 'Invalid authorization type.'
return None
if client.auth_token:
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
def get_param(name, prompt='', secret=False, ask=True):
settings = SettingsUtil()
return settings.get_param(name=name, prompt=prompt, secret=secret, ask=ask)
def authorize_client(client, auth_type=None, service=None, source=None,
scopes=None, oauth_type=None, consumer_key=None,
consumer_secret=None):
"""Uses command line arguments, or prompts user for token values."""
settings = SettingsUtil()
return settings.authorize_client(client=client, auth_type=auth_type,
service=service, source=source,
scopes=scopes, oauth_type=oauth_type,
consumer_key=consumer_key,
consumer_secret=consumer_secret)
def print_options():
"""Displays usage information, available command line params."""
# TODO: fill in the usage description for authorizing the client.
print ''
| gpl-3.0 | -2,702,435,142,644,116,500 | 38.828996 | 79 | 0.614523 | false |
Birion/python-ffdl | pyffdl/sites/tgstory.py | 1 | 3917 | import re
from typing import Any, Dict, Tuple, Union
from sys import exit as sysexit
import attr
import pendulum # type: ignore
from bs4 import BeautifulSoup # type: ignore
from bs4.element import Tag # type: ignore
from furl import furl # type: ignore
from requests import Response
from pyffdl.sites.story import Extra, Story
from pyffdl.utilities.misc import clean_text
@attr.s(auto_attribs=True)
class TGStorytimeStory(Story):
def _init(self):
if self.page.select_one(".bigblock .errormsg"):
self.url.query.add({"ageconsent": "ok"})
main_page_request = self.session.get(self.url.url)
if not main_page_request.ok:
sysexit(1)
self._page = BeautifulSoup(main_page_request.content, "html5lib")
@staticmethod
def get_raw_text(response: Response) -> str:
"""Returns only the text of the chapter."""
soup = BeautifulSoup(response.content, "html5lib")
return clean_text([x for x in soup.select_one("#story span")])
@staticmethod
def chapter_parser(value: Tag) -> Tuple[int, str]:
return int(value["value"]), re.sub(r"^\d+\.\s+", "", value.text)
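    # Illustrative example (hypothetical markup): an <option value="3">3. New Friends</option>
    # element would be parsed by chapter_parser into (3, "New Friends").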
@property
def select(self) -> str:
return "select.textbox[name=chapter] option"
def make_title_page(self) -> None:
"""Parses the main page for information about the story and author."""
def get_clean_text(header: Any, selector: str) -> str:
try:
return header.select_one(selector).string.strip()
except AttributeError:
return "\n".join(header.select_one(selector).stripped_strings)
def process_content(header: Any) -> Dict[str, Union[str, int]]:
_ = " ".join(
str(x).strip()
for x in header.select_one(".content").contents
if str(x).strip() != "" and x.name != "br"
)
_ = re.sub(r" ?</span>", "", _)
_ = [x.strip() for x in re.split(r'<span class="label">', _) if x]
data = {}
for finding in _:
name, val = finding.split(": ")
val = ", ".join(
x
for x in BeautifulSoup(val, "html5lib").stripped_strings
if x != ","
)
if val.isdigit():
val = int(val)
data[name] = val
return data
_header = self.page.select_one(".boxtop")
self.metadata.title = get_clean_text(_header, "#pagetitle>a:first-of-type")
_author = _header.select_one("#pagetitle>a:last-of-type")
_author_url = furl(_author["href"])
self.metadata.author.name = _author.string.strip()
self.metadata.author.url = self.url.copy().set(
path=_author_url.path, query_params=_author_url.query.params
)
self.metadata.summary = get_clean_text(_header, ".summarytext")
content = process_content(_header)
del content["Read"]
del content["Chapters"]
try:
self.metadata.complete = content.pop("Completed") == "Completed Story"
except KeyError:
self.metadata.complete = False
try:
self.metadata.updated = pendulum.from_format(
content.pop("Updated"), "MM/DD/YY"
)
except KeyError:
self.metadata.updated = None
self.metadata.published = pendulum.from_format(
content.pop("Published"), "MM/DD/YY"
)
self.metadata.category = content.pop("Categories")
self.metadata.words = content.pop("Word count")
for key, value in content.items():
self.metadata.extras.append(Extra(name=key, value=value))
def make_new_chapter_url(self, url: furl, value: str) -> furl:
url.query.params["chapter"] = value
return url
| mit | 1,630,131,043,255,856,000 | 33.973214 | 83 | 0.567016 | false |
effigies/mne-python | mne/io/meas_info.py | 1 | 35635 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
from warnings import warn
from copy import deepcopy
import os.path as op
import numpy as np
from scipy import linalg
from ..externals.six import BytesIO, string_types
from datetime import datetime as dt
from .pick import channel_type
from .constants import FIFF
from .open import fiff_open
from .tree import dir_tree_find, copy_tree
from .tag import read_tag, find_tag
from .proj import _read_proj, _write_proj, _uniquify_projs
from .ctf import read_ctf_comp, write_ctf_comp
from .write import (start_file, end_file, start_block, end_block,
write_string, write_dig_point, write_float, write_int,
write_coord_trans, write_ch_info, write_name_list,
write_julian)
from ..utils import logger, verbose
from ..fixes import Counter
from .. import __version__
from ..externals.six import b
_kind_dict = dict(
eeg=(FIFF.FIFFV_EEG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
mag=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_MAG_T3, FIFF.FIFF_UNIT_T),
grad=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_PLANAR_T1, FIFF.FIFF_UNIT_T_M),
misc=(FIFF.FIFFV_MISC_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_NONE),
stim=(FIFF.FIFFV_STIM_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
eog=(FIFF.FIFFV_EOG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
ecg=(FIFF.FIFFV_ECG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
seeg=(FIFF.FIFFV_SEEG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
)
def _summarize_str(st):
"""Aux function"""
return st[:56][::-1].split(',', 1)[-1][::-1] + ', ...'
class Info(dict):
""" Info class to nicely represent info dicts
"""
def __repr__(self):
"""Summarize info instead of printing all"""
strs = ['<Info | %s non-empty fields']
non_empty = 0
for k, v in self.items():
if k in ['bads', 'ch_names']:
entr = (', '.join(b for ii, b in enumerate(v) if ii < 10)
if v else '0 items')
if len(entr) >= 56:
                    # get rid of half-printed ch names
entr = _summarize_str(entr)
elif k == 'filename' and v:
path, fname = op.split(v)
entr = path[:10] + '.../' + fname
elif k == 'projs' and v:
entr = ', '.join(p['desc'] + ': o%s' %
{0: 'ff', 1: 'n'}[p['active']] for p in v)
if len(entr) >= 56:
entr = _summarize_str(entr)
elif k == 'meas_date' and np.iterable(v):
                # first entry in meas_date is meaningful
entr = dt.fromtimestamp(v[0]).strftime('%Y-%m-%d %H:%M:%S')
else:
this_len = (len(v) if hasattr(v, '__len__') else
('%s' % v if v is not None else None))
entr = (('%d items' % this_len) if isinstance(this_len, int)
else ('%s' % this_len if this_len else ''))
if entr:
non_empty += 1
entr = ' | ' + entr
if k == 'chs':
ch_types = [channel_type(self, idx) for idx in range(len(v))]
ch_counts = Counter(ch_types)
entr += " (%s)" % ', '.join("%s: %d" % (ch_type.upper(), count)
for ch_type, count
in ch_counts.items())
strs.append('%s : %s%s' % (k, str(type(v))[7:-2], entr))
strs_non_empty = sorted(s for s in strs if '|' in s)
strs_empty = sorted(s for s in strs if '|' not in s)
st = '\n '.join(strs_non_empty + strs_empty)
st += '\n>'
st %= non_empty
return st
def _anonymize(self):
if self.get('subject_info') is not None:
del self['subject_info']
def read_fiducials(fname):
"""Read fiducials from a fiff file
Returns
-------
pts : list of dicts
List of digitizer points (each point in a dict).
coord_frame : int
The coordinate frame of the points (one of
mne.io.constants.FIFF.FIFFV_COORD_...)
"""
fid, tree, _ = fiff_open(fname)
with fid:
isotrak = dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)
isotrak = isotrak[0]
pts = []
coord_frame = FIFF.FIFFV_COORD_UNKNOWN
for k in range(isotrak['nent']):
kind = isotrak['directory'][k].kind
pos = isotrak['directory'][k].pos
if kind == FIFF.FIFF_DIG_POINT:
tag = read_tag(fid, pos)
pts.append(tag.data)
elif kind == FIFF.FIFF_MNE_COORD_FRAME:
tag = read_tag(fid, pos)
coord_frame = tag.data[0]
if coord_frame == FIFF.FIFFV_COORD_UNKNOWN:
err = ("No coordinate frame was found in the file %r, it is probably "
"not a valid fiducials file." % fname)
raise ValueError(err)
# coord_frame is not stored in the tag
for pt in pts:
pt['coord_frame'] = coord_frame
return pts, coord_frame
def write_fiducials(fname, pts, coord_frame=0):
"""Write fiducials to a fiff file
Parameters
----------
fname : str
Destination file name.
pts : iterator of dict
Iterator through digitizer points. Each point is a dictionary with
the keys 'kind', 'ident' and 'r'.
coord_frame : int
The coordinate frame of the points (one of
mne.io.constants.FIFF.FIFFV_COORD_...)
"""
pts_frames = set((pt.get('coord_frame', coord_frame) for pt in pts))
bad_frames = pts_frames - set((coord_frame,))
if len(bad_frames) > 0:
err = ("Points have coord_frame entries that are incompatible with "
"coord_frame=%i: %s." % (coord_frame, str(tuple(bad_frames))))
raise ValueError(err)
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_ISOTRAK)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame)
for pt in pts:
write_dig_point(fid, pt)
end_block(fid, FIFF.FIFFB_ISOTRAK)
end_file(fid)
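# Illustrative round trip (file name and coordinates are made up):
#   pts = [{'kind': FIFF.FIFFV_POINT_CARDINAL, 'ident': FIFF.FIFFV_POINT_NASION,
#           'r': np.array([0., 0.102, 0.])}]
#   write_fiducials('sample-fiducials-fid.fif', pts, FIFF.FIFFV_COORD_MRI)
#   pts_read, coord_frame = read_fiducials('sample-fiducials-fid.fif')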
def _read_dig_points(fname, comments='%'):
"""Read digitizer data from file.
This function can read space-delimited text files of digitizer data.
Parameters
----------
fname : str
The filepath of space delimited file with points.
comments : str
The character used to indicate the start of a comment;
Default: '%'.
Returns
-------
dig_points : np.ndarray, shape (n_points, 3)
Array of dig points.
"""
dig_points = np.loadtxt(fname, comments=comments, ndmin=2)
if dig_points.shape[-1] != 3:
err = 'Data must be (n, 3) instead of %s' % (dig_points.shape,)
raise ValueError(err)
return dig_points
def _write_dig_points(fname, dig_points):
"""Write points to file
Parameters
----------
fname : str
Path to the file to write. The kind of file to write is determined
based on the extension: '.txt' for tab separated text file.
dig_points : numpy.ndarray, shape (n_points, 3)
Points.
"""
_, ext = op.splitext(fname)
dig_points = np.asarray(dig_points)
if (dig_points.ndim != 2) or (dig_points.shape[1] != 3):
err = ("Points must be of shape (n_points, 3), "
"not %s" % (dig_points.shape,))
raise ValueError(err)
if ext == '.txt':
with open(fname, 'wb') as fid:
version = __version__
now = dt.now().strftime("%I:%M%p on %B %d, %Y")
fid.write(b("% Ascii 3D points file created by mne-python version "
"{version} at {now}\n".format(version=version,
now=now)))
fid.write(b("% {N} 3D points, "
"x y z per line\n".format(N=len(dig_points))))
np.savetxt(fid, dig_points, delimiter='\t', newline='\n')
else:
msg = "Unrecognized extension: %r. Need '.txt'." % ext
raise ValueError(msg)
def _make_dig_points(nasion=None, lpa=None, rpa=None, hpi=None,
dig_points=None):
"""Constructs digitizer info for the info.
Parameters
----------
nasion : array-like | numpy.ndarray, shape (3,) | None
Point designated as the nasion point.
lpa : array-like | numpy.ndarray, shape (3,) | None
Point designated as the left auricular point.
rpa : array-like | numpy.ndarray, shape (3,) | None
Point designated as the right auricular point.
hpi : array-like | numpy.ndarray, shape (n_points, 3) | None
Points designated as head position indicator points.
dig_points : array-like | numpy.ndarray, shape (n_points, 3)
Points designed as the headshape points.
Returns
-------
dig : list
List of digitizer points to be added to the info['dig'].
"""
dig = []
if nasion is not None:
nasion = np.asarray(nasion)
if nasion.shape == (3,):
dig.append({'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': FIFF.FIFFV_COORD_HEAD})
else:
msg = ('Nasion should have the shape (3,) instead of %s'
% (nasion.shape,))
raise ValueError(msg)
if lpa is not None:
lpa = np.asarray(lpa)
if lpa.shape == (3,):
dig.append({'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': FIFF.FIFFV_COORD_HEAD})
else:
msg = ('LPA should have the shape (3,) instead of %s'
% (lpa.shape,))
raise ValueError(msg)
if rpa is not None:
rpa = np.asarray(rpa)
if rpa.shape == (3,):
dig.append({'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': FIFF.FIFFV_COORD_HEAD})
else:
msg = ('RPA should have the shape (3,) instead of %s'
% (rpa.shape,))
raise ValueError(msg)
if hpi is not None:
hpi = np.asarray(hpi)
if hpi.shape[1] == 3:
for idx, point in enumerate(hpi):
dig.append({'r': point, 'ident': idx,
'kind': FIFF.FIFFV_POINT_HPI,
'coord_frame': FIFF.FIFFV_COORD_HEAD})
else:
msg = ('HPI should have the shape (n_points, 3) instead of '
'%s' % (hpi.shape,))
raise ValueError(msg)
if dig_points is not None:
dig_points = np.asarray(dig_points)
if dig_points.shape[1] == 3:
for idx, point in enumerate(dig_points):
dig.append({'r': point, 'ident': idx,
'kind': FIFF.FIFFV_POINT_EXTRA,
'coord_frame': FIFF.FIFFV_COORD_HEAD})
else:
msg = ('Points should have the shape (n_points, 3) instead of '
'%s' % (dig_points.shape,))
raise ValueError(msg)
return dig
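# --- Illustrative usage sketch (not part of the original module) ----------
# How _make_dig_points is typically fed. The coordinates are made-up
# head-frame positions in meters; the helper is never called at import time.
def _example_make_dig_points():
    rng = np.random.RandomState(0)
    nasion = [0., 0.08, 0.]
    lpa = [-0.08, 0., 0.]
    rpa = [0.08, 0., 0.]
    headshape = rng.rand(10, 3) * 0.1  # fake extra headshape points
    dig = _make_dig_points(nasion=nasion, lpa=lpa, rpa=rpa,
                           dig_points=headshape)
    # three cardinal points plus ten FIFFV_POINT_EXTRA entries
    assert len(dig) == 13
    return dig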
@verbose
def read_info(fname, verbose=None):
"""Read measurement info from a file
Parameters
----------
fname : str
File name.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
info : instance of mne.io.meas_info.Info
Info on dataset.
"""
f, tree, _ = fiff_open(fname)
with f as fid:
info = read_meas_info(fid, tree)[0]
return info
def read_bad_channels(fid, node):
"""Read bad channels
Parameters
----------
fid : file
The file descriptor.
node : dict
The node of the FIF tree that contains info on the bad channels.
Returns
-------
bads : list
A list of bad channel's names.
"""
nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS)
bads = []
if len(nodes) > 0:
for node in nodes:
tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST)
if tag is not None and tag.data is not None:
bads = tag.data.split(':')
return bads
@verbose
def read_meas_info(fid, tree, verbose=None):
"""Read the measurement info
Parameters
----------
fid : file
Open file descriptor.
tree : tree
FIF tree structure.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
info : instance of mne.io.meas_info.Info
Info on dataset.
meas : dict
Node in tree that contains the info.
"""
# Find the desired blocks
meas = dir_tree_find(tree, FIFF.FIFFB_MEAS)
if len(meas) == 0:
raise ValueError('Could not find measurement data')
if len(meas) > 1:
        raise ValueError('Cannot read more than 1 measurement data')
meas = meas[0]
meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO)
if len(meas_info) == 0:
raise ValueError('Could not find measurement info')
if len(meas_info) > 1:
        raise ValueError('Cannot read more than 1 measurement info')
meas_info = meas_info[0]
# Read measurement info
dev_head_t = None
ctf_head_t = None
meas_date = None
highpass = None
lowpass = None
nchan = None
sfreq = None
chs = []
experimenter = None
description = None
proj_id = None
proj_name = None
line_freq = None
p = 0
for k in range(meas_info['nent']):
kind = meas_info['directory'][k].kind
pos = meas_info['directory'][k].pos
if kind == FIFF.FIFF_NCHAN:
tag = read_tag(fid, pos)
nchan = int(tag.data)
elif kind == FIFF.FIFF_SFREQ:
tag = read_tag(fid, pos)
sfreq = float(tag.data)
elif kind == FIFF.FIFF_CH_INFO:
tag = read_tag(fid, pos)
chs.append(tag.data)
p += 1
elif kind == FIFF.FIFF_LOWPASS:
tag = read_tag(fid, pos)
lowpass = float(tag.data)
elif kind == FIFF.FIFF_HIGHPASS:
tag = read_tag(fid, pos)
highpass = float(tag.data)
elif kind == FIFF.FIFF_MEAS_DATE:
tag = read_tag(fid, pos)
meas_date = tag.data
elif kind == FIFF.FIFF_COORD_TRANS:
tag = read_tag(fid, pos)
cand = tag.data
if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \
cand['to'] == FIFF.FIFFV_COORD_HEAD:
dev_head_t = cand
elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \
cand['to'] == FIFF.FIFFV_COORD_HEAD:
ctf_head_t = cand
elif kind == FIFF.FIFF_EXPERIMENTER:
tag = read_tag(fid, pos)
experimenter = tag.data
elif kind == FIFF.FIFF_DESCRIPTION:
tag = read_tag(fid, pos)
description = tag.data
elif kind == FIFF.FIFF_PROJ_ID:
tag = read_tag(fid, pos)
proj_id = tag.data
elif kind == FIFF.FIFF_PROJ_NAME:
tag = read_tag(fid, pos)
proj_name = tag.data
elif kind == FIFF.FIFF_LINE_FREQ:
tag = read_tag(fid, pos)
line_freq = float(tag.data)
# Check that we have everything we need
if nchan is None:
        raise ValueError('Number of channels is not defined')
if sfreq is None:
raise ValueError('Sampling frequency is not defined')
if len(chs) == 0:
raise ValueError('Channel information not defined')
if len(chs) != nchan:
raise ValueError('Incorrect number of channel definitions found')
if dev_head_t is None or ctf_head_t is None:
hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)
if len(hpi_result) == 1:
hpi_result = hpi_result[0]
for k in range(hpi_result['nent']):
kind = hpi_result['directory'][k].kind
pos = hpi_result['directory'][k].pos
if kind == FIFF.FIFF_COORD_TRANS:
tag = read_tag(fid, pos)
cand = tag.data
if (cand['from'] == FIFF.FIFFV_COORD_DEVICE and
cand['to'] == FIFF.FIFFV_COORD_HEAD and
dev_head_t is None):
dev_head_t = cand
elif (cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and
cand['to'] == FIFF.FIFFV_COORD_HEAD and
ctf_head_t is None):
ctf_head_t = cand
# Locate the Polhemus data
isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK)
dig = None
if len(isotrak) == 0:
logger.info('Isotrak not found')
elif len(isotrak) > 1:
warn('Multiple Isotrak found')
else:
isotrak = isotrak[0]
dig = []
for k in range(isotrak['nent']):
kind = isotrak['directory'][k].kind
pos = isotrak['directory'][k].pos
if kind == FIFF.FIFF_DIG_POINT:
tag = read_tag(fid, pos)
dig.append(tag.data)
dig[-1]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
# Locate the acquisition information
acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS)
acq_pars = None
acq_stim = None
if len(acqpars) == 1:
acqpars = acqpars[0]
for k in range(acqpars['nent']):
kind = acqpars['directory'][k].kind
pos = acqpars['directory'][k].pos
if kind == FIFF.FIFF_DACQ_PARS:
tag = read_tag(fid, pos)
acq_pars = tag.data
elif kind == FIFF.FIFF_DACQ_STIM:
tag = read_tag(fid, pos)
acq_stim = tag.data
# Load the SSP data
projs = _read_proj(fid, meas_info)
# Load the CTF compensation data
comps = read_ctf_comp(fid, meas_info, chs)
# Load the bad channel list
bads = read_bad_channels(fid, meas_info)
#
# Put the data together
#
if tree['id'] is not None:
info = Info(file_id=tree['id'])
else:
info = Info(file_id=None)
subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT)
if len(subject_info) == 1:
subject_info = subject_info[0]
si = dict()
for k in range(subject_info['nent']):
kind = subject_info['directory'][k].kind
pos = subject_info['directory'][k].pos
if kind == FIFF.FIFF_SUBJ_ID:
tag = read_tag(fid, pos)
si['id'] = int(tag.data)
elif kind == FIFF.FIFF_SUBJ_HIS_ID:
tag = read_tag(fid, pos)
si['his_id'] = str(tag.data)
elif kind == FIFF.FIFF_SUBJ_LAST_NAME:
tag = read_tag(fid, pos)
si['last_name'] = str(tag.data)
elif kind == FIFF.FIFF_SUBJ_FIRST_NAME:
tag = read_tag(fid, pos)
si['first_name'] = str(tag.data)
elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY:
tag = read_tag(fid, pos)
si['birthday'] = tag.data
elif kind == FIFF.FIFF_SUBJ_SEX:
tag = read_tag(fid, pos)
si['sex'] = int(tag.data)
elif kind == FIFF.FIFF_SUBJ_HAND:
tag = read_tag(fid, pos)
si['hand'] = int(tag.data)
else:
si = None
info['subject_info'] = si
# Load extra information blocks
read_extra_meas_info(fid, tree, info)
# Make the most appropriate selection for the measurement id
if meas_info['parent_id'] is None:
if meas_info['id'] is None:
if meas['id'] is None:
if meas['parent_id'] is None:
info['meas_id'] = info['file_id']
else:
info['meas_id'] = meas['parent_id']
else:
info['meas_id'] = meas['id']
else:
info['meas_id'] = meas_info['id']
else:
info['meas_id'] = meas_info['parent_id']
info['experimenter'] = experimenter
info['description'] = description
info['proj_id'] = proj_id
info['proj_name'] = proj_name
if meas_date is None:
info['meas_date'] = [info['meas_id']['secs'], info['meas_id']['usecs']]
else:
info['meas_date'] = meas_date
info['nchan'] = nchan
info['sfreq'] = sfreq
info['highpass'] = highpass if highpass is not None else 0
info['lowpass'] = lowpass if lowpass is not None else info['sfreq'] / 2.0
info['line_freq'] = line_freq
# Add the channel information and make a list of channel names
# for convenience
info['chs'] = chs
info['ch_names'] = [ch['ch_name'] for ch in chs]
#
# Add the coordinate transformations
#
info['dev_head_t'] = dev_head_t
info['ctf_head_t'] = ctf_head_t
if dev_head_t is not None and ctf_head_t is not None:
head_ctf_trans = linalg.inv(ctf_head_t['trans'])
dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans'])
info['dev_ctf_t'] = {'from': FIFF.FIFFV_COORD_DEVICE,
'to': FIFF.FIFFV_MNE_COORD_CTF_HEAD,
'trans': dev_ctf_trans}
else:
info['dev_ctf_t'] = None
    # All kinds of auxiliary stuff
info['dig'] = dig
info['bads'] = bads
info['projs'] = projs
info['comps'] = comps
info['acq_pars'] = acq_pars
info['acq_stim'] = acq_stim
return info, meas
def read_extra_meas_info(fid, tree, info):
"""Read extra blocks from fid"""
# current method saves them into a BytesIO file instance for simplicity
# this and its partner, write_extra_meas_info, could be made more
    # comprehensive (i.e., actually parse and read the data instead of
# just storing it for later)
blocks = [FIFF.FIFFB_EVENTS, FIFF.FIFFB_HPI_RESULT, FIFF.FIFFB_HPI_MEAS,
FIFF.FIFFB_PROCESSING_HISTORY]
info['orig_blocks'] = dict(blocks=blocks)
fid_bytes = BytesIO()
start_file(fid_bytes, tree['id'])
start_block(fid_bytes, FIFF.FIFFB_MEAS_INFO)
for block in info['orig_blocks']['blocks']:
nodes = dir_tree_find(tree, block)
copy_tree(fid, tree['id'], nodes, fid_bytes)
end_block(fid_bytes, FIFF.FIFFB_MEAS_INFO)
info['orig_blocks']['bytes'] = fid_bytes.getvalue()
def write_extra_meas_info(fid, info):
"""Write otherwise left out blocks of data"""
# uses BytesIO fake file to read the appropriate blocks
if 'orig_blocks' in info and info['orig_blocks'] is not None:
# Blocks from the original
fid_bytes, tree, _ = fiff_open(BytesIO(info['orig_blocks']['bytes']))
for block in info['orig_blocks']['blocks']:
nodes = dir_tree_find(tree, block)
copy_tree(fid_bytes, tree['id'], nodes, fid)
def write_meas_info(fid, info, data_type=None, reset_range=True):
"""Write measurement info into a file id (from a fif file)
Parameters
----------
fid : file
Open file descriptor.
info : instance of mne.io.meas_info.Info
The measurement info structure.
data_type : int
The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for
raw data.
reset_range : bool
If True, info['chs'][k]['range'] will be set to unity.
Notes
-----
Tags are written in a particular order for compatibility with maxfilter.
"""
# Measurement info
start_block(fid, FIFF.FIFFB_MEAS_INFO)
# Extra measurement info
write_extra_meas_info(fid, info)
# Polhemus data
if info['dig'] is not None:
start_block(fid, FIFF.FIFFB_ISOTRAK)
for d in info['dig']:
write_dig_point(fid, d)
end_block(fid, FIFF.FIFFB_ISOTRAK)
# megacq parameters
if info['acq_pars'] is not None or info['acq_stim'] is not None:
start_block(fid, FIFF.FIFFB_DACQ_PARS)
if info['acq_pars'] is not None:
write_string(fid, FIFF.FIFF_DACQ_PARS, info['acq_pars'])
if info['acq_stim'] is not None:
write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim'])
end_block(fid, FIFF.FIFFB_DACQ_PARS)
# Coordinate transformations if the HPI result block was not there
if info['dev_head_t'] is not None:
write_coord_trans(fid, info['dev_head_t'])
if info['ctf_head_t'] is not None:
write_coord_trans(fid, info['ctf_head_t'])
# Projectors
_write_proj(fid, info['projs'])
# CTF compensation info
write_ctf_comp(fid, info['comps'])
# Bad channels
if len(info['bads']) > 0:
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
# General
if info.get('experimenter') is not None:
write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter'])
if info.get('description') is not None:
write_string(fid, FIFF.FIFF_DESCRIPTION, info['description'])
if info.get('proj_id') is not None:
write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id'])
if info.get('proj_name') is not None:
write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name'])
if info.get('meas_date') is not None:
write_int(fid, FIFF.FIFF_MEAS_DATE, info['meas_date'])
write_int(fid, FIFF.FIFF_NCHAN, info['nchan'])
write_float(fid, FIFF.FIFF_SFREQ, info['sfreq'])
write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass'])
write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass'])
if info.get('line_freq') is not None:
write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq'])
if data_type is not None:
write_int(fid, FIFF.FIFF_DATA_PACK, data_type)
# Channel information
for k, c in enumerate(info['chs']):
# Scan numbers may have been messed up
c = deepcopy(c)
c['scanno'] = k + 1
# for float/double, the "range" param is unnecessary
if reset_range is True:
c['range'] = 1.0
write_ch_info(fid, c)
# Subject information
if info.get('subject_info') is not None:
start_block(fid, FIFF.FIFFB_SUBJECT)
si = info['subject_info']
if si.get('id') is not None:
write_int(fid, FIFF.FIFF_SUBJ_ID, si['id'])
if si.get('his_id') is not None:
write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id'])
if si.get('last_name') is not None:
write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name'])
if si.get('first_name') is not None:
write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name'])
if si.get('birthday') is not None:
write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday'])
if si.get('sex') is not None:
write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex'])
if si.get('hand') is not None:
write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand'])
end_block(fid, FIFF.FIFFB_SUBJECT)
end_block(fid, FIFF.FIFFB_MEAS_INFO)
def write_info(fname, info, data_type=None, reset_range=True):
"""Write measurement info in fif file.
Parameters
----------
fname : str
The name of the file. Should end by -info.fif.
info : instance of mne.io.meas_info.Info
The measurement info structure
data_type : int
The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for
raw data.
reset_range : bool
If True, info['chs'][k]['range'] will be set to unity.
"""
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_MEAS)
write_meas_info(fid, info, data_type, reset_range)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
def _is_equal_dict(dicts):
"""Aux function"""
tests = zip(*[d.items() for d in dicts])
is_equal = []
for d in tests:
k0, v0 = d[0]
is_equal.append(all([np.all(k == k0) and
np.all(v == v0) for k, v in d]))
return all(is_equal)
@verbose
def _merge_dict_values(dicts, key, verbose=None):
"""Merge things together
Fork for {'dict', 'list', 'array', 'other'}
and consider cases where one or all are of the same type.
"""
values = [d[key] for d in dicts]
msg = ("Don't know how to merge '%s'. Make sure values are "
"compatible." % key)
def _flatten(lists):
return [item for sublist in lists for item in sublist]
def _check_isinstance(values, kind, func):
return func([isinstance(v, kind) for v in values])
def _where_isinstance(values, kind):
"""Aux function"""
        return np.where([isinstance(v, kind) for v in values])[0]
# list
if _check_isinstance(values, list, all):
lists = (d[key] for d in dicts)
return (_uniquify_projs(_flatten(lists)) if key == 'projs'
else _flatten(lists))
elif _check_isinstance(values, list, any):
idx = _where_isinstance(values, list)
if len(idx) == 1:
return values[int(idx)]
elif len(idx) > 1:
lists = (d[key] for d in dicts if isinstance(d[key], list))
return _flatten(lists)
# dict
elif _check_isinstance(values, dict, all):
is_qual = _is_equal_dict(values)
if is_qual:
return values[0]
else:
            raise RuntimeError(msg)
elif _check_isinstance(values, dict, any):
idx = _where_isinstance(values, dict)
if len(idx) == 1:
return values[int(idx)]
elif len(idx) > 1:
raise RuntimeError(msg)
# ndarray
elif _check_isinstance(values, np.ndarray, all):
is_qual = all([np.all(values[0] == x) for x in values[1:]])
if is_qual:
return values[0]
elif key == 'meas_date':
logger.info('Found multiple entries for %s. '
'Setting value to `None`' % key)
return None
else:
raise RuntimeError(msg)
elif _check_isinstance(values, np.ndarray, any):
idx = _where_isinstance(values, np.ndarray)
if len(idx) == 1:
return values[int(idx)]
elif len(idx) > 1:
raise RuntimeError(msg)
# other
else:
unique_values = set(values)
if len(unique_values) == 1:
return list(values)[0]
elif isinstance(list(unique_values)[0], BytesIO):
            logger.info('Found multiple BytesIO instances. '
'Setting value to `None`')
return None
elif isinstance(list(unique_values)[0], string_types):
logger.info('Found multiple filenames. '
'Setting value to `None`')
return None
else:
raise RuntimeError(msg)
@verbose
def _merge_info(infos, verbose=None):
"""Merge two measurement info dictionaries"""
info = Info()
ch_names = _merge_dict_values(infos, 'ch_names')
duplicates = set([ch for ch in ch_names if ch_names.count(ch) > 1])
if len(duplicates) > 0:
msg = ("The following channels are present in more than one input "
"measurement info objects: %s" % list(duplicates))
raise ValueError(msg)
info['nchan'] = len(ch_names)
info['ch_names'] = ch_names
info['chs'] = []
for this_info in infos:
info['chs'].extend(this_info['chs'])
transforms = ['ctf_head_t', 'dev_head_t', 'dev_ctf_t']
for trans_name in transforms:
trans = [i[trans_name] for i in infos if i[trans_name]]
if len(trans) == 0:
info[trans_name] = None
elif len(trans) == 1:
info[trans_name] = trans[0]
elif all([np.all(trans[0]['trans'] == x['trans']) and
trans[0]['from'] == x['from'] and
trans[0]['to'] == x['to']
for x in trans[1:]]):
info[trans_name] = trans[0]
else:
msg = ("Measurement infos provide mutually inconsistent %s" %
trans_name)
raise ValueError(msg)
other_fields = ['acq_pars', 'acq_stim', 'bads', 'buffer_size_sec',
'comps', 'description', 'dig', 'experimenter', 'file_id',
'filename', 'highpass', 'line_freq', 'lowpass',
'meas_date', 'meas_id', 'orig_blocks', 'proj_id',
                    'proj_name', 'projs', 'sfreq', 'subject_info']
for k in other_fields:
info[k] = _merge_dict_values(infos, k)
return info
def create_info(ch_names, sfreq, ch_types=None):
"""Create a basic Info instance suitable for use with create_raw
Parameters
----------
ch_names : list of str
Channel names.
sfreq : float
Sample rate of the data.
ch_types : list of str
Channel types. If None, data are assumed to be misc.
Currently supported fields are "mag", "grad", "eeg", and "misc".
Notes
-----
The info dictionary will be sparsely populated to enable functionality
within the rest of the package. Advanced functionality such as source
localization can only be obtained through substantial, proper
modifications of the info structure (not recommended).
"""
if not isinstance(ch_names, (list, tuple)):
raise TypeError('ch_names must be a list or tuple')
sfreq = float(sfreq)
if sfreq <= 0:
raise ValueError('sfreq must be positive')
nchan = len(ch_names)
if ch_types is None:
ch_types = ['misc'] * nchan
if len(ch_types) != nchan:
raise ValueError('ch_types and ch_names must be the same length')
info = Info()
info['meas_date'] = [0, 0]
info['sfreq'] = sfreq
for key in ['bads', 'projs', 'comps']:
info[key] = list()
for key in ['meas_id', 'file_id', 'highpass', 'lowpass', 'acq_pars',
'acq_stim', 'filename', 'dig']:
info[key] = None
info['ch_names'] = ch_names
info['nchan'] = nchan
info['chs'] = list()
loc = np.concatenate((np.zeros(3), np.eye(3).ravel())).astype(np.float32)
for ci, (name, kind) in enumerate(zip(ch_names, ch_types)):
if not isinstance(name, string_types):
raise TypeError('each entry in ch_names must be a string')
if not isinstance(kind, string_types):
raise TypeError('each entry in ch_types must be a string')
if kind not in _kind_dict:
raise KeyError('kind must be one of %s, not %s'
% (list(_kind_dict.keys()), kind))
kind = _kind_dict[kind]
chan_info = dict(loc=loc, eeg_loc=None, unit_mul=0, range=1., cal=1.,
coil_trans=None, kind=kind[0], coil_type=kind[1],
unit=kind[2], coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
ch_name=name, scanno=ci + 1, logno=ci + 1)
info['chs'].append(chan_info)
info['dev_head_t'] = None
info['dev_ctf_t'] = None
info['ctf_head_t'] = None
return info
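# --- Illustrative usage sketch (not part of the original module) ----------
# Building a sparse Info for synthetic data with create_info. The channel
# names, types and sampling rate below are arbitrary examples.
def _example_create_info():
    ch_names = ['EEG 001', 'EEG 002', 'MISC 001']
    info = create_info(ch_names, sfreq=1000.,
                       ch_types=['eeg', 'eeg', 'misc'])
    assert info['nchan'] == 3
    assert info['ch_names'][0] == 'EEG 001'
    return info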
| bsd-3-clause | 9,196,523,345,549,300,000 | 34.635 | 79 | 0.551761 | false |
svk/harmless7drl | cursesui.py | 1 | 6727 | import curses
import sys
import time
def main( rootwidget, *args, **kwargs ):
from harmless7drl import MainLoop
rv = None
try:
cui = CursesInterface( debug=True )
rv = MainLoop( cui ).query( rootwidget, *args, **kwargs )
cui.shutdown()
except:
handleException()
return rv
def handleException():
if not curses.isendwin():
curses.endwin()
raise
def flipyx(yx): return yx[1], yx[0]
class KeypressEvent:
def __init__(self, key):
self.type = "keypress"
self.key = key
from harmless7drl import ResizedException
class CursesInterface:
def __init__(self, debug = False):
self.stdscr = curses.initscr()
self.debug = debug
self.setupColours()
self.setupKeyboard()
self.previousCursorState = curses.curs_set(0)
self.warn( "session start at %s" % (str( time.time())))
def inside(self, x, y):
if x < 0 or y < 0:
return False
w, h = self.dimensions()
return not (x >= w or y >= h)
def dimensions(self):
return flipyx( self.stdscr.getmaxyx() )
def clear(self):
self.stdscr.erase()
def warn(self, warning):
if self.debug:
print >>sys.stderr, warning
def setupKeyboard(self):
curses.raw()
curses.halfdelay(1)
curses.noecho()
self.stdscr.keypad( 1 )
self.keymap = {}
import string
for ch in string.printable:
self.keymap[ ord(ch) ] = ch
self.keymap[ curses.KEY_BACKSPACE ] = 'backspace'
self.keymap[ curses.KEY_LEFT ] = 'west'
self.keymap[ curses.KEY_RIGHT ] = 'east'
self.keymap[ curses.KEY_UP ] = 'north'
self.keymap[ curses.KEY_DOWN ] = 'south'
self.keymap[ curses.KEY_A1 ] = 'northwest'
self.keymap[ curses.KEY_A3 ] = 'northeast'
self.keymap[ curses.KEY_C1 ] = 'southwest'
self.keymap[ curses.KEY_C3 ] = 'southeast'
self.keymap[ 27 ] = 'escape'
del self.keymap[ ord('\t') ] # hack because tab is bound to cause
# trouble in various text input stuff
def setupColours(self):
assert curses.has_colors()
curses.start_color()
self.colours = {
'white': (curses.COLOR_WHITE, 0),
'black': (curses.COLOR_BLACK, 0),
'red': (curses.COLOR_RED, 0),
'blue': (curses.COLOR_BLUE, 0),
'cyan': (curses.COLOR_CYAN, 0),
'green': (curses.COLOR_GREEN, 0),
'magenta': (curses.COLOR_MAGENTA, 0),
'yellow': (curses.COLOR_YELLOW, 0),
'bold-white': (curses.COLOR_WHITE, curses.A_BOLD),
'bold-black': (curses.COLOR_BLACK, curses.A_BOLD),
'bold-red': (curses.COLOR_RED, curses.A_BOLD),
'bold-blue': (curses.COLOR_BLUE, curses.A_BOLD),
'bold-cyan': (curses.COLOR_CYAN, curses.A_BOLD),
'bold-green': (curses.COLOR_GREEN, curses.A_BOLD),
'bold-magenta': (curses.COLOR_MAGENTA, curses.A_BOLD),
'bold-yellow': (curses.COLOR_YELLOW, curses.A_BOLD),
}
self.pairs = {}
i = 1
revs = {}
for fgName,fga in self.colours.items():
fg, fgattr = fga
for bgName,bga in self.colours.items():
bg, bgattr = bga
if fg == curses.COLOR_WHITE and bg == curses.COLOR_BLACK:
self.pairs[ fgName, bgName ] = 0
continue
# elif fg == bg:
# continue
elif revs.has_key( (fg,bg) ):
self.pairs[ fgName, bgName ] = revs[ fg, bg ]
continue
curses.init_pair( i, fg, bg )
self.pairs[ fgName, bgName ] = i
revs[ fg, bg ] = i
i += 1
def put(self, x, y, ch, fg = 'white', bg = 'black'):
# if fg == bg:
# fg = "white" if fg != "white" else "black"
# ch = ' '
if not self.inside( x, y ):
self.warn( "put character at %d, %d (out of bounds)" % (x,y) )
else:
try:
cid, attr = self.colours[ fg ]
self.stdscr.addch( y, x, ch, curses.color_pair( self.pairs[ fg, bg ]) | attr )
except:
# An error is triggered when we write to the last char on the screen?
pass
def putString(self, x, y, s, fg = 'white', bg = 'black'):
for ch in s:
self.put( x, y, ch, fg, bg)
x += 1
return x
def show(self):
self.stdscr.refresh()
def get(self):
rv = self.stdscr.getch()
if rv == -1:
return None
if rv == curses.KEY_RESIZE:
raise ResizedException()
try:
ch = self.keymap[ rv ]
if ch != None:
return KeypressEvent( ch )
except KeyError:
self.warn( "unknown input %d" % rv )
return None
def shutdown(self):
self.clear()
curses.endwin()
curses.curs_set( self.previousCursorState )
self.warn( "session end at %s" % (str( time.time())))
if __name__ == '__main__':
try:
cui = CursesInterface(debug = True)
w, h = cui.dimensions()
x, y = 15, 15
controls = {
'h': (-1, 0),
'l': (1, 0),
'j': (0, 1),
'k': (0, -1),
'y': (-1, -1),
'u': (1, -1),
'b': (-1, 1),
'n': (1, 1),
}
while True:
cui.clear()
cui.putString( 10, 10, "Hello world!" )
cui.putString( 10, 11, "This window is %dx%d" % (w,h) )
cui.putString( 10, 13, "longname(): %s" % curses.longname() )
cui.putString( 10, 14, "COLOR_PAIRS: %d" % curses.COLOR_PAIRS )
cui.putString( 10, 15, "can_change_color(): %s" % curses.can_change_color() )
cui.put( x, y, "@", fg = 'red' )
cui.show()
rv = None
try:
rv = cui.get()
except ResizedException:
w, h = cui.dimensions()
if rv:
if rv.type == "keypress":
if rv.key == 'q': break
if controls.has_key( rv.key ):
dx, dy = controls[ rv.key ]
x += dx
y += dy
x = max( x, 0 )
x = min( x, w - 1 )
y = max( y, 0 )
y = min( y, h - 1 )
cui.shutdown()
except:
if not curses.isendwin():
curses.endwin()
raise
| mit | -5,567,437,666,297,782,000 | 33.675258 | 94 | 0.478371 | false |
TGAC/earlham-galaxytools | tools/hcluster_sg_parser/hcluster_sg_parser.py | 3 | 1541 | """
A simple parser to convert the hcluster_sg output into lists of IDs, one list for each cluster.
When a minimum and/or maximum number of cluster elements is specified, the IDs contained in the filtered-out clusters are collected in the "discarded IDs" output datasets.
Usage:
python hcluster_sg_parser.py [-m <N>] [-M <N>] <file> <discarded_min_out> <discarded_max_out>
"""
import optparse
import sys
def main():
parser = optparse.OptionParser()
parser.add_option('-m', '--min', type='int', default=0, help='Minimum number of cluster elements')
parser.add_option('-M', '--max', type='int', default=sys.maxsize, help='Maximum number of cluster elements')
options, args = parser.parse_args()
with open(args[2], 'w') as discarded_max_out:
with open(args[1], 'w') as discarded_min_out:
with open(args[0]) as fh:
for line in fh:
line = line.rstrip()
line_cols = line.split('\t')
cluster_id = line_cols[0]
n_ids = int(line_cols[-2])
id_list = line_cols[-1].replace(',', '\n')
if n_ids < options.min:
discarded_min_out.write(id_list)
elif n_ids > options.max:
discarded_max_out.write(id_list)
else:
outfile = cluster_id + '_output.txt'
with open(outfile, 'w') as f:
f.write(id_list)
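# Illustrative note (not part of the original tool): each hcluster_sg output
# line is expected to be whitespace-separated, with the cluster id in the
# first column, the member count in the second-to-last column and the
# comma-separated member IDs in the last column, e.g. (made-up IDs):
#
#   cluster_1  <other columns>  3  geneA,geneB,geneC
#
# With the default -m/-M limits that line produces cluster_1_output.txt
# containing the three IDs, one per line.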
if __name__ == "__main__":
main()
| mit | -4,329,951,946,097,758,000 | 37.525 | 171 | 0.53926 | false |
sarvex/depot-tools | recipes/webrtc.py | 17 | 1117 | # Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class WebRTC(recipe_util.Recipe):
"""Basic Recipe class for WebRTC."""
@staticmethod
def fetch_spec(props):
url = 'https://chromium.googlesource.com/external/webrtc.git'
spec = {
'solutions': [
{
'name': 'src',
'url': url,
'deps_file': 'DEPS',
'managed': False,
'custom_deps': {},
'safesync_url': '',
},
],
'with_branch_heads': True,
}
if props.get('target_os'):
spec['target_os'] = props['target_os'].split(',')
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'src'
def main(argv=None):
return WebRTC().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause | -1,632,004,421,401,554,400 | 21.34 | 72 | 0.585497 | false |
boooka/GeoPowerOff | venv/lib/python2.7/site-packages/test/tools_control.py | 1 | 1076 | # coding: utf-8
from unittest import TestCase
import time
from grab.tools.control import sleep, repeat
class ControlToolsTestCase(TestCase):
def test_sleep(self):
now = time.time()
sleep(0.9, 1.1)
self.assertTrue(1.2 > (time.time() - now) > 0.8)
now = time.time()
sleep(0, 0.5)
self.assertTrue(0 < (time.time() - now) < 0.6)
def test_repeat(self):
COUNTER = [0]
def foo(counter=COUNTER):
counter[0] += 1
if counter[0] == 1:
raise ValueError
elif counter[0] == 2:
raise IndexError
else:
return 4
COUNTER[0] = 0
self.assertRaises(ValueError, lambda: repeat(foo, limit=1))
COUNTER[0] = 0
self.assertRaises(IndexError, lambda: repeat(foo, limit=2))
COUNTER[0] = 0
self.assertEqual(4, repeat(foo, limit=3))
COUNTER[0] = 0
self.assertRaises(IndexError,
lambda: repeat(foo, limit=2, fatal_exceptions=(IndexError,)))
| apache-2.0 | 5,076,641,471,661,331,000 | 25.9 | 87 | 0.539033 | false |
jphnoel/udata | udata/harvest/backends/ckan.py | 1 | 8735 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import logging
from uuid import UUID
from urlparse import urljoin
from voluptuous import (
Schema, All, Any, Lower, Coerce, DefaultTo
)
from udata.models import db, Resource, License, SpatialCoverage
from udata.utils import get_by, daterange_start, daterange_end
from . import BaseBackend, register
from ..exceptions import HarvestException, HarvestSkipException
from ..filters import (
boolean, email, to_date, slug, normalize_tag, normalize_string,
is_url, empty_none, hash
)
log = logging.getLogger(__name__)
RESOURCE_TYPES = ('file', 'file.upload', 'api', 'documentation',
'image', 'visualization')
ALLOWED_RESOURCE_TYPES = ('file', 'file.upload', 'api')
resource = {
'id': basestring,
'position': int,
'name': All(DefaultTo(''), basestring),
'description': All(basestring, normalize_string),
'format': All(basestring, Lower),
'mimetype': Any(All(basestring, Lower), None),
'size': Any(Coerce(int), None),
'hash': Any(All(basestring, hash), None),
'created': All(basestring, to_date),
'last_modified': Any(All(basestring, to_date), None),
'url': All(basestring, is_url(full=True)),
'resource_type': All(empty_none,
DefaultTo('file'),
basestring,
Any(*RESOURCE_TYPES)
),
}
tag = {
'id': basestring,
'vocabulary_id': Any(basestring, None),
'display_name': basestring,
'name': All(basestring, normalize_tag),
'state': basestring,
}
organization = {
'id': basestring,
'description': basestring,
'created': All(basestring, to_date),
'title': basestring,
'name': All(basestring, slug),
'revision_timestamp': All(basestring, to_date),
'is_organization': boolean,
'state': basestring,
'image_url': basestring,
'revision_id': basestring,
'type': 'organization',
'approval_status': 'approved'
}
schema = Schema({
'id': basestring,
'name': basestring,
'title': basestring,
'notes': All(basestring, normalize_string),
'license_id': All(DefaultTo('not-specified'), basestring),
'tags': [tag],
'metadata_created': All(basestring, to_date),
'metadata_modified': All(basestring, to_date),
'organization': Any(organization, None),
'resources': [resource],
'revision_id': basestring,
'extras': [{
'key': basestring,
'value': Any(basestring, int, float, boolean, {}, []),
}],
'private': boolean,
'type': 'dataset',
'author': Any(basestring, None),
'author_email': All(empty_none, Any(All(basestring, email), None)),
'maintainer': Any(basestring, None),
'maintainer_email': All(empty_none, Any(All(basestring, email), None)),
'state': Any(basestring, None),
}, required=True, extra=True)
@register
class CkanBackend(BaseBackend):
name = 'ckan'
display_name = 'CKAN'
def get_headers(self):
headers = super(CkanBackend, self).get_headers()
headers['content-type'] = 'application/json'
if self.config.get('apikey'):
headers['Authorization'] = self.config['apikey']
return headers
def action_url(self, endpoint):
path = '/'.join(['api/3/action', endpoint])
return urljoin(self.source.url, path)
def get_action(self, endpoint, fix=False, **kwargs):
url = self.action_url(endpoint)
if fix:
response = self.post(url, '{}', params=kwargs)
else:
response = self.get(url, params=kwargs)
if response.status_code != 200:
msg = response.text.strip('"')
raise HarvestException(msg)
return response.json()
def get_status(self):
url = urljoin(self.source.url, '/api/util/status')
response = self.get(url)
return response.json()
def initialize(self):
'''List all datasets for a given ...'''
# status = self.get_status()
# fix = status['ckan_version'] < '1.8'
fix = False
response = self.get_action('package_list', fix=fix)
names = response['result']
if self.max_items:
names = names[:self.max_items]
for name in names:
self.add_item(name)
def process(self, item):
response = self.get_action('package_show', id=item.remote_id)
data = self.validate(response['result'], schema)
# Fix the remote_id: use real ID instead of not stable name
item.remote_id = data['id']
# Skip if no resource
if not len(data.get('resources', [])):
            msg = 'Dataset {0} has no resources'.format(item.remote_id)
raise HarvestSkipException(msg)
dataset = self.get_dataset(item.remote_id)
# Core attributes
if not dataset.slug:
dataset.slug = data['name']
dataset.title = data['title']
dataset.description = data['notes']
dataset.license = License.objects(id=data['license_id']).first()
# dataset.license = license or License.objects.get(id='notspecified')
dataset.tags = [t['name'] for t in data['tags'] if t['name']]
dataset.created_at = data['metadata_created']
dataset.last_modified = data['metadata_modified']
dataset.extras['ckan:name'] = data['name']
temporal_start, temporal_end = None, None
spatial_geom = None
for extra in data['extras']:
# GeoJSON representation (Polygon or Point)
if extra['key'] == 'spatial':
spatial_geom = json.loads(extra['value'])
# Textual representation of the extent / location
elif extra['key'] == 'spatial-text':
log.debug('spatial-text value not handled')
print 'spatial-text', extra['value']
# Linked Data URI representing the place name
elif extra['key'] == 'spatial-uri':
log.debug('spatial-uri value not handled')
print 'spatial-uri', extra['value']
# Update frequency
elif extra['key'] == 'frequency':
print 'frequency', extra['value']
# Temporal coverage start
elif extra['key'] == 'temporal_start':
print 'temporal_start', extra['value']
temporal_start = daterange_start(extra['value'])
continue
# Temporal coverage end
elif extra['key'] == 'temporal_end':
print 'temporal_end', extra['value']
temporal_end = daterange_end(extra['value'])
continue
# else:
# print extra['key'], extra['value']
dataset.extras[extra['key']] = extra['value']
if spatial_geom:
dataset.spatial = SpatialCoverage()
if spatial_geom['type'] == 'Polygon':
coordinates = [spatial_geom['coordinates']]
elif spatial_geom['type'] == 'MultiPolygon':
coordinates = spatial_geom['coordinates']
else:
HarvestException('Unsupported spatial geometry')
dataset.spatial.geom = {
'type': 'MultiPolygon',
'coordinates': coordinates
}
if temporal_start and temporal_end:
dataset.temporal_coverage = db.DateRange(
start=temporal_start,
end=temporal_end,
)
# Remote URL
if data.get('url'):
dataset.extras['remote_url'] = data['url']
# Resources
for res in data['resources']:
if res['resource_type'] not in ALLOWED_RESOURCE_TYPES:
continue
try:
resource = get_by(dataset.resources, 'id', UUID(res['id']))
except:
log.error('Unable to parse resource ID %s', res['id'])
continue
if not resource:
resource = Resource(id=res['id'])
dataset.resources.append(resource)
resource.title = res.get('name', '') or ''
resource.description = res.get('description')
resource.url = res['url']
resource.filetype = ('api' if res['resource_type'] == 'api'
else 'remote')
resource.format = res.get('format')
resource.mime = res.get('mimetype')
resource.hash = res.get('hash')
resource.created = res['created']
resource.modified = res['last_modified']
resource.published = resource.published or resource.created
return dataset
| agpl-3.0 | 8,610,673,050,826,417,000 | 33.662698 | 77 | 0.569891 | false |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/SunlightLabs/Congress/Legislator/GetByCoordinates.py | 4 | 4696 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetByCoordinates
# Returns all legislators that currently represent an area (district or state) that contains a given Geo point.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetByCoordinates(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetByCoordinates Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetByCoordinates, self).__init__(temboo_session, '/Library/SunlightLabs/Congress/Legislator/GetByCoordinates')
def new_input_set(self):
return GetByCoordinatesInputSet()
def _make_result_set(self, result, path):
return GetByCoordinatesResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetByCoordinatesChoreographyExecution(session, exec_id, path)
class GetByCoordinatesInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetByCoordinates
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Sunlight Labs.)
"""
super(GetByCoordinatesInputSet, self)._set_input('APIKey', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) A comma-separated list of fields to include in the response.)
"""
super(GetByCoordinatesInputSet, self)._set_input('Fields', value)
def set_Latitude(self, value):
"""
Set the value of the Latitude input for this Choreo. ((required, decimal) The latitude coordinate of the area that a legislator represents.)
"""
super(GetByCoordinatesInputSet, self)._set_input('Latitude', value)
def set_Longitude(self, value):
"""
Set the value of the Longitude input for this Choreo. ((required, decimal) The longitude coordinate of the area that a legislator represents.)
"""
super(GetByCoordinatesInputSet, self)._set_input('Longitude', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page offset.)
"""
super(GetByCoordinatesInputSet, self)._set_input('Page', value)
def set_PerPage(self, value):
"""
Set the value of the PerPage input for this Choreo. ((optional, integer) The number of results to return per page.)
"""
super(GetByCoordinatesInputSet, self)._set_input('PerPage', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
super(GetByCoordinatesInputSet, self)._set_input('ResponseFormat', value)
class GetByCoordinatesResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetByCoordinates Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from the Sunlight Congress API.)
"""
return self._output.get('Response', None)
class GetByCoordinatesChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetByCoordinatesResultSet(response, path)
| apache-2.0 | -643,487,047,451,474,600 | 40.928571 | 176 | 0.675681 | false |
Arcanemagus/SickRage | sickbeard/metadata/helpers.py | 1 | 1508 | # coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: https://sick-rage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import sickbeard
from sickbeard import helpers, logger
meta_session = helpers.make_session()
def getShowImage(url, imgNum=None):
if url is None:
return None
# if they provided a fanart number try to use it instead
if imgNum is not None:
tempURL = url.split('-')[0] + "-" + str(imgNum) + ".jpg"
else:
tempURL = url
logger.log("Fetching image from " + tempURL, logger.DEBUG)
image_data = helpers.getURL(tempURL, session=meta_session, returns='content', allow_proxy=sickbeard.PROXY_INDEXERS)
if image_data is None:
logger.log("There was an error trying to retrieve the image, aborting", logger.WARNING)
return
return image_data
| gpl-3.0 | -7,566,578,462,335,683,000 | 31.782609 | 119 | 0.714854 | false |
PuchatekwSzortach/face_detection | face/geometry.py | 1 | 3679 | """
Module with geometry-related functions, mostly relating to bounding box processing
"""
import shapely.geometry
import shapely.affinity
import cv2
def get_bounding_box(left, top, width, height):
"""
Given left and top coordinates and width and height of a bounding box, return a bounding box instance
:param left: x-coordinate of left side
:param top: y-coordinate of top side
:param width: width
:param height: height
:return: shapely.geometry.Polygon instance representing a bounding box
"""
return shapely.geometry.box(left, top, left + width, top + height)
def get_bounding_boxes_map(path, **kwargs):
"""
Give a path to a file with bounding boxes for images, return a dictionary mapping image names to
bounding boxes
:param path: path to file
:return: {image file name: shapely.geometry.Polygon} dictionary
"""
file_opener = kwargs["open"] if "open" in kwargs.keys() else open
map = {}
with file_opener(path) as file:
# Discard first 2 lines, as they represent header
data = file.readlines()[2:]
for line in data:
tokens = line.split()
file_name = tokens[0]
int_tokens = [int(token) for token in tokens[1:]]
map[file_name] = get_bounding_box(*int_tokens)
return map
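# --- Illustrative usage sketch (not part of the original module) ----------
# The bounding-box file parsed above starts with two header lines followed
# by "image_name left top width height" rows. The file name, image names and
# coordinates below are made-up examples.
def _example_bounding_boxes_file(path='example_bboxes.txt'):
    contents = (
        "2\n"
        "image_name left top width height\n"
        "person_1.jpg 95 71 226 313\n"
        "person_2.jpg 48 32 162 204\n"
    )
    with open(path, 'w') as file:
        file.write(contents)
    boxes_map = get_bounding_boxes_map(path)
    assert boxes_map['person_1.jpg'].bounds == (95.0, 71.0, 321.0, 384.0)
    return boxes_map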
def get_intersection_over_union(first_polygon, second_polygon):
"""
Given two polygons returns their intersection over union
:param first_polygon: shapely.geometry.Polygon instance
:param second_polygon: shapely.geometry.Polygon instance
:return: float
"""
intersection_polygon = first_polygon.intersection(second_polygon)
union_polygon = first_polygon.union(second_polygon)
return intersection_polygon.area / union_polygon.area
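# --- Illustrative worked example (not part of the original module) --------
# Two boxes of area 1 overlapping on half their area have an intersection of
# 0.5 and a union of 1.5, so the IoU is 1/3.
def _example_intersection_over_union():
    first = get_bounding_box(0, 0, 1, 1)
    second = get_bounding_box(0.5, 0, 1, 1)
    iou = get_intersection_over_union(first, second)
    assert abs(iou - (1.0 / 3.0)) < 1e-9
    return iou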
def get_scale(bounding_box, target_size):
"""
Get a scale that would bring smaller side of bounding box to have target_size
:param bounding_box: bounding box
:param target_size: target size for smaller bounding box side
:return: float
"""
horizontal_side = bounding_box.bounds[2] - bounding_box.bounds[0]
vertical_side = bounding_box.bounds[3] - bounding_box.bounds[1]
smaller_side = horizontal_side if horizontal_side < vertical_side else vertical_side
return target_size / smaller_side
def get_scaled_bounding_box(bounding_box, scale):
"""
Given a bounding box and a scale, return scaled bounding box. Note that scaling is done w.r.t. axis origin,
hence this operation can change all bounding boxes coordinates
:param bounding_box: bounding box
:param scale: scale
:return: rescaled bounding box
"""
return shapely.affinity.affine_transform(bounding_box, [scale, 0, 0, scale, 0, 0])
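# --- Illustrative worked example (not part of the original module) --------
# Because scaling is performed about the axis origin, both the size and the
# position of the box change; the values below are arbitrary.
def _example_scaled_bounding_box():
    box = get_bounding_box(10, 20, 30, 40)   # bounds (10, 20, 40, 60)
    scaled = get_scaled_bounding_box(box, 2)
    assert scaled.bounds == (20.0, 40.0, 80.0, 120.0)
    return scaled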
def flip_bounding_box_about_vertical_axis(bounding_box, image_shape):
"""
Given a bounding box and image shape, flip the box about vertical axis of the image
:param bounding_box: bounding box
:param image_shape: image shape
:return: flipped bounding box
"""
bounds = bounding_box.bounds
return shapely.geometry.box(image_shape[1] - bounds[0], bounds[1], image_shape[1] - bounds[2], bounds[3])
def draw_bounding_box(image, bounding_box, color, thickness):
"""
Draw bounding box in an image with specified color and thickness
:param image: numpy array
:param bounding_box: shapely.geometry.Polygon instance
:param color: (float, float, float) tuple
:param thickness: int
"""
bounds = [round(value) for value in bounding_box.bounds]
cv2.rectangle(image, (bounds[0], bounds[1]), (bounds[2], bounds[3]), color=color, thickness=thickness)
| mit | -6,511,548,540,223,265,000 | 30.715517 | 111 | 0.690949 | false |
jucacrispim/toxicbuild | tests/webui/__init__.py | 1 | 6900 | # -*- coding: utf-8 -*-
# package with behave tests
import os
import time
from selenium import webdriver
from selenium.common.exceptions import (
NoSuchElementException,
StaleElementReferenceException
)
from selenium.webdriver.common.action_chains import ActionChains
from toxicbuild.core.utils import now, datetime2string
class SeleniumBrowserException(Exception):
pass
class SeleniumBrowser(webdriver.Chrome):
def __init__(self, *args, **kwargs):
options = webdriver.ChromeOptions()
options.add_argument('--start-maximized')
options.add_argument('--no-sandbox')
super().__init__(*args, chrome_options=options, **kwargs)
# self.maximize_window()
self.implicitly_wait(10)
def click(self, element):
"""Clicks in a element using ActionChains.
:param element: Um elemento para clicar."""
action = ActionChains(self).click(element)
action.perform()
def _get_screenshot_filename(self):
ts = str(int(time.time()))
dt = datetime2string(now(), dtformat='%Y/%m/%d')
fname = '{}.png'.format(ts)
return dt, fname
def save_screenshot(self):
path, fname = self._get_screenshot_filename()
path = os.path.join('artifacts', path)
os.makedirs(path, exist_ok=True)
self.get_screenshot_as_file(os.path.join(path, fname))
def wait_text_become_present(self, text, timeout=30):
"""Waits until a text is present in the page source.
:param text: The text that should be present in the page.
:param timeout: timeout in seconds for the operation."""
r = int(timeout * 10)
for index in range(r):
time.sleep(0.1)
if text in self.page_source:
return True
raise SeleniumBrowserException(
'text %s not present after %s seconds' % (text, timeout))
def wait_text_vanishes(self, text, timeout=30):
"""Waits until a text is not present anyomore in the page source.
:param text: The text that should be present in the page.
:param timeout: timeout in seconds for the operation."""
r = int(timeout * 10)
for index in range(r):
time.sleep(0.1)
if text not in self.page_source:
return True
raise SeleniumBrowserException(
'text %s did not vanish after %s seconds' % (text, timeout))
def do_login(self, url, username, passwd):
"""Do login in the web interface.
:param url: Login page url.
:param username: Username for login.
:param passwd: Password for login."""
self.get(url)
username_input = self.find_element_by_id('inputUsername')
username_input.send_keys(username)
passwd_input = self.find_element_by_id('inputPassword')
passwd_input.send_keys(passwd)
btn = self.find_element_by_id('btn-login')
self.click(btn)
def click_link(self, link_text):
"""Clicks in link indicated by link_text"""
self.click(self.find_element_by_partial_link_text(link_text))
@property
def is_logged(self):
"""True if the browser is already logged in the web interface."""
try:
self.implicitly_wait(0)
self.find_element_by_id('inputPassword')
is_logged = False
except NoSuchElementException:
is_logged = True
finally:
self.implicitly_wait(10)
return is_logged
def wait_element_become_visible(self, el, timeout=10):
"""Waits until an element become visible.
:param el: A page element
:param timeout: Timeout for the operation."""
r = int(timeout * 10)
for index in range(r):
time.sleep(0.1)
if el.is_displayed():
return True
raise SeleniumBrowserException(
'The element %s not visible after %s seconds' % (el, timeout))
def wait_element_become_hidden(self, el, timeout=10):
"""Waits until an element become hidden.
:param el: A page element
:param timeout: Timeout for the operation."""
r = int(timeout * 10)
for index in range(r):
time.sleep(0.1)
if not el.is_displayed():
return True
raise SeleniumBrowserException(
'The element %s not hidden after %s seconds' % (el, timeout))
def wait_element_become_present(self, fn, timeout=10, check_display=True):
"""Waits until an element is present in the DOM.
        :param fn: A function that should return an element. If there is no
        return value, tries again until the timeout is reached.
:param timeout: Timeout for the operation.
:param check_display: Checks if the element is displayed before
returning it.
"""
r = int(timeout * 10)
for index in range(r):
time.sleep(0.1)
try:
el = fn()
if check_display:
el = el if el.is_displayed() else None
except Exception:
el = None
if el:
return el
def wait_element_be_removed(self, fn, timeout=10):
"""Waits until an element is not present in the DOM anymore.
:param fn: A function that should return an element. If return value
is true, tries again until timeout is reached.
:param timeout: Timeout for the operation."""
r = int(timeout * 10)
for index in range(r):
time.sleep(0.1)
try:
el = fn()
except Exception:
el = True
if not el:
return el
def refresh_until(self, fn, timeout=10):
"""Reloads a page until ``fn`` returns True.
:param fn: A callable which returns a boolean.
:param timeout: Timeout for the operation."""
for index in range(timeout):
self.refresh()
time.sleep(1)
try:
r = fn()
except Exception:
r = False
if r:
return r
def click_and_retry_on_stale(self, fn, retry=True):
"""Clicks in an element and it again in case of
StaleElementReferenceException error.
"""
el = self.wait_element_become_present(fn)
try:
el.click()
except StaleElementReferenceException:
el = self.click_and_retry_on_stale(fn, retry=False)
return el
def take_screenshot(fn):
def wrapper(context, *args, **kwargs):
try:
r = fn(context, *args, **kwargs)
except Exception as e:
browser = context.browser
browser.save_screenshot()
raise e
return r
return wrapper
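# --- Illustrative usage sketch (not part of the original test package) ----
# How the helper browser is typically driven from a behave step. The URL,
# credentials and expected text are made-up placeholders; a local
# chromedriver is assumed to be available.
def _example_browser_session():
    browser = SeleniumBrowser()
    try:
        browser.do_login('http://localhost:8888/login', 'someguy', '123')
        assert browser.is_logged
        browser.wait_text_become_present('toxicbuild', timeout=10)
    finally:
        browser.quit()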
| agpl-3.0 | -5,694,029,584,247,244,000 | 28.361702 | 79 | 0.580145 | false |
mlperf/inference_results_v0.7 | closed/Cisco/code/bert-99.9/tensorrt/evaluate.py | 26 | 3419 | """ Official evaluation script for v1.1 of the SQuAD dataset. """
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
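# --- Illustrative worked example (not part of the official script) --------
# For the made-up pair below, normalize_answer() drops the article "the",
# leaving prediction tokens ['cat', 'sat'] versus ground-truth tokens
# ['cat', 'sat', 'down']; precision is 2/2, recall is 2/3, so F1 is 0.8,
# while the exact-match score is 0.
def _example_squad_metrics():
    prediction = 'the cat sat'
    ground_truth = 'cat sat down'
    assert abs(f1_score(prediction, ground_truth) - 0.8) < 1e-9
    assert not exact_match_score(prediction, ground_truth)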
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
if __name__ == '__main__':
expected_version = '1.1'
parser = argparse.ArgumentParser(
description='Evaluation for SQuAD ' + expected_version)
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if (dataset_json['version'] != expected_version):
print('Evaluation expects v-' + expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
| apache-2.0 | 512,214,946,775,979,700 | 35.37234 | 77 | 0.621819 | false |
bharadwaj-raju/libdesktop | libdesktop/system.py | 1 | 6802 | # coding: utf-8
# This file is part of libdesktop
# The MIT License (MIT)
#
# Copyright (c) 2016 Bharadwaj Raju
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import subprocess as sp
import subprocess
import os
import sys
def get_cmd_out(command):
'''Get the output of a command.
Gets a nice Unicode no-extra-whitespace string of the ``stdout`` of a given command.
Args:
command (str or list): A string of the command, or a list of the arguments (as would be used in :class:`subprocess.Popen`).
Note:
If ``command`` is a ``str``, it will be evaluated with ``shell=True`` i.e. in the default shell (for example, bash).
Returns:
str: The ``stdout`` of the command.'''
if isinstance(command, list):
result = sp.check_output(command)
else:
result = sp.check_output(command, shell=True)
return result.decode('utf-8').rstrip()
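# --- Illustrative usage sketch (not part of the original module) ----------
# Both call styles described in the docstring above, on a Unix-like system
# where ``echo`` is available; the command is just a harmless example.
def _example_get_cmd_out():
    shell_result = get_cmd_out('echo hello')      # str -> run through a shell
    list_result = get_cmd_out(['echo', 'hello'])  # list -> passed directly
    assert shell_result == list_result == 'hello'
    return shell_result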
def get_name():
'''Get desktop environment or OS.
Get the OS name or desktop environment.
**List of Possible Values**
+-------------------------+---------------+
| Windows | windows |
+-------------------------+---------------+
| Mac OS X | mac |
+-------------------------+---------------+
| GNOME 3+ | gnome |
+-------------------------+---------------+
| GNOME 2 | gnome2 |
+-------------------------+---------------+
| XFCE | xfce4 |
+-------------------------+---------------+
| KDE | kde |
+-------------------------+---------------+
| Unity | unity |
+-------------------------+---------------+
| LXDE | lxde |
+-------------------------+---------------+
| i3wm | i3 |
+-------------------------+---------------+
| \*box | \*box |
+-------------------------+---------------+
| Trinity (KDE 3 fork) | trinity |
+-------------------------+---------------+
| MATE | mate |
+-------------------------+---------------+
| IceWM | icewm |
+-------------------------+---------------+
| Pantheon (elementaryOS) | pantheon |
+-------------------------+---------------+
| LXQt | lxqt |
+-------------------------+---------------+
| Awesome WM | awesome |
+-------------------------+---------------+
| Enlightenment | enlightenment |
+-------------------------+---------------+
| AfterStep | afterstep |
+-------------------------+---------------+
| WindowMaker | windowmaker |
+-------------------------+---------------+
| [Other] | unknown |
+-------------------------+---------------+
Returns:
str: The name of the desktop environment or OS.
'''
if sys.platform in ['win32', 'cygwin']:
return 'windows'
elif sys.platform == 'darwin':
return 'mac'
else:
desktop_session = os.environ.get(
'XDG_CURRENT_DESKTOP') or os.environ.get('DESKTOP_SESSION')
if desktop_session is not None:
desktop_session = desktop_session.lower()
# Fix for X-Cinnamon etc
if desktop_session.startswith('x-'):
desktop_session = desktop_session.replace('x-', '')
if desktop_session in ['gnome', 'unity', 'cinnamon', 'mate',
'xfce4', 'lxde', 'fluxbox',
'blackbox', 'openbox', 'icewm', 'jwm',
'afterstep', 'trinity', 'kde', 'pantheon',
'i3', 'lxqt', 'awesome', 'enlightenment']:
return desktop_session
#-- Special cases --#
# Canonical sets environment var to Lubuntu rather than
# LXDE if using LXDE.
# There is no guarantee that they will not do the same
# with the other desktop environments.
elif 'xfce' in desktop_session:
return 'xfce4'
elif desktop_session.startswith('ubuntu'):
return 'unity'
elif desktop_session.startswith('xubuntu'):
return 'xfce4'
elif desktop_session.startswith('lubuntu'):
return 'lxde'
elif desktop_session.startswith('kubuntu'):
return 'kde'
elif desktop_session.startswith('razor'):
return 'razor-qt'
elif desktop_session.startswith('wmaker'):
return 'windowmaker'
if os.environ.get('KDE_FULL_SESSION') == 'true':
return 'kde'
elif os.environ.get('GNOME_DESKTOP_SESSION_ID'):
if not 'deprecated' in os.environ.get('GNOME_DESKTOP_SESSION_ID'):
return 'gnome2'
elif is_running('xfce-mcs-manage'):
return 'xfce4'
elif is_running('ksmserver'):
return 'kde'
return 'unknown'
def is_in_path(program):
'''
Check if a program is in the system ``PATH``.
Checks if a given program is in the user's ``PATH`` or not.
Args:
program (str): The program to try to find in ``PATH``.
Returns:
bool: Is the program in ``PATH``?
'''
if sys.version_info.major == 2:
path = os.getenv('PATH')
if os.name == 'nt':
path = path.split(';')
else:
path = path.split(':')
else:
path = os.get_exec_path()
for i in path:
if os.path.isdir(i):
if program in os.listdir(i):
return True
def is_running(process):
'''
Check if process is running.
Check if the given process name is running or not.
Note:
On a Linux system, kernel threads (like ``kthreadd`` etc.)
are excluded.
Args:
process (str): The name of the process.
Returns:
bool: Is the process running?
'''
if os.name == 'nt':
process_list = get_cmd_out(['tasklist', '/v'])
return process in process_list
else:
process_list = get_cmd_out('ps axw | awk \'{print $5}\'')
for i in process_list.split('\n'):
# 'COMMAND' is the column heading
# [*] indicates kernel-level processes like \
# kthreadd, which manages threads in the Linux kernel
            if i != 'COMMAND' and not i.startswith('['):
if i == process:
return True
elif os.path.basename(i) == process:
# check i without executable path
# for example, if 'process' arguments is 'sshd'
# and '/usr/bin/sshd' is listed in ps, return True
return True
return False # finally
| mit | 488,043,641,069,394,800 | 26.99177 | 126 | 0.563805 | false |
cpyou/odoo | addons/point_of_sale/wizard/pos_session_opening.py | 15 | 5032 |
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.addons.point_of_sale.point_of_sale import pos_session
class pos_session_opening(osv.osv_memory):
_name = 'pos.session.opening'
_columns = {
'pos_config_id' : fields.many2one('pos.config', string='Point of Sale', required=True),
'pos_session_id' : fields.many2one('pos.session', string='PoS Session'),
'pos_state' : fields.related('pos_session_id', 'state',
type='selection',
selection=pos_session.POS_SESSION_STATE,
string='Session Status', readonly=True),
'pos_state_str' : fields.char('Status', readonly=True),
'show_config' : fields.boolean('Show Config', readonly=True),
'pos_session_name' : fields.related('pos_session_id', 'name', string="Session Name",
type='char', size=64, readonly=True),
'pos_session_username' : fields.related('pos_session_id', 'user_id', 'name',
type='char', size=64, readonly=True)
}
def open_ui(self, cr, uid, ids, context=None):
data = self.browse(cr, uid, ids[0], context=context)
context = dict(context or {})
context['active_id'] = data.pos_session_id.id
return {
'type' : 'ir.actions.act_url',
'url': '/pos/web/',
'target': 'self',
}
def open_existing_session_cb_close(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0], context=context)
wizard.pos_session_id.signal_workflow('cashbox_control')
return self.open_session_cb(cr, uid, ids, context)
def open_session_cb(self, cr, uid, ids, context=None):
assert len(ids) == 1, "you can open only one session at a time"
proxy = self.pool.get('pos.session')
wizard = self.browse(cr, uid, ids[0], context=context)
if not wizard.pos_session_id:
values = {
'user_id' : uid,
'config_id' : wizard.pos_config_id.id,
}
session_id = proxy.create(cr, uid, values, context=context)
s = proxy.browse(cr, uid, session_id, context=context)
if s.state=='opened':
return self.open_ui(cr, uid, ids, context=context)
return self._open_session(session_id)
return self._open_session(wizard.pos_session_id.id)
def open_existing_session_cb(self, cr, uid, ids, context=None):
assert len(ids) == 1
wizard = self.browse(cr, uid, ids[0], context=context)
return self._open_session(wizard.pos_session_id.id)
def _open_session(self, session_id):
return {
'name': _('Session'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'pos.session',
'res_id': session_id,
'view_id': False,
'type': 'ir.actions.act_window',
}
def on_change_config(self, cr, uid, ids, config_id, context=None):
result = {
'pos_session_id': False,
'pos_state': False,
'pos_state_str' : '',
'pos_session_username' : False,
'pos_session_name' : False,
}
if not config_id:
return {'value' : result}
proxy = self.pool.get('pos.session')
session_ids = proxy.search(cr, uid, [
('state', '!=', 'closed'),
('config_id', '=', config_id),
('user_id', '=', uid),
], context=context)
if session_ids:
session = proxy.browse(cr, uid, session_ids[0], context=context)
result['pos_state'] = str(session.state)
result['pos_state_str'] = dict(pos_session.POS_SESSION_STATE).get(session.state, '')
result['pos_session_id'] = session.id
result['pos_session_name'] = session.name
result['pos_session_username'] = session.user_id.name
return {'value' : result}
def default_get(self, cr, uid, fieldnames, context=None):
so = self.pool.get('pos.session')
session_ids = so.search(cr, uid, [('state','<>','closed'), ('user_id','=',uid)], context=context)
if session_ids:
result = so.browse(cr, uid, session_ids[0], context=context).config_id.id
else:
current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
result = current_user.pos_config and current_user.pos_config.id or False
if not result:
r = self.pool.get('pos.config').search(cr, uid, [], context=context)
result = r and r[0] or False
count = self.pool.get('pos.config').search_count(cr, uid, [('state', '=', 'active')], context=context)
show_config = bool(count > 1)
return {
'pos_config_id' : result,
'show_config' : show_config,
}
| agpl-3.0 | 4,784,035,911,628,702,000 | 42.37931 | 110 | 0.545509 | false |
mach327/chirp_fork | chirp/drivers/vx7.py | 2 | 11036 | # Copyright 2010 Dan Smith <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from chirp.drivers import yaesu_clone
from chirp import chirp_common, directory, bitwise
from textwrap import dedent
import logging
LOG = logging.getLogger(__name__)
MEM_FORMAT = """
#seekto 0x0611;
u8 checksum1;
#seekto 0x0691;
u8 checksum2;
#seekto 0x0742;
struct {
u16 in_use;
} bank_used[9];
#seekto 0x0EA2;
struct {
u16 members[48];
} bank_members[9];
#seekto 0x3F52;
u8 checksum3;
#seekto 0x1202;
struct {
u8 even_pskip:1,
even_skip:1,
even_valid:1,
even_masked:1,
odd_pskip:1,
odd_skip:1,
odd_valid:1,
odd_masked:1;
} flags[225];
#seekto 0x1322;
struct {
u8 unknown1;
u8 power:2,
duplex:2,
tune_step:4;
bbcd freq[3];
u8 zeros1:2,
ones:2,
zeros2:2,
mode:2;
u8 name[8];
u8 zero;
bbcd offset[3];
u8 zeros3:2,
tone:6;
u8 zeros4:1,
dcs:7;
u8 zeros5:5,
is_split_tone:1,
tmode:2;
u8 charset;
} memory[450];
"""
DUPLEX = ["", "-", "+", "split"]
MODES = ["FM", "AM", "WFM", "Auto"]
TMODES = ["", "Tone", "TSQL", "DTCS", "Cross"]
CROSS_MODES = ["DTCS->", "Tone->DTCS", "DTCS->Tone"]
STEPS = [5.0, 10.0, 12.5, 15.0, 20.0, 25.0, 50.0, 100.0, 9.0]
CHARSET = ["%i" % int(x) for x in range(0, 10)] + \
[" "] + \
[chr(x) for x in range(ord("A"), ord("Z")+1)] + \
[chr(x) for x in range(ord("a"), ord("z")+1)] + \
list(".,:;!\"#$%&'()*+-.=<>?@[?]^_\\{|}") + \
list("\x00" * 100)
POWER_LEVELS = [chirp_common.PowerLevel("L1", watts=0.05),
chirp_common.PowerLevel("L2", watts=1.00),
chirp_common.PowerLevel("L3", watts=2.50),
chirp_common.PowerLevel("Hi", watts=5.00)
]
POWER_LEVELS_220 = [chirp_common.PowerLevel("L1", watts=0.05),
chirp_common.PowerLevel("L2", watts=0.30)]
def _is220(freq):
return freq >= 222000000 and freq <= 225000000
class VX7BankModel(chirp_common.BankModel):
"""A VX-7 Bank model"""
def get_num_mappings(self):
return 9
def get_mappings(self):
banks = []
for i in range(0, self.get_num_mappings()):
bank = chirp_common.Bank(self, "%i" % (i+1), "MG%i" % (i+1))
bank.index = i
banks.append(bank)
return banks
def add_memory_to_mapping(self, memory, bank):
_members = self._radio._memobj.bank_members[bank.index]
_bank_used = self._radio._memobj.bank_used[bank.index]
for i in range(0, 48):
if _members.members[i] == 0xFFFF:
_members.members[i] = memory.number - 1
_bank_used.in_use = 0x0000
break
def remove_memory_from_mapping(self, memory, bank):
_members = self._radio._memobj.bank_members[bank.index].members
_bank_used = self._radio._memobj.bank_used[bank.index]
found = False
remaining_members = 0
for i in range(0, len(_members)):
if _members[i] == (memory.number - 1):
_members[i] = 0xFFFF
found = True
elif _members[i] != 0xFFFF:
remaining_members += 1
if not found:
raise Exception("Memory {num} not in " +
"bank {bank}".format(num=memory.number,
bank=bank))
if not remaining_members:
_bank_used.in_use = 0xFFFF
def get_mapping_memories(self, bank):
memories = []
_members = self._radio._memobj.bank_members[bank.index].members
_bank_used = self._radio._memobj.bank_used[bank.index]
if _bank_used.in_use == 0xFFFF:
return memories
for number in _members:
if number == 0xFFFF:
continue
memories.append(self._radio.get_memory(number+1))
return memories
def get_memory_mappings(self, memory):
banks = []
for bank in self.get_mappings():
if memory.number in [x.number for x in
self.get_mapping_memories(bank)]:
banks.append(bank)
return banks
def _wipe_memory(mem):
mem.set_raw("\x00" * (mem.size() / 8))
mem.unknown1 = 0x05
mem.ones = 0x03
@directory.register
class VX7Radio(yaesu_clone.YaesuCloneModeRadio):
"""Yaesu VX-7"""
BAUD_RATE = 19200
VENDOR = "Yaesu"
MODEL = "VX-7"
_model = ""
_memsize = 16211
_block_lengths = [10, 8, 16193]
_block_size = 8
@classmethod
def get_prompts(cls):
rp = chirp_common.RadioPrompts()
rp.pre_download = _(dedent("""\
1. Turn radio off.
2. Connect cable to MIC/SP jack.
3. Press and hold in the [MON-F] key while turning the radio on
("CLONE" will appear on the display).
4. <b>After clicking OK</b>, press the [BAND] key to send image."""))
rp.pre_upload = _(dedent("""\
1. Turn radio off.
2. Connect cable to MIC/SP jack.
3. Press and hold in the [MON-F] key while turning the radio on
("CLONE" will appear on the display).
4. Press the [V/M] key ("CLONE WAIT" will appear on the LCD)."""))
return rp
def _checksums(self):
return [yaesu_clone.YaesuChecksum(0x0592, 0x0610),
yaesu_clone.YaesuChecksum(0x0612, 0x0690),
yaesu_clone.YaesuChecksum(0x0000, 0x3F51),
]
def process_mmap(self):
self._memobj = bitwise.parse(MEM_FORMAT, self._mmap)
def get_features(self):
rf = chirp_common.RadioFeatures()
rf.has_bank = True
rf.has_dtcs_polarity = False
rf.valid_modes = list(set(MODES))
rf.valid_tmodes = list(TMODES)
rf.valid_duplexes = list(DUPLEX)
rf.valid_tuning_steps = list(STEPS)
rf.valid_bands = [(500000, 999000000)]
rf.valid_skips = ["", "S", "P"]
rf.valid_power_levels = POWER_LEVELS
rf.valid_characters = "".join(CHARSET)
rf.valid_name_length = 8
rf.memory_bounds = (1, 450)
rf.can_odd_split = True
rf.has_ctone = False
rf.has_cross = True
rf.valid_cross_modes = list(CROSS_MODES)
return rf
def get_raw_memory(self, number):
return repr(self._memobj.memory[number-1])
def get_memory(self, number):
_mem = self._memobj.memory[number-1]
_flag = self._memobj.flags[(number-1)/2]
nibble = ((number-1) % 2) and "even" or "odd"
used = _flag["%s_masked" % nibble]
valid = _flag["%s_valid" % nibble]
pskip = _flag["%s_pskip" % nibble]
skip = _flag["%s_skip" % nibble]
mem = chirp_common.Memory()
mem.number = number
if not used:
mem.empty = True
if not valid:
mem.empty = True
mem.power = POWER_LEVELS[0]
return mem
mem.freq = chirp_common.fix_rounded_step(int(_mem.freq) * 1000)
mem.offset = int(_mem.offset) * 1000
mem.rtone = mem.ctone = chirp_common.TONES[_mem.tone]
if not _mem.is_split_tone:
mem.tmode = TMODES[_mem.tmode]
mem.cross_mode = CROSS_MODES[0]
else:
mem.tmode = "Cross"
mem.cross_mode = CROSS_MODES[int(_mem.tmode)]
mem.duplex = DUPLEX[_mem.duplex]
if mem.duplex == "split":
mem.offset = chirp_common.fix_rounded_step(mem.offset)
mem.mode = MODES[_mem.mode]
mem.dtcs = chirp_common.DTCS_CODES[_mem.dcs]
mem.tuning_step = STEPS[_mem.tune_step]
mem.skip = pskip and "P" or skip and "S" or ""
if _is220(mem.freq):
levels = POWER_LEVELS_220
else:
levels = POWER_LEVELS
try:
mem.power = levels[_mem.power]
except IndexError:
LOG.error("Radio reported invalid power level %s (in %s)" %
(_mem.power, levels))
mem.power = levels[0]
for i in _mem.name:
if i == "\xFF":
break
mem.name += CHARSET[i]
mem.name = mem.name.rstrip()
return mem
def set_memory(self, mem):
_mem = self._memobj.memory[mem.number-1]
_flag = self._memobj.flags[(mem.number-1)/2]
nibble = ((mem.number-1) % 2) and "even" or "odd"
valid = _flag["%s_valid" % nibble]
used = _flag["%s_masked" % nibble]
if not mem.empty and not valid:
_wipe_memory(_mem)
self._wipe_memory_banks(mem)
if mem.empty and valid and not used:
_flag["%s_valid" % nibble] = False
return
_flag["%s_masked" % nibble] = not mem.empty
if mem.empty:
return
_flag["%s_valid" % nibble] = True
_mem.freq = mem.freq / 1000
_mem.offset = mem.offset / 1000
_mem.tone = chirp_common.TONES.index(mem.rtone)
if mem.tmode != "Cross":
_mem.is_split_tone = 0
_mem.tmode = TMODES.index(mem.tmode)
else:
_mem.is_split_tone = 1
_mem.tmode = CROSS_MODES.index(mem.cross_mode)
_mem.duplex = DUPLEX.index(mem.duplex)
_mem.mode = MODES.index(mem.mode)
_mem.dcs = chirp_common.DTCS_CODES.index(mem.dtcs)
_mem.tune_step = STEPS.index(mem.tuning_step)
if mem.power:
if _is220(mem.freq):
levels = [str(l) for l in POWER_LEVELS_220]
_mem.power = levels.index(str(mem.power))
else:
_mem.power = POWER_LEVELS.index(mem.power)
else:
_mem.power = 0
_flag["%s_pskip" % nibble] = mem.skip == "P"
_flag["%s_skip" % nibble] = mem.skip == "S"
for i in range(0, 8):
_mem.name[i] = CHARSET.index(mem.name.ljust(8)[i])
def validate_memory(self, mem):
msgs = yaesu_clone.YaesuCloneModeRadio.validate_memory(self, mem)
if _is220(mem.freq):
if str(mem.power) not in [str(l) for l in POWER_LEVELS_220]:
msgs.append(chirp_common.ValidationError(
"Power level %s not supported on 220MHz band" %
mem.power))
return msgs
@classmethod
def match_model(cls, filedata, filename):
return len(filedata) == cls._memsize
def get_bank_model(self):
return VX7BankModel(self)
| gpl-3.0 | 942,188,671,914,474,500 | 29.486188 | 73 | 0.554639 | false |
rahulvgmail/TripMapR | TripMapR/travelogue/tests/test_views_travelogue.py | 1 | 3438 | from django.test import TestCase
from .factories import TravelogueFactory
class RequestTravelogueTest(TestCase):
urls = 'travelogue.tests.test_urls'
def setUp(self):
super(RequestTravelogueTest, self).setUp()
self.travelogue = TravelogueFactory(slug='test-travelogue')
def test_archive_travelogue_url_works(self):
response = self.client.get('/travelogue/travelogue/')
self.assertEqual(response.status_code, 200)
def test_archive_travelogue_empty(self):
"""If there are no galleries to show, tell the visitor - don't show a
404."""
self.travelogue.is_public = False
self.travelogue.save()
response = self.client.get('/travelogue/travelogue/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['latest'].count(),
0)
def test_paginated_travelogue_url_works(self):
response = self.client.get('/travelogue/traveloguelist/')
self.assertEqual(response.status_code, 200)
def test_travelogue_works(self):
response = self.client.get('/travelogue/travelogue/test-travelogue/')
self.assertEqual(response.status_code, 200)
def test_archive_year_travelogue_works(self):
response = self.client.get('/travelogue/travelogue/2011/')
self.assertEqual(response.status_code, 200)
def test_archive_month_travelogue_works(self):
response = self.client.get('/travelogue/travelogue/2011/12/')
self.assertEqual(response.status_code, 200)
def test_archive_day_travelogue_works(self):
response = self.client.get('/travelogue/travelogue/2011/12/23/')
self.assertEqual(response.status_code, 200)
def test_detail_travelogue_works(self):
response = self.client.get('/travelogue/travelogue/2011/12/23/test-travelogue/')
self.assertEqual(response.status_code, 200)
def test_redirect_to_list(self):
"""Trivial test - if someone requests the root url of the app
(i.e. /travelogue/'), redirect them to the travelogue list page."""
response = self.client.get('/travelogue/')
self.assertRedirects(response, '/travelogue/travelogue/', 301, 200)
class TraveloguePaginationTest(TestCase):
urls = 'travelogue.tests.test_urls'
def test_pagination(self):
for i in range(1, 23):
TravelogueFactory(title='travelogue{0:0>3}'.format(i))
response = self.client.get('/travelogue/traveloguelist/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']),
20)
# Check first and last items.
self.assertEqual(response.context['object_list'][0].title,
'travelogue022')
self.assertEqual(response.context['object_list'][19].title,
'travelogue003')
# Now get the second page of results.
response = self.client.get('/travelogue/traveloguelist/?page=2')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']),
2)
# Check first and last items.
self.assertEqual(response.context['object_list'][0].title,
'travelogue002')
self.assertEqual(response.context['object_list'][1].title,
'travelogue001')
| bsd-3-clause | -7,713,778,036,676,238,000 | 37.2 | 88 | 0.645433 | false |
DarkEnergyScienceCollaboration/ReprocessingTaskForce | scripts/run_mergeCoaddMeasurements.py | 2 | 1557 | #!/usr/bin/env python
from __future__ import print_function
import os
import glob
import libRun as LR
def build_cmd(patch, configFile, input, output) :
if not os.path.isdir("scripts"):
os.makedirs("scripts")
cmd = "mv "+ patch + " scripts"
os.system(cmd)
cmd = "mergeCoaddMeasurements.py %s --output %s" % (input, output) + \
" @scripts/" + patch + " --configfile " + configFile
return cmd
if __name__ == "__main__":
usage = """%prog [option]"""
description = """This script will run detectCoadSources for a given list of filters and patches.
The default if to use f.list files (where 'f' is a filter in ugriz) and patches_f.txt,
and launch detectCoadSources in several batch jobs. You thus need to be running it at CC-IN2P3
to make it work. To run all filters, you can do something like
%prog -f ugriz -m 1 -c detectCoaddSources.py -a"""
opts, args = LR.standard_options(usage=usage, description=description)
opts.mod = 10
opts.input = "pardir/output"
opts.output = "pardir/output"
file_patch = "patches_all.txt"
cmd = "split -l " + str(opts.mod) + " -d " + file_patch + " " + file_patch + "_"
os.system(cmd)
patch_list = sorted(glob.glob(file_patch + "_*"))
for patch in sorted(patch_list):
print("\n", patch)
cmd = build_cmd(patch, opts.configs, opts.input, opts.output)
LR.submit(cmd, patch, autosubmit=opts.autosubmit, ct=60000, vmem='4G',
from_slac=opts.fromslac)
| gpl-2.0 | 4,190,196,103,252,527,000 | 30.77551 | 101 | 0.617213 | false |
moylop260/stock-logistics-warehouse | __unported__/stock_move_location/__openerp__.py | 8 | 1711 | # -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Julius Network Solutions SARL <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
{
"name" : "Move Stock Location",
"version" : "1.0",
"author" : "Julius Network Solutions",
"description" : """
Presentation:
This module allows to move all stock in a stock location to an other one.
And adds fields and buttons to advance in Physical Inventories.
""",
"website" : "http://www.julius.fr",
"depends" : [
"stock",
"stock_barcode_reader",
],
"category" : "Customs/Stock",
"init_xml" : [],
"demo_xml" : [],
"update_xml" : [
'stock_view.xml',
'stock_move_sequence.xml',
'wizard/move_location_view.xml',
],
'test': [],
'installable': False,
'active': False,
'certificate': '',
}
| agpl-3.0 | -5,584,330,758,636,996,000 | 32.54902 | 81 | 0.568089 | false |
bowang/tensorflow | tensorflow/contrib/layers/python/layers/__init__.py | 18 | 1796 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""layers module with higher level NN primitives."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.layers.python.layers.embedding_ops import *
from tensorflow.contrib.layers.python.layers.encoders import *
from tensorflow.contrib.layers.python.layers.feature_column import *
from tensorflow.contrib.layers.python.layers.feature_column_ops import *
from tensorflow.contrib.layers.python.layers.initializers import *
from tensorflow.contrib.layers.python.layers.layers import *
from tensorflow.contrib.layers.python.layers.normalization import *
from tensorflow.contrib.layers.python.layers.optimizers import *
from tensorflow.contrib.layers.python.layers.regularizers import *
from tensorflow.contrib.layers.python.layers.summaries import *
from tensorflow.contrib.layers.python.layers.target_column import *
from tensorflow.contrib.layers.python.ops.bucketization_op import *
from tensorflow.contrib.layers.python.ops.sparse_feature_cross_op import *
# pylint: enable=wildcard-import
| apache-2.0 | 978,811,709,208,615,800 | 50.314286 | 80 | 0.767261 | false |
SteveMcGrath/pySecurityCenter | examples/sc4/csv_gen/runcsv.py | 1 | 3078 | #!/usr/bin/env python
# CSV Report Generator
# --------------------
# This script is designed to pull vulnerability data from Security Center 4.2
# and format it into a CSV file. While Security Center can already do this,
# this script will also break out the Plugin Text field into individual fields
# before generating the CSV row. Once the report has been generated, the
# script will then compile and email and send the email off to the email
# associated to the asset list in question.
# --------------------
#
# Written By: Steven McGrath
# Version: Build 042
# Date: 05/01/2012
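#
# The script expects a csv_gen.conf next to it. The option names below are the
# ones read later in this file; the section name and all values are only
# illustrative assumptions:
#
#   [Settings]
#   debug = false
#   address = securitycenter.example.com
#   port = 443
#   username = api_user
#   password = secret
#   email_from = [email protected]
#   smtp_host = smtp.example.com
#   email_subject = Vulnerability report
#   email_msg = Attached is the requested vulnerability report.
#
#   [CSVFILE: weekly_report]
#   email_to = [email protected],[email protected]
#   gen_email = true
#   datasource = cumulative
#   fields = ip,pluginID,pluginName,severity
#   filter = {"filters": []}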
import sccsv
from securitycenter import SecurityCenter4
import json
import os
from ConfigParser import ConfigParser
conf = ConfigParser()
conf.read('csv_gen.conf')
sccsv.debug.DEBUG = conf.getboolean('Settings', 'debug')
sc = SecurityCenter4(conf.get('Settings', 'address'),
port=conf.getint('Settings', 'port'))
sc.login(conf.get('Settings', 'username'),
conf.get('Settings', 'password'))
def build_and_email(section):
# The first thing that we need to do is get all of the email configuration
# stuff loaded up. This will involve some minor parsing and in some cases
# we will need to check to see if there is a local variable set to override
# the global one that is set in the Settings stanza.
email_to = conf.get(section, 'email_to').split(',')
email_from = conf.get('Settings', 'email_from')
email_host = conf.get('Settings', 'smtp_host')
if conf.has_option(section, 'email_msg'):
email_msg = conf.get(section, 'email_msg')
else:
email_msg = conf.get('Settings', 'email_msg')
if conf.has_option(section, 'email_subject'):
email_subject = conf.get(section, 'email_subject')
else:
email_subject = conf.get('Settings', 'email_subject')
# We are going to derrive the filename from the stanza...
filename = '%s.csv' % section.replace('CSVFILE: ', '')
# We will also need the field listing as well...
field_list = conf.get(section, 'fields').split(',')
# Next we will need to build the filter list. We will do this by looking at
# the linked filter and parsing every option, loading it into the filterlist
fid = conf.get(section, 'filter')
filterlist = json.loads(conf.get(section, 'filter'))
source = conf.get(section, 'datasource')
# Now that we have everything in place, it's time to call the worker
sccsv.generator.gen_csv(sc, filename, field_list, source, filterlist)
# Now that we have the CSV file generated, check to see if we want to
# send out an email and act accordingly. If we do generate an email, then
# we will delete the CSV file when done, otherwise just leave it.
if conf.getboolean(section, 'gen_email'):
sccsv.mail.send(email_from, email_to, email_subject, email_msg,
filename, host=email_host)
os.remove(filename)
if __name__ == '__main__':
for section in conf.sections():
if 'CSVFILE: ' in section:
build_and_email(section)
| bsd-3-clause | -7,626,626,793,328,045,000 | 38.461538 | 80 | 0.671215 | false |
swpease/Flavify | flavors/migrations/0001_initial.py | 1 | 1265 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-03 21:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('base_item', models.CharField(max_length=50)),
('subcategory', models.CharField(blank=True, max_length=50)),
],
),
migrations.CreateModel(
name='Taste',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mouth_taste', models.CharField(choices=[('Sweet', 'Sweet'), ('Salty', 'Salty'), ('Sour', 'Sour'), ('Bitter', 'Bitter'), ('Umami', 'Umami'), ('Spicy', 'Spicy'), ('Numbing', 'Numbing'), ('Cooling', 'Cooling')], max_length=20)),
],
),
migrations.AddField(
model_name='ingredient',
name='tastes',
field=models.ManyToManyField(to='flavors.Taste'),
),
]
| mit | 8,676,821,688,018,126,000 | 34.138889 | 243 | 0.550198 | false |
benjaminy/Charcoal | RealAppAnalysis/Browsers/funcdata_to_graphs.py | 1 | 3628 | import matplotlib.pyplot as pyplot
from csv import DictReader
from datautil import findFile, parseCmdLnArgs, _flagged, _flaggedRetArg, log, extract, readCSV
import sys
from os.path import join
def main(argv):
opts, args = parseCmdLnArgs(argv,"ho:sdg:", ["help", "outdir=", "show", "debug"])
_handleHelpFlag(opts)
_selectGraph()
graph_data = _formatData(_loadFile(args))
xs, ys = getCumulativeDurationPercentages(graph_data, sum(graph_data))
ys2 = getFunctionAccumulation(graph_data)
show_graph = _flagged(opts, "-s", "--show")
figure = graph(xs, ys, ys2, show = show_graph)
filepath = _handleOutFlag(opts, args)
figure.savefig(filepath, indent = 2, format = "png")
debug = _flagged(opts, "-d", "--debug")
if debug:
log(opts, indent = 2, tag = "opts")
log(args, indent = 2, tag = "args")
log(filepath, indent = 2, tag = "Output")
def _selectGraph():
options = [("Scatter Plot", scatterPlot),
("Cumulative Distribution", cumulativeDistribution)]
for index, opt in enumerate(options): log(options[index][0], tag = index + 1)
plot = int(input("Select a plot: ")) - 1
func_index = 1
return options[plot][func_index]
def scatterPlot():
pass
def cumulativeDistribution():
pass
def _handleHelpFlag(opts):
if _flagged(opts, "-h", "--help"):
_usage()
sys.exit(2)
def _handleOutFlag(opts, args):
index_extension = -3
filename = args[0][:index_extension] + "png"
outdir = _flaggedRetArg(opts, "-o", "--outdir")
if outdir:
filename = filename.split("//")[-1]
return join(outdir, filename)
def _loadFile(args):
if not args:
filepath = findFile()
args.insert(0, filepath)
else: filepath = args[0]
data = readCSV(filepath)
return data
def _formatData(data):
durations_string_repr = extract(data, "duration")
just_func_durations = [float(duration) for duration in durations_string_repr]
return sorted(just_func_durations)
def getCumulativeDurationPercentages(durations, fcall_total_duration):
xs = []
ys = []
cumulative_time = 0.0
percent = 0.0
for dur in durations:
#Credit to Blake
xs.append(dur)
cumulative_time += dur
percent = cumulative_time / fcall_total_duration
ys.append(percent)
return (xs, ys)
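# Worked example with made-up numbers: for durations == [1.0, 2.0, 3.0, 4.0] and
# fcall_total_duration == 10.0 this returns
#   xs = [1.0, 2.0, 3.0, 4.0]
#   ys = [0.1, 0.3, 0.6, 1.0]
# i.e. ys[i] is the fraction of the total time spent in the i+1 shortest calls.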
def getFunctionAccumulation(durations):
ys = []
num = 0
percent = 0.0
num_of_durations = len(durations)
for dur in durations:
num += 1
percent = float(num) / float(num_of_durations)
ys.append(percent)
return ys
def _usage():
pass
def graph(xs, ys, ys2, show = False, outdir = ""):
x_max = 10000.0
x_min = 0.0
y_max = 1.05
y_min = -0.05
figure, plot_one = pyplot.subplots()
plot_one.set_xlabel("Function call duration (μs)")
plot_one.set_ylabel("Cumulative Percentages of Total Function Duration", color = 'b')
plot_one.set_xscale("log")
plot_one.set_ylim(y_min, y_max)
plot_one.plot(xs, ys2, "b.")
plot_two = plot_one.twinx()
plot_two.set_ylabel("Cumulative Percentage of Function Duration", color = 'r')
plot_two.set_xscale("log")
plot_two.set_ylim(y_min, y_max)
plot_two.plot(xs, ys, "r.")
if show: pyplot.show()
return pyplot
if __name__ == "__main__":
main(sys.argv[1:]) | mit | 8,720,110,406,313,218,000 | 26.574803 | 94 | 0.581748 | false |
PayloadSecurity/VxAPI | tests/_requests_scenarios/overview/overview_refresh.py | 1 | 1235 | scenarios = [
{
"url": "/key/current",
"method": "get",
"status_code": 200,
"json": {"api_key": "111", "auth_level": 100, "auth_level_name": "default"},
"headers": {
"content-type": "application/json",
"webservice-version": "8.10",
"api-version": "2.2.0",
"api-limits": {"limits": {"minute": 5, "hour": 200}, "used": {"minute": 0, "hour": 0}, "limit_reached": False},
"submission-limits": {"total": {"used": {"hour": 0, "hour_unique": 0, "day": 0, "day_unique": 0, "week": 15, "week_unique": 0, "month": 25, "month_unique": 0, "omega": 302, "omega_unique": 125}, "quota": {"hour": 200, "month": 5000}, "available": {"hour": 200, "month": 4975}, "quota_reached": False}, "quota_reached": False}
}
},
{
"url": "/overview/test/refresh",
"method": "get",
"status_code": 200,
"json": {"pies": "to"},
"headers": {
"content-type": "application/json",
"webservice-version": "8.10",
"api-version": "2.2.0",
"api-limits": {"limits": {"minute": 5, "hour": 200}, "used": {"minute": 0, "hour": 0}, "limit_reached": False}
}
}
]
| gpl-3.0 | 4,935,258,597,403,407,000 | 44.740741 | 337 | 0.479352 | false |
google-research/deeplab2 | model/layers/convolutions.py | 1 | 24647 | # coding=utf-8
# Copyright 2021 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains wrapper classes for convolution layers of tf.keras and Switchable Atrous Convolution.
Switchable Atrous Convolution (SAC) is convolution with a switchable atrous
rate. It also has optional pre- and post-global context layers.
[1] Siyuan Qiao, Liang-Chieh Chen, Alan Yuille. DetectoRS: Detecting Objects
with Recursive Feature Pyramid and Switchable Atrous Convolution.
arXiv:2006.02334
"""
import functools
from typing import Optional
from absl import logging
import tensorflow as tf
from deeplab2.model import utils
from deeplab2.model.layers import activations
def _compute_padding_size(kernel_size, atrous_rate):
kernel_size_effective = kernel_size + (kernel_size - 1) * (atrous_rate - 1)
pad_total = kernel_size_effective - 1
pad_begin = pad_total // 2
pad_end = pad_total - pad_begin
if pad_begin != pad_end:
logging.warn('Convolution requires one more padding to the '
'bottom-right pixel. This may cause misalignment.')
return (pad_begin, pad_end)
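# Worked examples of the padding computation above: a 7x7 kernel with atrous_rate
# 1 has an effective size of 7, so pad_total is 6 and the result is (3, 3); a 3x3
# kernel with atrous_rate 2 has an effective size of 5 and the result is (2, 2).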
class GlobalContext(tf.keras.layers.Layer):
"""Class for the global context modules in Switchable Atrous Convolution."""
def build(self, input_shape):
super().build(input_shape)
input_shape = tf.TensorShape(input_shape)
input_channel = self._get_input_channel(input_shape)
self.global_average_pooling = tf.keras.layers.GlobalAveragePooling2D()
self.convolution = tf.keras.layers.Conv2D(
input_channel, 1, strides=1, padding='same', name=self.name + '_conv',
kernel_initializer='zeros', bias_initializer='zeros')
def call(self, inputs, *args, **kwargs):
outputs = self.global_average_pooling(inputs)
outputs = tf.expand_dims(outputs, axis=1)
outputs = tf.expand_dims(outputs, axis=1)
outputs = self.convolution(outputs)
return inputs + outputs
def _get_input_channel(self, input_shape):
# Reference: tf.keras.layers.convolutional.Conv.
if input_shape.dims[-1].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
return int(input_shape[-1])
class SwitchableAtrousConvolution(tf.keras.layers.Conv2D):
"""Class for the Switchable Atrous Convolution."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._average_pool = tf.keras.layers.AveragePooling2D(
pool_size=(5, 5), strides=1, padding='same')
self._switch = tf.keras.layers.Conv2D(
1,
kernel_size=1,
strides=self.strides,
padding='same',
dilation_rate=1,
name='switch',
kernel_initializer='zeros',
bias_initializer='zeros')
def build(self, input_shape):
super().build(input_shape)
if self.padding == 'causal':
tf_padding = 'VALID'
elif isinstance(self.padding, str):
tf_padding = self.padding.upper()
else:
tf_padding = self.padding
large_dilation_rate = list(self.dilation_rate)
large_dilation_rate = [r * 3 for r in large_dilation_rate]
self._large_convolution_op = functools.partial(
tf.nn.convolution,
strides=list(self.strides),
padding=tf_padding,
dilations=large_dilation_rate,
data_format=self._tf_data_format,
name=self.__class__.__name__ + '_large')
def call(self, inputs):
# Reference: tf.keras.layers.convolutional.Conv.
input_shape = inputs.shape
switches = self._switch(self._average_pool(inputs))
if self._is_causal: # Apply causal padding to inputs for Conv1D.
inputs = tf.compat.v1.pad(inputs, self._compute_causal_padding(inputs))
outputs = self._convolution_op(inputs, self.kernel)
outputs_large = self._large_convolution_op(inputs, self.kernel)
outputs = switches * outputs_large + (1 - switches) * outputs
if self.use_bias:
outputs = tf.nn.bias_add(
outputs, self.bias, data_format=self._tf_data_format)
if not tf.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(input_shape)
outputs.set_shape(out_shape)
if self.activation is not None:
return self.activation(outputs)
return outputs
def squeeze_batch_dims(self, inp, op, inner_rank):
# Reference: tf.keras.utils.conv_utils.squeeze_batch_dims.
with tf.name_scope('squeeze_batch_dims'):
shape = inp.shape
inner_shape = shape[-inner_rank:]
if not inner_shape.is_fully_defined():
inner_shape = tf.compat.v1.shape(inp)[-inner_rank:]
batch_shape = shape[:-inner_rank]
if not batch_shape.is_fully_defined():
batch_shape = tf.compat.v1.shape(inp)[:-inner_rank]
if isinstance(inner_shape, tf.TensorShape):
inp_reshaped = tf.reshape(inp, [-1] + inner_shape.as_list())
else:
inp_reshaped = tf.reshape(
inp, tf.concat(([-1], inner_shape), axis=-1))
out_reshaped = op(inp_reshaped)
out_inner_shape = out_reshaped.shape[-inner_rank:]
if not out_inner_shape.is_fully_defined():
out_inner_shape = tf.compat.v1.shape(out_reshaped)[-inner_rank:]
out = tf.reshape(
out_reshaped, tf.concat((batch_shape, out_inner_shape), axis=-1))
out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])
return out
class Conv2DSame(tf.keras.layers.Layer):
"""A wrapper class for a 2D convolution with 'same' padding.
In contrast to tf.keras.layers.Conv2D, this layer aligns the kernel with the
top-left corner rather than the bottom-right corner. Optionally, a batch
normalization and an activation can be added on top.
"""
def __init__(
self,
output_channels: int,
kernel_size: int,
name: str,
strides: int = 1,
atrous_rate: int = 1,
use_bias: bool = True,
use_bn: bool = False,
bn_layer: tf.keras.layers.Layer = tf.keras.layers.BatchNormalization,
bn_gamma_initializer: str = 'ones',
activation: Optional[str] = None,
use_switchable_atrous_conv: bool = False,
use_global_context_in_sac: bool = False,
conv_kernel_weight_decay: float = 0.0):
"""Initializes convolution with zero padding aligned to the top-left corner.
DeepLab aligns zero padding differently to tf.keras 'same' padding.
Considering a convolution with a 7x7 kernel, a stride of 2 and an even input
size, tf.keras 'same' padding will add 2 zero padding to the top-left and 3
zero padding to the bottom-right. However, for consistent feature alignment,
DeepLab requires an equal padding of 3 in all directions. This behavior is
consistent with e.g. the ResNet 'stem' block.
Args:
output_channels: An integer specifying the number of filters of the
convolution.
kernel_size: An integer specifying the size of the convolution kernel.
name: A string specifying the name of this layer.
strides: An optional integer or tuple of integers specifying the size of
the strides (default: 1).
atrous_rate: An optional integer or tuple of integers specifying the
atrous rate of the convolution (default: 1).
use_bias: An optional flag specifying whether bias should be added for the
convolution.
use_bn: An optional flag specifying whether batch normalization should be
added after the convolution (default: False).
bn_layer: An optional tf.keras.layers.Layer that computes the
normalization (default: tf.keras.layers.BatchNormalization).
bn_gamma_initializer: An initializer for the batch norm gamma weight.
activation: An optional flag specifying an activation function to be added
after the convolution.
use_switchable_atrous_conv: Boolean, whether the layer uses switchable
atrous convolution.
use_global_context_in_sac: Boolean, whether the switchable atrous
convolution (SAC) uses pre- and post-global context.
conv_kernel_weight_decay: A float, the weight decay for convolution
kernels.
Raises:
ValueError: If use_bias and use_bn in the convolution.
"""
super(Conv2DSame, self).__init__(name=name)
if use_bn and use_bias:
raise ValueError('Conv2DSame is using convolution bias with batch_norm.')
if use_global_context_in_sac:
self._pre_global_context = GlobalContext(name='pre_global_context')
convolution_op = tf.keras.layers.Conv2D
convolution_padding = 'same'
if strides == 1 or strides == (1, 1):
if use_switchable_atrous_conv:
convolution_op = SwitchableAtrousConvolution
else:
padding = _compute_padding_size(kernel_size, atrous_rate)
self._zeropad = tf.keras.layers.ZeroPadding2D(
padding=(padding, padding), name='zeropad')
convolution_padding = 'valid'
self._conv = convolution_op(
output_channels,
kernel_size,
strides=strides,
padding=convolution_padding,
use_bias=use_bias,
dilation_rate=atrous_rate,
name='conv',
kernel_initializer='he_normal',
kernel_regularizer=tf.keras.regularizers.l2(
conv_kernel_weight_decay))
if use_global_context_in_sac:
self._post_global_context = GlobalContext(name='post_global_context')
if use_bn:
self._batch_norm = bn_layer(axis=3, name='batch_norm',
gamma_initializer=bn_gamma_initializer)
self._activation_fn = None
if activation is not None:
self._activation_fn = activations.get_activation(activation)
self._use_global_context_in_sac = use_global_context_in_sac
self._strides = strides
self._use_bn = use_bn
def call(self, input_tensor, training=False):
"""Performs a forward pass.
Args:
input_tensor: An input tensor of type tf.Tensor with shape [batch, height,
width, channels].
training: A boolean flag indicating whether training behavior should be
used (default: False).
Returns:
The output tensor.
"""
x = input_tensor
if self._use_global_context_in_sac:
x = self._pre_global_context(x)
if not (self._strides == 1 or self._strides == (1, 1)):
x = self._zeropad(x)
x = self._conv(x)
if self._use_global_context_in_sac:
x = self._post_global_context(x)
if self._use_bn:
x = self._batch_norm(x, training=training)
if self._activation_fn is not None:
x = self._activation_fn(x)
return x
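# A minimal usage sketch for Conv2DSame. The tensor shape, layer name and the
# 'relu' activation string are assumptions made for illustration only:
#
#   conv = Conv2DSame(output_channels=64, kernel_size=3, name='example_conv',
#                     strides=2, use_bias=False, use_bn=True, activation='relu')
#   features = conv(tf.random.uniform([2, 65, 65, 3]), training=True)
#   # features now has shape [2, 33, 33, 64]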
class DepthwiseConv2DSame(tf.keras.layers.Layer):
"""A wrapper class for a 2D depthwise convolution.
In contrast to convolutions in tf.keras.layers.DepthwiseConv2D, this layers
aligns the kernel with the top-left corner rather than the bottom-right
corner. Optionally, a batch normalization and an activation can be added.
"""
def __init__(self,
kernel_size: int,
name: str,
strides: int = 1,
atrous_rate: int = 1,
use_bias: bool = True,
use_bn: bool = False,
bn_layer=tf.keras.layers.BatchNormalization,
activation: Optional[str] = None):
"""Initializes a 2D depthwise convolution.
Args:
kernel_size: An integer specifying the size of the convolution kernel.
name: A string specifying the name of this layer.
strides: An optional integer or tuple of integers specifying the size of
the strides (default: 1).
atrous_rate: An optional integer or tuple of integers specifying the
atrous rate of the convolution (default: 1).
use_bias: An optional flag specifying whether bias should be added for the
convolution.
use_bn: An optional flag specifying whether batch normalization should be
added after the convolution (default: False).
bn_layer: An optional tf.keras.layers.Layer that computes the
normalization (default: tf.keras.layers.BatchNormalization).
activation: An optional flag specifying an activation function to be added
after the convolution.
Raises:
ValueError: If use_bias and use_bn in the convolution.
"""
super(DepthwiseConv2DSame, self).__init__(name=name)
if use_bn and use_bias:
raise ValueError(
          'DepthwiseConv2DSame is using convolution bias with batch_norm.')
if strides == 1 or strides == (1, 1):
convolution_padding = 'same'
else:
padding = _compute_padding_size(kernel_size, atrous_rate)
self._zeropad = tf.keras.layers.ZeroPadding2D(
padding=(padding, padding), name='zeropad')
convolution_padding = 'valid'
self._depthwise_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=strides,
padding=convolution_padding,
use_bias=use_bias,
dilation_rate=atrous_rate,
name='depthwise_conv')
if use_bn:
self._batch_norm = bn_layer(axis=3, name='batch_norm')
self._activation_fn = None
if activation is not None:
self._activation_fn = activations.get_activation(activation)
self._strides = strides
self._use_bn = use_bn
def call(self, input_tensor, training=False):
"""Performs a forward pass.
Args:
input_tensor: An input tensor of type tf.Tensor with shape [batch, height,
width, channels].
training: A boolean flag indicating whether training behavior should be
used (default: False).
Returns:
The output tensor.
"""
x = input_tensor
if not (self._strides == 1 or self._strides == (1, 1)):
x = self._zeropad(x)
x = self._depthwise_conv(x)
if self._use_bn:
x = self._batch_norm(x, training=training)
if self._activation_fn is not None:
x = self._activation_fn(x)
return x
class SeparableConv2DSame(tf.keras.layers.Layer):
"""A wrapper class for a 2D separable convolution.
In contrast to convolutions in tf.keras.layers.SeparableConv2D, this layers
aligns the kernel with the top-left corner rather than the bottom-right
corner. Optionally, a batch normalization and an activation can be added.
"""
def __init__(
self,
output_channels: int,
kernel_size: int,
name: str,
strides: int = 1,
atrous_rate: int = 1,
use_bias: bool = True,
use_bn: bool = False,
bn_layer: tf.keras.layers.Layer = tf.keras.layers.BatchNormalization,
activation: Optional[str] = None):
"""Initializes a 2D separable convolution.
Args:
output_channels: An integer specifying the number of filters of the
convolution output.
kernel_size: An integer specifying the size of the convolution kernel.
name: A string specifying the name of this layer.
strides: An optional integer or tuple of integers specifying the size of
the strides (default: 1).
atrous_rate: An optional integer or tuple of integers specifying the
atrous rate of the convolution (default: 1).
use_bias: An optional flag specifying whether bias should be added for the
convolution.
use_bn: An optional flag specifying whether batch normalization should be
added after the convolution (default: False).
bn_layer: An optional tf.keras.layers.Layer that computes the
normalization (default: tf.keras.layers.BatchNormalization).
activation: An optional flag specifying an activation function to be added
after the convolution.
Raises:
ValueError: If use_bias and use_bn in the convolution.
"""
super(SeparableConv2DSame, self).__init__(name=name)
if use_bn and use_bias:
raise ValueError(
'SeparableConv2DSame is using convolution bias with batch_norm.')
self._depthwise = DepthwiseConv2DSame(
kernel_size=kernel_size,
name='depthwise',
strides=strides,
atrous_rate=atrous_rate,
use_bias=use_bias,
use_bn=use_bn,
bn_layer=bn_layer,
activation=activation)
self._pointwise = Conv2DSame(
output_channels=output_channels,
kernel_size=1,
name='pointwise',
strides=1,
atrous_rate=1,
use_bias=use_bias,
use_bn=use_bn,
bn_layer=bn_layer,
activation=activation)
def call(self, input_tensor, training=False):
"""Performs a forward pass.
Args:
input_tensor: An input tensor of type tf.Tensor with shape [batch, height,
width, channels].
training: A boolean flag indicating whether training behavior should be
used (default: False).
Returns:
The output tensor.
"""
x = self._depthwise(input_tensor, training=training)
return self._pointwise(x, training=training)
class StackedConv2DSame(tf.keras.layers.Layer):
"""Stacked Conv2DSame or SeparableConv2DSame.
This class sequentially stacks a given number of Conv2DSame layers or
SeparableConv2DSame layers.
"""
def __init__(
self,
num_layers: int,
conv_type: str,
output_channels: int,
kernel_size: int,
name: str,
strides: int = 1,
atrous_rate: int = 1,
use_bias: bool = True,
use_bn: bool = False,
bn_layer: tf.keras.layers.Layer = tf.keras.layers.BatchNormalization,
activation: Optional[str] = None):
"""Initializes a stack of convolutions.
Args:
num_layers: The number of convolutions to create.
conv_type: A string specifying the convolution type used in each block.
Must be one of 'standard_conv' or 'depthwise_separable_conv'.
output_channels: An integer specifying the number of filters of the
convolution output.
kernel_size: An integer specifying the size of the convolution kernel.
name: A string specifying the name of this layer.
strides: An optional integer or tuple of integers specifying the size of
the strides (default: 1).
atrous_rate: An optional integer or tuple of integers specifying the
atrous rate of the convolution (default: 1).
use_bias: An optional flag specifying whether bias should be added for the
convolution.
use_bn: An optional flag specifying whether batch normalization should be
added after the convolution (default: False).
bn_layer: An optional tf.keras.layers.Layer that computes the
normalization (default: tf.keras.layers.BatchNormalization).
activation: An optional flag specifying an activation function to be added
after the convolution.
Raises:
ValueError: An error occurs when conv_type is neither 'standard_conv'
nor 'depthwise_separable_conv'.
"""
super(StackedConv2DSame, self).__init__(name=name)
if conv_type == 'standard_conv':
convolution_op = Conv2DSame
elif conv_type == 'depthwise_separable_conv':
convolution_op = SeparableConv2DSame
else:
raise ValueError('Convolution %s not supported.' % conv_type)
for index in range(num_layers):
current_name = utils.get_conv_bn_act_current_name(index, use_bn,
activation)
utils.safe_setattr(self, current_name, convolution_op(
output_channels=output_channels,
kernel_size=kernel_size,
name=utils.get_layer_name(current_name),
strides=strides,
atrous_rate=atrous_rate,
use_bias=use_bias,
use_bn=use_bn,
bn_layer=bn_layer,
activation=activation))
self._num_layers = num_layers
self._use_bn = use_bn
self._activation = activation
def call(self, input_tensor, training=False):
"""Performs a forward pass.
Args:
input_tensor: An input tensor of type tf.Tensor with shape [batch, height,
width, channels].
training: A boolean flag indicating whether training behavior should be
used (default: False).
Returns:
The output tensor.
"""
x = input_tensor
for index in range(self._num_layers):
current_name = utils.get_conv_bn_act_current_name(index, self._use_bn,
self._activation)
x = getattr(self, current_name)(x, training=training)
return x
class Conv1D(tf.keras.layers.Layer):
"""A wrapper class for a 1D convolution with batch norm and activation.
Conv1D creates a convolution kernel that is convolved with the layer input
over a single spatial (or temporal) dimension to produce a tensor of outputs.
The input should always be 3D with shape [batch, length, channel], so
accordingly, the optional batch norm is done on axis=2.
In DeepLab, we use Conv1D only with kernel_size = 1 for dual path transformer
layers in MaX-DeepLab [1] architectures.
Reference:
[1] MaX-DeepLab: End-to-End Panoptic Segmentation with Mask Transformers,
CVPR 2021.
Huiyu Wang, Yukun Zhu, Hartwig Adam, Alan Yuille, Liang-Chieh Chen.
"""
def __init__(
self,
output_channels: int,
name: str,
use_bias: bool = True,
use_bn: bool = False,
bn_layer: tf.keras.layers.Layer = tf.keras.layers.BatchNormalization,
bn_gamma_initializer: str = 'ones',
activation: Optional[str] = None,
conv_kernel_weight_decay: float = 0.0,
kernel_initializer='he_normal',
kernel_size: int = 1,
padding: str = 'valid'):
"""Initializes a Conv1D.
Args:
output_channels: An integer specifying the number of filters of the
convolution.
name: A string specifying the name of this layer.
use_bias: An optional flag specifying whether bias should be added for the
convolution.
use_bn: An optional flag specifying whether batch normalization should be
added after the convolution (default: False).
bn_layer: An optional tf.keras.layers.Layer that computes the
normalization (default: tf.keras.layers.BatchNormalization).
bn_gamma_initializer: An initializer for the batch norm gamma weight.
activation: An optional flag specifying an activation function to be added
after the convolution.
conv_kernel_weight_decay: A float, the weight decay for convolution
kernels.
kernel_initializer: An initializer for the convolution kernel.
kernel_size: An integer specifying the size of the convolution kernel.
padding: An optional string specifying the padding to use. Must be either
'same' or 'valid' (default: 'valid').
Raises:
ValueError: If use_bias and use_bn in the convolution.
"""
super(Conv1D, self).__init__(name=name)
if use_bn and use_bias:
      raise ValueError('Conv1D is using convolution bias with batch_norm.')
self._conv = tf.keras.layers.Conv1D(
output_channels,
kernel_size=kernel_size,
strides=1,
padding=padding,
use_bias=use_bias,
name='conv',
kernel_initializer=kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(
conv_kernel_weight_decay))
self._batch_norm = None
if use_bn:
# Batch norm uses axis=2 because the input is 3D with channel being the
# last dimension.
self._batch_norm = bn_layer(axis=2, name='batch_norm',
gamma_initializer=bn_gamma_initializer)
self._activation_fn = None
if activation is not None:
self._activation_fn = activations.get_activation(activation)
def call(self, input_tensor, training=False):
"""Performs a forward pass.
Args:
input_tensor: An input tensor of type tf.Tensor with shape [batch, length,
channels].
training: A boolean flag indicating whether training behavior should be
used (default: False).
Returns:
The output tensor.
"""
x = self._conv(input_tensor)
if self._batch_norm is not None:
x = self._batch_norm(x, training=training)
if self._activation_fn is not None:
x = self._activation_fn(x)
return x
| apache-2.0 | -6,441,769,110,858,099,000 | 36.007508 | 107 | 0.664178 | false |
mbatchkarov/dc_evaluation | eval/pipeline/feature_selectors.py | 1 | 7143 | import logging
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_selection import SelectKBest, chi2
import numpy as np
from sklearn.feature_selection.univariate_selection import _clean_nans
from discoutils.thesaurus_loader import Vectors
from eval.utils.misc import calculate_log_odds, update_dict_according_to_mask
__author__ = 'mmb28'
class VectorBackedSelectKBest(SelectKBest):
"""
    An extension of sklearn's SelectKBest, which also contains a VectorStore. Feature selection is done
in two optional steps:
1: Remove all features that are not contained in the vector store
    2: Remove any remaining low-scoring features to ensure a maximum of k features are left
Additionally, this class stores a vocabulary (like a vectorizer), which maps features to a corresponding columns
in the feature vector matrix. This is so that a FeatureVectorsCsvDumper can be placed after this object in a
pipeline.
Also, this object assumes its input is not a matrix X (as in SelectKBest), but a tuple (X, vocabulary). The
vocabulary is provided by ThesaurusVectorizer, which comes before this object in a pipeline and represents the
mapping of features to columns in X before any feature selection is done.
"""
def __init__(self, score_func=chi2, k='all', must_be_in_thesaurus=False, min_log_odds_score=0, **kwargs):
"""
:param min_log_odds_score: any feature with a log odds score between -min_log_odds_score and
min_log_odds_score will be removed. Assumes the classification problem is binary.
"""
if not score_func:
score_func = chi2
self.k = k
self.must_be_in_thesaurus = must_be_in_thesaurus
self.min_log_odds_score = min_log_odds_score
self.vocabulary_ = None
super(VectorBackedSelectKBest, self).__init__(score_func=score_func, k=k)
def fit(self, X, y, vector_source=None, clusters=None, **kwargs):
if vector_source is None and clusters is None and self.must_be_in_thesaurus:
logging.error('You requested feature selection based on vector presence '
'but did not provide a vector source.')
            raise ValueError('vector source (vectors or clusters) required with must_be_in_thesaurus')
if self.must_be_in_thesaurus:
self.vector_source = vector_source if vector_source else set(clusters.index)
# Vectorizer also returns its vocabulary, store it and work with the rest
X, self.vocabulary_ = X
if self.k == 'all' or int(self.k) >= X.shape[1]:
# do not bother calculating feature informativeness if all features will be used anyway
self.scores_ = np.ones((X.shape[1],))
else:
super(VectorBackedSelectKBest, self).fit(X, y)
self.vectors_mask = self._zero_score_of_oot_feats() \
if self.must_be_in_thesaurus else np.ones(X.shape[1], dtype=bool)
self.log_odds_mask = self._zero_score_of_low_log_odds_features(X, y) \
            if self.min_log_odds_score > 0 else np.ones(X.shape[1], dtype=bool)
return self
def transform(self, X):
# Vectorizer also returns its vocabulary, remove it
if self.vocabulary_:
return super(VectorBackedSelectKBest, self).transform(X[0]), self.vocabulary_
else:
            # Sometimes the training set contains no features. We don't want this to break the experiment,
            # so let it slide
logging.error('Empty vocabulary')
return X[0], self.vocabulary_
def _zero_score_of_oot_feats(self):
mask = np.ones(self.scores_.shape, dtype=bool)
for feature, index in self.vocabulary_.items():
if feature not in self.vector_source:
mask[index] = False
if np.count_nonzero(mask) == 0:
logging.error('Feature selector removed all features')
raise ValueError('Empty vocabulary')
return mask
def _zero_score_of_low_log_odds_features(self, X, y):
if self.min_log_odds_score <= 0:
# we don't want to use log odds score, return an all-true mask
return np.ones(X.shape[1])
if len(set(y)) != 2:
raise ValueError('Calculating a log odds score requires a binary classification task')
log_odds = calculate_log_odds(X, y)
return (log_odds > self.min_log_odds_score) | (log_odds < -self.min_log_odds_score)
def _get_support_mask(self):
k = self.k
chi2_scores = self.scores_
chi2_mask = np.ones(chi2_scores.shape, dtype=bool)
if k != 'all' and k < len(chi2_scores):
# we don't want all features to be kept, and the number we want is less than the number available
chi2_scores = _clean_nans(chi2_scores)
selected_indices = np.argsort(chi2_scores)[:k]
chi2_mask[selected_indices] = False
mask = chi2_mask & self.vectors_mask & self.log_odds_mask
logging.info('%d/%d features survived feature selection', np.count_nonzero(mask), len(mask))
# Only keep the scores of the features that survived. This array is used to check the
# input data shape at train and decode time matches. However, because the post-feature-selections
# vocabulary is passed back into the vectorizer, at decode time the input will likely be smaller. This is
# like doing feature selection in the vectorizer.
self.scores_ = self.scores_[mask]
self.log_odds_mask = self.log_odds_mask[mask]
self.vectors_mask = self.vectors_mask[mask]
self.vocabulary_ = update_dict_according_to_mask(self.vocabulary_, mask)
return mask
class MetadataStripper(BaseEstimator, TransformerMixin):
"""
The current implementation of ThesaurusVectorizer's fit() returns not just a data matrix, but also some
metadata (its vocabulary). This class is meant to sit in a pipeline behind the vectorizer to remove that
metadata, so that it doesn't break other items in the pipeline.
Currently several other pipeline elements can make use of this data ( VectorBackedSelectKBest and
FeatureVectorsCsvDumper). This class must come after these in a pipeline as they do not have any
defensive checks
"""
def fit(self, X, y, vector_source=None, strategy='linear', k=None, **kwargs):
matrix, self.voc = X # store voc, may be handy for for debugging
self.vector_source = vector_source
if isinstance(self.vector_source, Vectors):
# the vector source can be either a Thesaurus or Vectors. Both can provide nearest neighbours,
# but the latter needs this method to be called first
if not k:
k = 10
self.vector_source.init_sims([str(foo) for foo in self.voc.keys()],
strategy=strategy, n_neighbors=k)
return self
def transform(self, X, **kwargs):
# if X is a tuple, strip metadata, otherwise let it be
return X[0] if tuple(X) == X else X
| bsd-3-clause | -3,467,563,906,106,328,000 | 48.262069 | 117 | 0.659387 | false |
indro/t2c | apps/external_apps/django_extensions/management/modelviz.py | 2 | 8563 | #!/usr/bin/env python
"""Django model to DOT (Graphviz) converter
by Antonio Cavedoni <[email protected]>
Make sure your DJANGO_SETTINGS_MODULE is set to your project or
place this script in the same directory of the project and call
the script like this:
$ python modelviz.py [-h] [-a] [-d] [-g] [-i <model_names>] <app_label> ... <app_label> > <filename>.dot
$ dot <filename>.dot -Tpng -o <filename>.png
options:
-h, --help
show this help message and exit.
-a, --all_applications
show models from all applications.
-d, --disable_fields
don't show the class member fields.
-g, --group_models
draw an enclosing box around models from the same app.
-i, --include_models=User,Person,Car
only include selected models in graph.
"""
__version__ = "0.9"
__svnid__ = "$Id$"
__license__ = "Python"
__author__ = "Antonio Cavedoni <http://cavedoni.com/>"
__contributors__ = [
"Stefano J. Attardi <http://attardi.org/>",
"limodou <http://www.donews.net/limodou/>",
"Carlo C8E Miron",
"Andre Campos <[email protected]>",
"Justin Findlay <[email protected]>",
"Alexander Houben <[email protected]>",
"Bas van Oostveen <[email protected]>",
]
import getopt, sys
from django.core.management import setup_environ
try:
import settings
except ImportError:
pass
else:
setup_environ(settings)
from django.utils.safestring import mark_safe
from django.template import Template, Context
from django.db import models
from django.db.models import get_models
from django.db.models.fields.related import \
ForeignKey, OneToOneField, ManyToManyField
try:
from django.db.models.fields.generic import GenericRelation
except ImportError:
from django.contrib.contenttypes.generic import GenericRelation
head_template = """
digraph name {
fontname = "Helvetica"
fontsize = 8
node [
fontname = "Helvetica"
fontsize = 8
shape = "plaintext"
]
edge [
fontname = "Helvetica"
fontsize = 8
]
"""
body_template = """
{% if use_subgraph %}
subgraph {{ cluster_app_name }} {
label=<
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER"
><FONT FACE="Helvetica Bold" COLOR="Black" POINT-SIZE="12"
>{{ app_name }}</FONT></TD></TR>
</TABLE>
>
color=olivedrab4
style="rounded"
{% endif %}
{% for model in models %}
{{ model.app_name }}_{{ model.name }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ model.name }}</FONT></TD></TR>
{% if not disable_fields %}
{% for field in model.fields %}
<TR><TD ALIGN="LEFT" BORDER="0"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica Bold">{{ field.name }}</FONT
></TD>
<TD ALIGN="LEFT"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica Bold">{{ field.type }}</FONT
></TD></TR>
{% endfor %}
{% endif %}
</TABLE>
>]
{% endfor %}
{% if use_subgraph %}
}
{% endif %}
"""
rel_template = """
{% for model in models %}
{% for relation in model.relations %}
{% if relation.needs_node %}
{{ relation.target }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ relation.target }}</FONT></TD></TR>
</TABLE>
>]
{% endif %}
{{ model.app_name }}_{{ model.name }} -> {{ relation.target_app }}_{{ relation.target }}
[label="{{ relation.name }}"] {{ relation.arrows }};
{% endfor %}
{% endfor %}
"""
tail_template = """
}
"""
def generate_dot(app_labels, **kwargs):
disable_fields = kwargs.get('disable_fields', False)
include_models = kwargs.get('include_models', [])
all_applications = kwargs.get('all_applications', False)
use_subgraph = kwargs.get('group_models', False)
dot = head_template
apps = []
if all_applications:
apps = models.get_apps()
for app_label in app_labels:
app = models.get_app(app_label)
if not app in apps:
apps.append(app)
graphs = []
for app in apps:
graph = Context({
'name': '"%s"' % app.__name__,
'app_name': "%s" % '.'.join(app.__name__.split('.')[:-1]),
'cluster_app_name': "cluster_%s" % app.__name__.replace(".", "_"),
'disable_fields': disable_fields,
'use_subgraph': use_subgraph,
'models': []
})
for appmodel in get_models(app):
model = {
'app_name': app.__name__.replace(".", "_"),
'name': appmodel.__name__,
'fields': [],
'relations': []
}
# consider given model name ?
def consider(model_name):
return not include_models or model_name in include_models
if not consider(appmodel._meta.object_name):
continue
# model attributes
def add_attributes():
model['fields'].append({
'name': field.name,
'type': type(field).__name__,
'blank': field.blank
})
for field in appmodel._meta.fields:
add_attributes()
if appmodel._meta.many_to_many:
for field in appmodel._meta.many_to_many:
add_attributes()
# relations
def add_relation(extras=""):
_rel = {
'target_app': field.rel.to.__module__.replace('.','_'),
'target': field.rel.to.__name__,
'type': type(field).__name__,
'name': field.name,
'arrows': extras,
'needs_node': True
}
if _rel not in model['relations'] and consider(_rel['target']):
model['relations'].append(_rel)
for field in appmodel._meta.fields:
if isinstance(field, ForeignKey):
add_relation()
elif isinstance(field, OneToOneField):
add_relation('[arrowhead=none arrowtail=none]')
if appmodel._meta.many_to_many:
for field in appmodel._meta.many_to_many:
if isinstance(field, ManyToManyField):
add_relation('[arrowhead=normal arrowtail=normal]')
elif isinstance(field, GenericRelation):
add_relation(mark_safe('[style="dotted"] [arrowhead=normal arrowtail=normal]'))
graph['models'].append(model)
graphs.append(graph)
nodes = []
for graph in graphs:
nodes.extend([e['name'] for e in graph['models']])
for graph in graphs:
        # don't add duplicate nodes for relation targets that already exist as model nodes
for model in graph['models']:
for relation in model['relations']:
if relation['target'] in nodes:
relation['needs_node'] = False
# render templates
t = Template(body_template)
dot += '\n' + t.render(graph)
for graph in graphs:
t = Template(rel_template)
dot += '\n' + t.render(graph)
dot += '\n' + tail_template
return dot
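# Example (a sketch): build the DOT source for two apps and write it to a file:
#   dot_source = generate_dot(['auth', 'blog'], group_models=True)
#   open('models.dot', 'w').write(dot_source)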
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hadgi:",
["help", "all_applications", "disable_fields", "group_models", "include_models="])
except getopt.GetoptError, error:
print __doc__
sys.exit(error)
kwargs = {}
for opt, arg in opts:
if opt in ("-h", "--help"):
print __doc__
sys.exit()
if opt in ("-a", "--all_applications"):
kwargs['all_applications'] = True
if opt in ("-d", "--disable_fields"):
kwargs['disable_fields'] = True
if opt in ("-g", "--group_models"):
kwargs['group_models'] = True
if opt in ("-i", "--include_models"):
kwargs['include_models'] = arg.split(',')
if not args and not kwargs.get('all_applications', False):
print __doc__
sys.exit()
print generate_dot(args, **kwargs)
if __name__ == "__main__":
main()
| mit | 1,218,517,478,839,711,700 | 29.47331 | 106 | 0.548523 | false |
mytliulei/DCNRobotInstallPackages | windows/win32/pygal-1.7.0/pygal_gen.py | 4 | 2328 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2014 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
import argparse
import pygal
parser = argparse.ArgumentParser(
description='Generate pygal chart in command line',
prog='pygal_gen')
parser.add_argument('-t', '--type', dest='type', default='Line',
choices=map(lambda x: x.__name__, pygal.CHARTS),
help='Kind of chart to generate')
parser.add_argument('-o', '--output', dest='filename', default='pygal_out.svg',
help='Filename to write the svg to')
parser.add_argument('-s', '--serie', dest='series', nargs='+', action='append',
help='Add a serie in the form (title val1 val2...)')
parser.add_argument('--version', action='version',
version='pygal %s' % pygal.__version__)
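# Every pygal config item is exposed as a command line flag below. Boolean options
# that default to True are registered as --no-<name>, so passing the flag turns
# them off; all other options keep their original name.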
for key in pygal.config.CONFIG_ITEMS:
opt_name = key.name
val = key.value
opts = {}
if key.type == list:
opts['type'] = key.subtype
opts['nargs'] = '+'
else:
opts['type'] = key.type
if opts['type'] == bool:
del opts['type']
opts['action'] = 'store_true' if not val else 'store_false'
if val:
opt_name = 'no-' + opt_name
if key.name == 'interpolate':
opts['choices'] = list(pygal.interpolate.INTERPOLATIONS.keys())
parser.add_argument(
'--%s' % opt_name, dest=key.name, default=val, **opts)
config = parser.parse_args()
chart = getattr(pygal, config.type)(**vars(config))
for serie in config.series:
chart.add(serie[0], map(float, serie[1:]))
chart.render_to_file(config.filename)
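# Example invocation (a sketch):
#   python pygal_gen.py -t Bar -o out.svg -s Firefox 10 20 30 -s Chrome 15 25 35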
| apache-2.0 | 6,623,995,898,823,301,000 | 33.731343 | 79 | 0.641599 | false |
gandreello/openthread | tests/scripts/thread-cert/Cert_9_2_08_PersistentDatasets.py | 3 | 7902 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import config
import node
COMMISSIONER = 1
LEADER = 2
ROUTER = 3
ED = 4
SED = 5
CHANNEL_INIT = 19
PANID_INIT = 0xface
LEADER_ACTIVE_TIMESTAMP = 10
COMMISSIONER_PENDING_CHANNEL = 20
COMMISSIONER_PENDING_PANID = 0xafce
MTDS = [ED, SED]
class Cert_9_2_8_PersistentDatasets(unittest.TestCase):
def setUp(self):
self.simulator = config.create_default_simulator()
self.nodes = {}
for i in range(1,6):
self.nodes[i] = node.Node(i, (i in MTDS), simulator=self.simulator)
self.nodes[COMMISSIONER].set_active_dataset(LEADER_ACTIVE_TIMESTAMP, panid=PANID_INIT, channel=CHANNEL_INIT)
self.nodes[COMMISSIONER].set_mode('rsdn')
self.nodes[COMMISSIONER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[COMMISSIONER].enable_whitelist()
self.nodes[COMMISSIONER].set_router_selection_jitter(1)
self.nodes[LEADER].set_active_dataset(LEADER_ACTIVE_TIMESTAMP, panid=PANID_INIT, channel=CHANNEL_INIT)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[COMMISSIONER].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[SED].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER].set_active_dataset(LEADER_ACTIVE_TIMESTAMP, panid=PANID_INIT, channel=CHANNEL_INIT)
self.nodes[ROUTER].set_mode('rsdn')
self._setUpRouter()
self.nodes[ED].set_channel(CHANNEL_INIT)
self.nodes[ED].set_panid(PANID_INIT)
self.nodes[ED].set_mode('rsn')
self._setUpEd()
self.nodes[SED].set_channel(CHANNEL_INIT)
self.nodes[SED].set_panid(PANID_INIT)
self.nodes[SED].set_mode('s')
self._setUpSed()
def _setUpRouter(self):
self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER].enable_whitelist()
self.nodes[ROUTER].set_router_selection_jitter(1)
def _setUpEd(self):
self.nodes[ED].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED].enable_whitelist()
self.nodes[ED].set_timeout(config.DEFAULT_CHILD_TIMEOUT)
def _setUpSed(self):
self.nodes[SED].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[SED].enable_whitelist()
self.nodes[SED].set_timeout(config.DEFAULT_CHILD_TIMEOUT)
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
node.destroy()
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[COMMISSIONER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
self.nodes[ROUTER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[ED].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ED].get_state(), 'child')
self.nodes[SED].start()
self.simulator.go(5)
self.assertEqual(self.nodes[SED].get_state(), 'child')
self.nodes[COMMISSIONER].commissioner_start()
self.simulator.go(3)
self.nodes[COMMISSIONER].send_mgmt_pending_set(pending_timestamp=10,
active_timestamp=70,
delay_timer=60000,
channel=COMMISSIONER_PENDING_CHANNEL,
panid=COMMISSIONER_PENDING_PANID)
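        # The pending dataset carries a 60 second delay timer; once it expires the
        # nodes that stayed attached switch to the new channel/PAN ID, while the
        # devices reset below only pick it up after they rejoin the network.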
self.simulator.go(5)
self.nodes[ROUTER].reset()
self.nodes[ED].reset()
self.nodes[SED].reset()
self.simulator.go(60)
self.assertEqual(self.nodes[LEADER].get_panid(), COMMISSIONER_PENDING_PANID)
self.assertEqual(self.nodes[COMMISSIONER].get_panid(), COMMISSIONER_PENDING_PANID)
self.assertEqual(self.nodes[LEADER].get_channel(), COMMISSIONER_PENDING_CHANNEL)
self.assertEqual(self.nodes[COMMISSIONER].get_channel(), COMMISSIONER_PENDING_CHANNEL)
# reset the devices here again to simulate the fact that the devices were disabled the entire time
self.nodes[ROUTER].reset()
self._setUpRouter()
self.nodes[ROUTER].start()
self.nodes[ED].reset()
self._setUpEd()
self.nodes[ED].start()
self.nodes[SED].reset()
self._setUpSed()
self.nodes[SED].start()
self.assertEqual(self.nodes[ROUTER].get_panid(), PANID_INIT)
self.assertEqual(self.nodes[ED].get_panid(), PANID_INIT)
self.assertEqual(self.nodes[SED].get_panid(), PANID_INIT)
self.assertEqual(self.nodes[ROUTER].get_channel(), CHANNEL_INIT)
self.assertEqual(self.nodes[ED].get_channel(), CHANNEL_INIT)
self.assertEqual(self.nodes[SED].get_channel(), CHANNEL_INIT)
self.simulator.go(10)
self.assertEqual(self.nodes[ROUTER].get_panid(), COMMISSIONER_PENDING_PANID)
self.assertEqual(self.nodes[ED].get_panid(), COMMISSIONER_PENDING_PANID)
self.assertEqual(self.nodes[SED].get_panid(), COMMISSIONER_PENDING_PANID)
self.assertEqual(self.nodes[ROUTER].get_channel(), COMMISSIONER_PENDING_CHANNEL)
self.assertEqual(self.nodes[ED].get_channel(), COMMISSIONER_PENDING_CHANNEL)
self.assertEqual(self.nodes[SED].get_channel(), COMMISSIONER_PENDING_CHANNEL)
self.simulator.go(5)
ipaddrs = self.nodes[ROUTER].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
self.assertTrue(self.nodes[LEADER].ping(ipaddr))
ipaddrs = self.nodes[ED].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
self.assertTrue(self.nodes[LEADER].ping(ipaddr))
ipaddrs = self.nodes[SED].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
self.assertTrue(self.nodes[LEADER].ping(ipaddr))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 2,855,262,090,080,147,500 | 39.111675 | 116 | 0.65553 | false |
ganeti-github-testing/ganeti-test-1 | lib/client/gnt_storage.py | 10 | 6269 | #
#
# Copyright (C) 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""External Storage related commands"""
# pylint: disable=W0401,W0613,W0614,C0103
# W0401: Wildcard import ganeti.cli
# W0613: Unused argument, since all functions follow the same API
# W0614: Unused import %s from wildcard import (since we need cli)
# C0103: Invalid name gnt-storage
from ganeti.cli import *
from ganeti import opcodes
from ganeti import utils
def ShowExtStorageInfo(opts, args):
"""List detailed information about ExtStorage providers.
@param opts: the command line options selected by the user
@type args: list
@param args: empty list or list of ExtStorage providers' names
@rtype: int
@return: the desired exit code
"""
op = opcodes.OpExtStorageDiagnose(output_fields=["name", "nodegroup_status",
"parameters"],
names=[])
result = SubmitOpCode(op, opts=opts)
if not result:
ToStderr("Can't get the ExtStorage providers list")
return 1
do_filter = bool(args)
for (name, nodegroup_data, parameters) in result:
if do_filter:
if name not in args:
continue
else:
args.remove(name)
nodegroups_valid = []
for nodegroup_name, nodegroup_status in nodegroup_data.iteritems():
if nodegroup_status:
nodegroups_valid.append(nodegroup_name)
ToStdout("%s:", name)
if nodegroups_valid != []:
ToStdout(" - Valid for nodegroups:")
for ndgrp in utils.NiceSort(nodegroups_valid):
ToStdout(" %s", ndgrp)
ToStdout(" - Supported parameters:")
for pname, pdesc in parameters:
ToStdout(" %s: %s", pname, pdesc)
else:
ToStdout(" - Invalid for all nodegroups")
ToStdout("")
if args:
for name in args:
ToStdout("%s: Not Found", name)
ToStdout("")
return 0
def _ExtStorageStatus(status, diagnose):
"""Beautifier function for ExtStorage status.
@type status: boolean
@param status: is the ExtStorage provider valid
@type diagnose: string
@param diagnose: the error message for invalid ExtStorages
@rtype: string
@return: a formatted status
"""
if status:
return "valid"
else:
return "invalid - %s" % diagnose
def DiagnoseExtStorage(opts, args):
"""Analyse all ExtStorage providers.
@param opts: the command line options selected by the user
@type args: list
@param args: should be an empty list
@rtype: int
@return: the desired exit code
"""
op = opcodes.OpExtStorageDiagnose(output_fields=["name", "node_status",
"nodegroup_status"],
names=[])
result = SubmitOpCode(op, opts=opts)
if not result:
ToStderr("Can't get the list of ExtStorage providers")
return 1
for provider_name, node_data, nodegroup_data in result:
nodes_valid = {}
nodes_bad = {}
nodegroups_valid = {}
nodegroups_bad = {}
# Per node diagnose
for node_name, node_info in node_data.iteritems():
if node_info: # at least one entry in the per-node list
(fo_path, fo_status, fo_msg, fo_params) = node_info.pop(0)
fo_msg = "%s (path: %s)" % (_ExtStorageStatus(fo_status, fo_msg),
fo_path)
if fo_params:
fo_msg += (" [parameters: %s]" %
utils.CommaJoin([v[0] for v in fo_params]))
else:
fo_msg += " [no parameters]"
if fo_status:
nodes_valid[node_name] = fo_msg
else:
nodes_bad[node_name] = fo_msg
else:
nodes_bad[node_name] = "ExtStorage provider not found"
# Per nodegroup diagnose
for nodegroup_name, nodegroup_status in nodegroup_data.iteritems():
status = nodegroup_status
if status:
nodegroups_valid[nodegroup_name] = "valid"
else:
nodegroups_bad[nodegroup_name] = "invalid"
def _OutputPerNodegroupStatus(msg_map):
map_k = utils.NiceSort(msg_map.keys())
for nodegroup in map_k:
ToStdout(" For nodegroup: %s --> %s", nodegroup,
msg_map[nodegroup])
def _OutputPerNodeStatus(msg_map):
map_k = utils.NiceSort(msg_map.keys())
for node_name in map_k:
ToStdout(" Node: %s, status: %s", node_name, msg_map[node_name])
# Print the output
st_msg = "Provider: %s" % provider_name
ToStdout(st_msg)
ToStdout("---")
_OutputPerNodeStatus(nodes_valid)
_OutputPerNodeStatus(nodes_bad)
ToStdout(" --")
_OutputPerNodegroupStatus(nodegroups_valid)
_OutputPerNodegroupStatus(nodegroups_bad)
ToStdout("")
return 0
commands = {
"diagnose": (
DiagnoseExtStorage, ARGS_NONE, [PRIORITY_OPT],
"", "Diagnose all ExtStorage providers"),
"info": (
ShowExtStorageInfo, [ArgOs()], [PRIORITY_OPT],
"", "Show info about ExtStorage providers"),
}
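# Example invocations of the resulting "gnt-storage" client (a sketch):
#   gnt-storage diagnose
#   gnt-storage info <provider-name>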
def Main():
return GenericMain(commands)
| bsd-2-clause | 4,203,276,629,836,977,700 | 29.580488 | 78 | 0.652736 | false |
InfinitiveOS/external_skia | platform_tools/android/tests/gyp_to_android_tests.py | 68 | 1434 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test gyp_to_android.py
"""
import os
import shutil
import sys
import tempfile
import test_variables
import unittest
# Path to gyp_to_android
sys.path.append(test_variables.BIN_DIR)
import gyp_to_android
class AndroidMkCreationTest(unittest.TestCase):
def setUp(self):
# Create a temporary directory for storing the output (Android.mk)
self.__tmp_dir = tempfile.mkdtemp()
def test_create(self):
gyp_to_android.main(self.__tmp_dir)
# Now there should be a file named 'Android.mk' inside __tmp_dir
path_to_android_mk = os.path.join(self.__tmp_dir,
test_variables.ANDROID_MK)
self.assertTrue(os.path.exists(path_to_android_mk))
# In addition, there should be an 'Android.mk' inside /tests/
path_to_tests_android_mk = os.path.join(self.__tmp_dir, 'tests',
test_variables.ANDROID_MK)
self.assertTrue(os.path.exists(path_to_tests_android_mk))
def tearDown(self):
# Remove self.__tmp_dir, which is no longer needed.
shutil.rmtree(self.__tmp_dir)
def main():
loader = unittest.TestLoader()
suite = loader.loadTestsFromTestCase(AndroidMkCreationTest)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
main()
| bsd-3-clause | 539,606,457,793,804,900 | 24.607143 | 72 | 0.671548 | false |
sorenk/ansible | lib/ansible/modules/storage/netapp/netapp_e_volume_copy.py | 14 | 16472 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_volume_copy
short_description: Create volume copy pairs
description:
    - Create and delete volume copy pairs on NetApp E-series storage arrays.
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
extends_documentation_fragment:
- netapp.eseries
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API, for example C(https://prod-1.wahoo.acme.com/devmgr/v2).
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
source_volume_id:
description:
- The id of the volume copy source.
- If used, must be paired with destination_volume_id
- Mutually exclusive with volume_copy_pair_id, and search_volume_id
destination_volume_id:
description:
- The id of the volume copy destination.
- If used, must be paired with source_volume_id
- Mutually exclusive with volume_copy_pair_id, and search_volume_id
volume_copy_pair_id:
description:
- The id of a given volume copy pair
- Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id
- Can use to delete or check presence of volume pairs
- Must specify this or (destination_volume_id and source_volume_id)
state:
description:
- Whether the specified volume copy pair should exist or not.
required: True
choices: ['present', 'absent']
create_copy_pair_if_does_not_exist:
description:
- Defines if a copy pair will be created if it does not exist.
- If set to True destination_volume_id and source_volume_id are required.
choices: [True, False]
default: True
start_stop_copy:
description:
- starts a re-copy or stops a copy in progress
- "Note: If you stop the initial file copy before it it done the copy pair will be destroyed"
- Requires volume_copy_pair_id
search_volume_id:
description:
- Searches for all valid potential target and source volumes that could be used in a copy_pair
- Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id
"""
RESULTS = """
"""
EXAMPLES = """
---
msg:
description: Success message
returned: success
type: string
sample: Json facts for the volume copy that was created.
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
sample: Created Volume Copy Pair with ID
"""
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.netapp import request
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
url = params['api_url'] + get_status
(rc, resp) = request(url, method='GET', url_username=params['api_username'],
url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
volume_copy_pair_id = None
    for potential_copy_pair in resp:
        if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
            # the original module repeated the source check here; matching the copy
            # destination is the apparent intent (the 'targetVolume' field name is assumed)
            if potential_copy_pair.get('targetVolume') == params['destination_volume_id']:
                volume_copy_pair_id = potential_copy_pair['id']
return volume_copy_pair_id
def create_copy_pair(params):
get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
url = params['api_url'] + get_status
rData = {
"sourceId": params['source_volume_id'],
"targetId": params['destination_volume_id']
}
(rc, resp) = request(url, data=json.dumps(rData), ignore_errors=True, method='POST',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 200:
return False, (rc, resp)
else:
return True, (rc, resp)
def delete_copy_pair_by_copy_pair_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(rc, resp) = request(url, ignore_errors=True, method='DELETE',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 204:
return False, (rc, resp)
else:
return True, (rc, resp)
def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(rc, resp) = request(url, ignore_errors=True, method='DELETE',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 200:
return False, (rc, resp)
else:
return True, (rc, resp)
def start_stop_copy(params):
get_status = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % (
params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy'])
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='POST',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
return True, response_data[0]['percentComplete']
else:
return False, response_data
def check_copy_status(params):
get_status = 'storage-systems/%s/volume-copy-jobs-control/%s' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
if response_data['percentComplete'] != -1:
return True, response_data['percentComplete']
else:
return False, response_data['percentComplete']
else:
return False, response_data
def find_valid_copy_pair_targets_and_sources(params):
get_status = 'storage-systems/%s/volumes' % params['ssid']
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
source_capacity = None
candidates = []
for volume in response_data:
if volume['id'] == params['search_volume_id']:
source_capacity = volume['capacity']
else:
candidates.append(volume)
potential_sources = []
potential_targets = []
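        # Volumes larger than the search volume that are not already acting as a copy
        # source or target are offered as potential targets; the remaining idle
        # volumes are offered as potential sources.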
for volume in candidates:
if volume['capacity'] > source_capacity:
if volume['volumeCopyTarget'] is False:
if volume['volumeCopySource'] is False:
potential_targets.append(volume['id'])
else:
if volume['volumeCopyTarget'] is False:
if volume['volumeCopySource'] is False:
potential_sources.append(volume['id'])
return potential_targets, potential_sources
else:
raise Exception("Response [%s]" % response_code)
def main():
module = AnsibleModule(argument_spec=dict(
source_volume_id=dict(type='str'),
destination_volume_id=dict(type='str'),
copy_priority=dict(required=False, default=0, type='int'),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
validate_certs=dict(required=False, default=True),
targetWriteProtected=dict(required=False, default=True, type='bool'),
onlineCopy=dict(required=False, default=False, type='bool'),
volume_copy_pair_id=dict(type='str'),
status=dict(required=True, choices=['present', 'absent'], type='str'),
create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'),
start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'),
search_volume_id=dict(type='str'),
),
mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'],
['volume_copy_pair_id', 'source_volume_id'],
['volume_copy_pair_id', 'search_volume_id'],
['search_volume_id', 'destination_volume_id'],
['search_volume_id', 'source_volume_id'],
],
required_together=[['source_volume_id', 'destination_volume_id'],
],
required_if=[["create_copy_pair_if_does_not_exist", True, ['source_volume_id', 'destination_volume_id'], ],
["start_stop_copy", 'stop', ['volume_copy_pair_id'], ],
["start_stop_copy", 'start', ['volume_copy_pair_id'], ],
]
)
params = module.params
if not params['api_url'].endswith('/'):
params['api_url'] += '/'
# Check if we want to search
if params['search_volume_id'] is not None:
try:
potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params)
except Exception as e:
module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % to_native(e))
module.exit_json(changed=False,
msg=' Valid source devices found: %s Valid target devices found: %s' % (len(potential_sources), len(potential_targets)),
search_volume_id=params['search_volume_id'],
valid_targets=potential_targets,
valid_sources=potential_sources)
# Check if we want to start or stop a copy operation
if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop':
# Get the current status info
currenty_running, status_info = check_copy_status(params)
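        # The start/stop handling below is idempotent: if the copy is already in the
        # requested state the module exits with changed=False instead of re-issuing
        # the request.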
# If we want to start
if params['start_stop_copy'] == 'start':
# If we have already started
if currenty_running is True:
module.exit_json(changed=False, msg='Volume Copy Pair copy has started.',
volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info)
# If we need to start
else:
start_status, info = start_stop_copy(params)
if start_status is True:
module.exit_json(changed=True, msg='Volume Copy Pair copy has started.',
volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info)
else:
module.fail_json(msg="Could not start volume copy pair Error: %s" % info)
# If we want to stop
else:
# If it has already stopped
if currenty_running is False:
module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.',
volume_copy_pair_id=params['volume_copy_pair_id'])
# If we need to stop it
else:
start_status, info = start_stop_copy(params)
if start_status is True:
module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.',
volume_copy_pair_id=params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not stop volume copy pair Error: %s" % info)
# If we want the copy pair to exist we do this stuff
if params['status'] == 'present':
# We need to check if it exists first
if params['volume_copy_pair_id'] is None:
params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
params)
# If no volume copy pair is found we need need to make it.
if params['volume_copy_pair_id'] is None:
# In order to create we can not do so with just a volume_copy_pair_id
copy_began_status, (rc, resp) = create_copy_pair(params)
if copy_began_status is True:
module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id'])
else:
module.fail_json(msg="Could not create volume copy pair Code: %s Error: %s" % (rc, resp))
# If it does exist we do nothing
else:
# We verify that it exists
exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id(
params)
if exist_status:
module.exit_json(changed=False,
msg=' Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id'])
else:
if exist_status_code == 404:
module.fail_json(
msg=' Volume Copy Pair with ID: %s does not exist. Can not create without source_volume_id and destination_volume_id' %
params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not find volume copy pair Code: %s Error: %s" % (
exist_status_code, exist_status_data))
module.fail_json(msg="Done")
# If we want it to not exist we do this
else:
if params['volume_copy_pair_id'] is None:
params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
params)
# We delete it by the volume_copy_pair_id
delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params)
if delete_status is True:
module.exit_json(changed=True,
msg=' Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id'])
else:
if delete_status_code == 404:
module.exit_json(changed=False,
msg=' Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not delete volume copy pair Code: %s Error: %s" % (
delete_status_code, delete_status_data))
if __name__ == '__main__':
main()
| gpl-3.0 | 5,547,773,659,108,487,000 | 40.18 | 145 | 0.580561 | false |
franosincic/edx-platform | cms/djangoapps/contentstore/tests/test_contentstore.py | 15 | 93797 | # -*- coding: utf-8 -*-
import copy
import mock
from mock import patch
import shutil
import lxml.html
from lxml import etree
import ddt
from datetime import timedelta
from fs.osfs import OSFS
from json import loads
from path import Path as path
from textwrap import dedent
from uuid import uuid4
from functools import wraps
from unittest import SkipTest
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from openedx.core.lib.tempdir import mkdtemp_clean
from common.test.utils import XssTestMixin
from contentstore.tests.utils import parse_json, AjaxEnabledTestClient, CourseTestCase
from contentstore.views.component import ADVANCED_COMPONENT_TYPES
from edxval.api import create_video, get_videos_for_course
from xmodule.contentstore.django import contentstore
from xmodule.contentstore.utils import restore_asset_from_trashcan, empty_asset_trashcan
from xmodule.exceptions import InvalidVersionError
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.inheritance import own_metadata
from opaque_keys.edx.keys import UsageKey, CourseKey
from opaque_keys.edx.locations import AssetLocation, CourseLocator
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, LibraryFactory, check_mongo_calls
from xmodule.modulestore.xml_exporter import export_course_to_xml
from xmodule.modulestore.xml_importer import import_course_from_xml, perform_xlint
from xmodule.capa_module import CapaDescriptor
from xmodule.course_module import CourseDescriptor, Textbook
from xmodule.seq_module import SequenceDescriptor
from contentstore.utils import delete_course_and_groups, reverse_url, reverse_course_url
from django_comment_common.utils import are_permissions_roles_seeded
from student import auth
from student.models import CourseEnrollment
from student.roles import CourseCreatorRole, CourseInstructorRole
from opaque_keys import InvalidKeyError
from contentstore.tests.utils import get_url
from course_action_state.models import CourseRerunState, CourseRerunUIStateManager
from course_action_state.managers import CourseActionStateItemNotFoundError
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore.django import modulestore
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
def requires_pillow_jpeg(func):
"""
A decorator to indicate that the function requires JPEG support for Pillow,
otherwise it cannot be run
"""
@wraps(func)
def decorated_func(*args, **kwargs):
"""
Execute the function if we have JPEG support in Pillow.
"""
try:
from PIL import Image
except ImportError:
raise SkipTest("Pillow is not installed (or not found)")
if not getattr(Image.core, "jpeg_decoder", False):
raise SkipTest("Pillow cannot open JPEG files")
return func(*args, **kwargs)
return decorated_func
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class ContentStoreTestCase(CourseTestCase):
"""
Base class for Content Store Test Cases
"""
class ImportRequiredTestCases(ContentStoreTestCase):
"""
Tests which legitimately need to import a course
"""
def test_no_static_link_rewrites_on_import(self):
course_items = import_course_from_xml(
self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True
)
course = course_items[0]
handouts_usage_key = course.id.make_usage_key('course_info', 'handouts')
handouts = self.store.get_item(handouts_usage_key)
self.assertIn('/static/', handouts.data)
handouts_usage_key = course.id.make_usage_key('html', 'toyhtml')
handouts = self.store.get_item(handouts_usage_key)
self.assertIn('/static/', handouts.data)
def test_xlint_fails(self):
err_cnt = perform_xlint(TEST_DATA_DIR, ['toy'])
self.assertGreater(err_cnt, 0)
def test_invalid_asset_overwrite(self):
"""
Tests that an asset with invalid displayname can be overwritten if multiple assets have same displayname.
        It verifies that:
During import, if ('/') or ('\') is present in displayname of an asset, it is replaced with underscores '_'.
            Export does not fail when an asset has '/' in its displayname. If the converted displayname matches
            any other asset, that asset will be overwritten.
Asset name in XML: "/invalid\\displayname/subs-esLhHcdKGWvKs.srt"
"""
content_store = contentstore()
expected_displayname = '_invalid_displayname_subs-esLhHcdKGWvKs.srt'
import_course_from_xml(
self.store,
self.user.id,
TEST_DATA_DIR,
['import_draft_order'],
static_content_store=content_store,
verbose=True,
create_if_not_present=True
)
# Verify the course has imported successfully
course = self.store.get_course(self.store.make_course_key(
'test_org',
'import_draft_order',
'import_draft_order'
))
self.assertIsNotNone(course)
# Add a new asset in the course, and make sure to name it such that it overwrite the one existing
# asset in the course. (i.e. _invalid_displayname_subs-esLhHcdKGWvKs.srt)
asset_key = course.id.make_asset_key('asset', 'sample_asset.srt')
content = StaticContent(
asset_key, expected_displayname, 'application/text', 'test',
)
content_store.save(content)
# Get & verify that course actually has two assets
assets, count = content_store.get_all_content_for_course(course.id)
self.assertEqual(count, 2)
# Verify both assets have similar `displayname` after saving.
for asset in assets:
self.assertEquals(asset['displayname'], expected_displayname)
# Test course export does not fail
root_dir = path(mkdtemp_clean())
print 'Exporting to tempdir = {0}'.format(root_dir)
export_course_to_xml(self.store, content_store, course.id, root_dir, 'test_export')
filesystem = OSFS(root_dir / 'test_export/static')
exported_static_files = filesystem.listdir()
# Verify that asset have been overwritten during export.
self.assertEqual(len(exported_static_files), 1)
self.assertTrue(filesystem.exists(expected_displayname))
self.assertEqual(exported_static_files[0], expected_displayname)
# Remove exported course
shutil.rmtree(root_dir)
def test_about_overrides(self):
'''
This test case verifies that a course can use specialized override for about data,
e.g. /about/Fall_2012/effort.html
while there is a base definition in /about/effort.html
'''
course_items = import_course_from_xml(
self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True
)
course_key = course_items[0].id
effort = self.store.get_item(course_key.make_usage_key('about', 'effort'))
self.assertEqual(effort.data, '6 hours')
# this one should be in a non-override folder
effort = self.store.get_item(course_key.make_usage_key('about', 'end_date'))
self.assertEqual(effort.data, 'TBD')
@requires_pillow_jpeg
def test_asset_import(self):
'''
This test validates that an image asset is imported and a thumbnail was generated for a .gif
'''
content_store = contentstore()
import_course_from_xml(
self.store, self.user.id, TEST_DATA_DIR, ['toy'], static_content_store=content_store, verbose=True,
create_if_not_present=True
)
course = self.store.get_course(self.store.make_course_key('edX', 'toy', '2012_Fall'))
self.assertIsNotNone(course)
# make sure we have some assets in our contentstore
all_assets, __ = content_store.get_all_content_for_course(course.id)
self.assertGreater(len(all_assets), 0)
# make sure we have some thumbnails in our contentstore
all_thumbnails = content_store.get_all_content_thumbnails_for_course(course.id)
self.assertGreater(len(all_thumbnails), 0)
location = AssetLocation.from_deprecated_string('/c4x/edX/toy/asset/just_a_test.jpg')
content = content_store.find(location)
self.assertIsNotNone(content)
self.assertIsNotNone(content.thumbnail_location)
thumbnail = content_store.find(content.thumbnail_location)
self.assertIsNotNone(thumbnail)
def test_course_info_updates_import_export(self):
"""
Test that course info updates are imported and exported with all content fields ('data', 'items')
"""
content_store = contentstore()
data_dir = TEST_DATA_DIR
courses = import_course_from_xml(
self.store, self.user.id, data_dir, ['course_info_updates'],
static_content_store=content_store, verbose=True, create_if_not_present=True
)
course = courses[0]
self.assertIsNotNone(course)
course_updates = self.store.get_item(course.id.make_usage_key('course_info', 'updates'))
self.assertIsNotNone(course_updates)
# check that course which is imported has files 'updates.html' and 'updates.items.json'
filesystem = OSFS(data_dir + '/course_info_updates/info')
self.assertTrue(filesystem.exists('updates.html'))
self.assertTrue(filesystem.exists('updates.items.json'))
# verify that course info update module has same data content as in data file from which it is imported
# check 'data' field content
with filesystem.open('updates.html', 'r') as course_policy:
on_disk = course_policy.read()
self.assertEqual(course_updates.data, on_disk)
# check 'items' field content
with filesystem.open('updates.items.json', 'r') as course_policy:
on_disk = loads(course_policy.read())
self.assertEqual(course_updates.items, on_disk)
# now export the course to a tempdir and test that it contains files 'updates.html' and 'updates.items.json'
# with same content as in course 'info' directory
root_dir = path(mkdtemp_clean())
print 'Exporting to tempdir = {0}'.format(root_dir)
export_course_to_xml(self.store, content_store, course.id, root_dir, 'test_export')
# check that exported course has files 'updates.html' and 'updates.items.json'
filesystem = OSFS(root_dir / 'test_export/info')
self.assertTrue(filesystem.exists('updates.html'))
self.assertTrue(filesystem.exists('updates.items.json'))
# verify that exported course has same data content as in course_info_update module
with filesystem.open('updates.html', 'r') as grading_policy:
on_disk = grading_policy.read()
self.assertEqual(on_disk, course_updates.data)
with filesystem.open('updates.items.json', 'r') as grading_policy:
on_disk = loads(grading_policy.read())
self.assertEqual(on_disk, course_updates.items)
def test_rewrite_nonportable_links_on_import(self):
content_store = contentstore()
import_course_from_xml(
self.store, self.user.id, TEST_DATA_DIR, ['toy'],
static_content_store=content_store, create_if_not_present=True
)
# first check a static asset link
course_key = self.store.make_course_key('edX', 'toy', 'run')
html_module_location = course_key.make_usage_key('html', 'nonportable')
html_module = self.store.get_item(html_module_location)
self.assertIn('/static/foo.jpg', html_module.data)
# then check a intra courseware link
html_module_location = course_key.make_usage_key('html', 'nonportable_link')
html_module = self.store.get_item(html_module_location)
self.assertIn('/jump_to_id/nonportable_link', html_module.data)
def verify_content_existence(self, store, root_dir, course_id, dirname, category_name, filename_suffix=''):
filesystem = OSFS(root_dir / 'test_export')
self.assertTrue(filesystem.exists(dirname))
items = store.get_items(course_id, qualifiers={'category': category_name})
for item in items:
filesystem = OSFS(root_dir / ('test_export/' + dirname))
self.assertTrue(filesystem.exists(item.location.name + filename_suffix))
@mock.patch('xmodule.course_module.requests.get')
def test_export_course_roundtrip(self, mock_get):
mock_get.return_value.text = dedent("""
<?xml version="1.0"?><table_of_contents>
<entry page="5" page_label="ii" name="Table of Contents"/>
</table_of_contents>
""").strip()
content_store = contentstore()
course_id = self.import_and_populate_course()
root_dir = path(mkdtemp_clean())
print 'Exporting to tempdir = {0}'.format(root_dir)
# export out to a tempdir
export_course_to_xml(self.store, content_store, course_id, root_dir, 'test_export')
# check for static tabs
self.verify_content_existence(self.store, root_dir, course_id, 'tabs', 'static_tab', '.html')
# check for about content
self.verify_content_existence(self.store, root_dir, course_id, 'about', 'about', '.html')
# assert that there is an html and video directory in drafts:
draft_dir = OSFS(root_dir / 'test_export/drafts')
self.assertTrue(draft_dir.exists('html'))
self.assertTrue(draft_dir.exists('video'))
# and assert that they contain the created modules
self.assertIn(self.DRAFT_HTML + ".xml", draft_dir.listdir('html'))
self.assertIn(self.DRAFT_VIDEO + ".xml", draft_dir.listdir('video'))
# and assert the child of the orphaned draft wasn't exported
self.assertNotIn(self.ORPHAN_DRAFT_HTML + ".xml", draft_dir.listdir('html'))
# check for grading_policy.json
filesystem = OSFS(root_dir / 'test_export/policies/2012_Fall')
self.assertTrue(filesystem.exists('grading_policy.json'))
course = self.store.get_course(course_id)
# compare what's on disk compared to what we have in our course
with filesystem.open('grading_policy.json', 'r') as grading_policy:
on_disk = loads(grading_policy.read())
self.assertEqual(on_disk, course.grading_policy)
# check for policy.json
self.assertTrue(filesystem.exists('policy.json'))
# compare what's on disk to what we have in the course module
with filesystem.open('policy.json', 'r') as course_policy:
on_disk = loads(course_policy.read())
self.assertIn('course/2012_Fall', on_disk)
self.assertEqual(on_disk['course/2012_Fall'], own_metadata(course))
# remove old course
self.store.delete_course(course_id, self.user.id)
# reimport over old course
self.check_import(root_dir, content_store, course_id)
# import to different course id
new_course_id = self.store.make_course_key('anotherX', 'anotherToy', 'Someday')
self.check_import(root_dir, content_store, new_course_id)
self.assertCoursesEqual(course_id, new_course_id)
shutil.rmtree(root_dir)
def check_import(self, root_dir, content_store, course_id):
"""Imports the course in root_dir into the given course_id and verifies its content"""
# reimport
import_course_from_xml(
self.store,
self.user.id,
root_dir,
['test_export'],
static_content_store=content_store,
target_id=course_id,
)
# verify content of the course
self.check_populated_course(course_id)
# verify additional export attributes
def verify_export_attrs_removed(attributes):
"""Verifies all temporary attributes added during export are removed"""
self.assertNotIn('index_in_children_list', attributes)
self.assertNotIn('parent_sequential_url', attributes)
self.assertNotIn('parent_url', attributes)
vertical = self.store.get_item(course_id.make_usage_key('vertical', self.TEST_VERTICAL))
verify_export_attrs_removed(vertical.xml_attributes)
for child in vertical.get_children():
verify_export_attrs_removed(child.xml_attributes)
if hasattr(child, 'data'):
verify_export_attrs_removed(child.data)
def test_export_course_with_metadata_only_video(self):
content_store = contentstore()
import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True)
course_id = self.store.make_course_key('edX', 'toy', '2012_Fall')
# create a new video module and add it as a child to a vertical
# this re-creates a bug whereby since the video template doesn't have
# anything in 'data' field, the export was blowing up
verticals = self.store.get_items(course_id, qualifiers={'category': 'vertical'})
self.assertGreater(len(verticals), 0)
parent = verticals[0]
ItemFactory.create(parent_location=parent.location, category="video", display_name="untitled")
root_dir = path(mkdtemp_clean())
print 'Exporting to tempdir = {0}'.format(root_dir)
# export out to a tempdir
export_course_to_xml(self.store, content_store, course_id, root_dir, 'test_export')
shutil.rmtree(root_dir)
def test_export_course_with_metadata_only_word_cloud(self):
"""
Similar to `test_export_course_with_metadata_only_video`.
"""
content_store = contentstore()
import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['word_cloud'], create_if_not_present=True)
course_id = self.store.make_course_key('HarvardX', 'ER22x', '2013_Spring')
verticals = self.store.get_items(course_id, qualifiers={'category': 'vertical'})
self.assertGreater(len(verticals), 0)
parent = verticals[0]
ItemFactory.create(parent_location=parent.location, category="word_cloud", display_name="untitled")
root_dir = path(mkdtemp_clean())
print 'Exporting to tempdir = {0}'.format(root_dir)
# export out to a tempdir
export_course_to_xml(self.store, content_store, course_id, root_dir, 'test_export')
shutil.rmtree(root_dir)
def test_import_after_renaming_xml_data(self):
"""
Test that import works fine on split mongo after renaming the blocks url.
"""
split_store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.split) # pylint: disable=W0212
import_course_from_xml(
split_store, self.user.id, TEST_DATA_DIR,
['course_before_rename'],
create_if_not_present=True
)
course_after_rename = import_course_from_xml(
split_store, self.user.id, TEST_DATA_DIR,
['course_after_rename'],
create_if_not_present=True
)
all_items = split_store.get_items(course_after_rename[0].id, qualifiers={'category': 'chapter'})
renamed_chapter = [item for item in all_items if item.location.block_id == 'renamed_chapter'][0]
self.assertIsNotNone(renamed_chapter.published_on)
self.assertIsNotNone(renamed_chapter.parent)
self.assertTrue(renamed_chapter.location in course_after_rename[0].children)
original_chapter = [item for item in all_items
if item.location.block_id == 'b9870b9af59841a49e6e02765d0e3bbf'][0]
self.assertIsNone(original_chapter.published_on)
self.assertIsNone(original_chapter.parent)
self.assertFalse(original_chapter.location in course_after_rename[0].children)
def test_empty_data_roundtrip(self):
"""
Test that an empty `data` field is preserved through
export/import.
"""
content_store = contentstore()
import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True)
course_id = self.store.make_course_key('edX', 'toy', '2012_Fall')
verticals = self.store.get_items(course_id, qualifiers={'category': 'vertical'})
self.assertGreater(len(verticals), 0)
parent = verticals[0]
# Create a module, and ensure that its `data` field is empty
word_cloud = ItemFactory.create(parent_location=parent.location, category="word_cloud", display_name="untitled")
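        # deleting the XBlock field value reverts it to the field's default, which for 'data' is an empty string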
del word_cloud.data
self.assertEquals(word_cloud.data, '')
# Export the course
root_dir = path(mkdtemp_clean())
export_course_to_xml(self.store, content_store, course_id, root_dir, 'test_roundtrip')
        # Reimport and get the word cloud back
import_course_from_xml(self.store, self.user.id, root_dir)
imported_word_cloud = self.store.get_item(course_id.make_usage_key('word_cloud', 'untitled'))
# It should now contain empty data
self.assertEquals(imported_word_cloud.data, '')
def test_html_export_roundtrip(self):
"""
Test that a course which has HTML that has style formatting is preserved in export/import
"""
content_store = contentstore()
import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True)
course_id = self.store.make_course_key('edX', 'toy', '2012_Fall')
# Export the course
root_dir = path(mkdtemp_clean())
export_course_to_xml(self.store, content_store, course_id, root_dir, 'test_roundtrip')
        # Reimport the course
import_course_from_xml(self.store, self.user.id, root_dir, create_if_not_present=True)
# get the sample HTML with styling information
html_module = self.store.get_item(course_id.make_usage_key('html', 'with_styling'))
self.assertIn('<p style="font:italic bold 72px/30px Georgia, serif; color: red; ">', html_module.data)
# get the sample HTML with just a simple <img> tag information
html_module = self.store.get_item(course_id.make_usage_key('html', 'just_img'))
self.assertIn('<img src="/static/foo_bar.jpg" />', html_module.data)
def test_export_course_without_content_store(self):
# Create toy course
course_items = import_course_from_xml(
self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True
)
course_id = course_items[0].id
root_dir = path(mkdtemp_clean())
print 'Exporting to tempdir = {0}'.format(root_dir)
export_course_to_xml(self.store, None, course_id, root_dir, 'test_export_no_content_store')
# Delete the course from module store and reimport it
self.store.delete_course(course_id, self.user.id)
import_course_from_xml(
self.store, self.user.id, root_dir, ['test_export_no_content_store'],
static_content_store=None,
target_id=course_id
)
# Verify reimported course
items = self.store.get_items(
course_id,
qualifiers={
'category': 'sequential',
'name': 'vertical_sequential',
}
)
self.assertEqual(len(items), 1)
def test_export_course_no_xml_attributes(self):
"""
Test that a module without an `xml_attributes` attr will still be
exported successfully
"""
content_store = contentstore()
import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True)
course_id = self.store.make_course_key('edX', 'toy', '2012_Fall')
verticals = self.store.get_items(course_id, qualifiers={'category': 'vertical'})
vertical = verticals[0]
# create OpenAssessmentBlock:
open_assessment = ItemFactory.create(
parent_location=vertical.location,
category="openassessment",
display_name="untitled",
)
# convert it to draft
draft_open_assessment = self.store.convert_to_draft(
open_assessment.location, self.user.id
)
# note that it has no `xml_attributes` attribute
self.assertFalse(hasattr(draft_open_assessment, "xml_attributes"))
# export should still complete successfully
root_dir = path(mkdtemp_clean())
export_course_to_xml(
self.store,
content_store,
course_id,
root_dir,
'test_no_xml_attributes'
)
@ddt.ddt
class MiscCourseTests(ContentStoreTestCase):
"""
Tests that rely on the toy courses.
"""
def setUp(self):
super(MiscCourseTests, self).setUp()
# save locs not items b/c the items won't have the subsequently created children in them until refetched
self.chapter_loc = self.store.create_child(
self.user.id, self.course.location, 'chapter', 'test_chapter'
).location
self.seq_loc = self.store.create_child(
self.user.id, self.chapter_loc, 'sequential', 'test_seq'
).location
self.vert_loc = self.store.create_child(self.user.id, self.seq_loc, 'vertical', 'test_vert').location
# now create some things quasi like the toy course had
self.problem = self.store.create_child(
self.user.id, self.vert_loc, 'problem', 'test_problem', fields={
"data": "<problem>Test</problem>"
}
)
self.store.create_child(
self.user.id, self.vert_loc, 'video', fields={
"youtube_id_0_75": "JMD_ifUUfsU",
"youtube_id_1_0": "OEoXaMPEzfM",
"youtube_id_1_25": "AKqURZnYqpk",
"youtube_id_1_5": "DYpADpL7jAY",
"name": "sample_video",
}
)
self.store.create_child(
self.user.id, self.vert_loc, 'video', fields={
"youtube_id_0_75": "JMD_ifUUfsU",
"youtube_id_1_0": "OEoXaMPEzfM",
"youtube_id_1_25": "AKqURZnYqpk",
"youtube_id_1_5": "DYpADpL7jAY",
"name": "truncated_video",
"end_time": 10.0,
}
)
self.store.create_child(
self.user.id, self.vert_loc, 'poll_question', fields={
"name": "T1_changemind_poll_foo_2",
"display_name": "Change your answer",
"question": "Have you changed your mind?",
"answers": [{"id": "yes", "text": "Yes"}, {"id": "no", "text": "No"}],
}
)
self.course = self.store.publish(self.course.location, self.user.id)
def check_components_on_page(self, component_types, expected_types):
"""
Ensure that the right types end up on the page.
component_types is the list of advanced components.
expected_types is the list of elements that should appear on the page.
expected_types and component_types should be similar, but not
exactly the same -- for example, 'video' in
component_types should cause 'Video' to be present.
"""
self.course.advanced_modules = component_types
self.store.update_item(self.course, self.user.id)
# just pick one vertical
resp = self.client.get_html(get_url('container_handler', self.vert_loc))
self.assertEqual(resp.status_code, 200)
for expected in expected_types:
self.assertIn(expected, resp.content)
@ddt.data("<script>alert(1)</script>", "alert('hi')", "</script><script>alert(1)</script>")
def test_container_handler_xss_prevent(self, malicious_code):
"""
Test that XSS attack is prevented
"""
resp = self.client.get_html(get_url('container_handler', self.vert_loc) + '?action=' + malicious_code)
self.assertEqual(resp.status_code, 200)
# Test that malicious code does not appear in html
self.assertNotIn(malicious_code, resp.content)
@patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', [])
def test_advanced_components_in_edit_unit(self):
# This could be made better, but for now let's just assert that we see the advanced modules mentioned in the page
# response HTML
self.check_components_on_page(
ADVANCED_COMPONENT_TYPES,
['Word cloud', 'Annotation', 'Text Annotation', 'Video Annotation', 'Image Annotation',
'split_test'],
)
@ddt.data('/Fake/asset/displayname', '\\Fake\\asset\\displayname')
def test_export_on_invalid_displayname(self, invalid_displayname):
""" Tests that assets with invalid 'displayname' does not cause export to fail """
content_store = contentstore()
exported_asset_name = '_Fake_asset_displayname'
        # Create an asset whose 'displayname' contains slashes (`invalid_displayname`)
asset_key = self.course.id.make_asset_key('asset', "fake_asset.txt")
content = StaticContent(
asset_key, invalid_displayname, 'application/text', 'test',
)
content_store.save(content)
# Verify that the course has only one asset and it has been added with an invalid asset name.
assets, count = content_store.get_all_content_for_course(self.course.id)
self.assertEqual(count, 1)
display_name = assets[0]['displayname']
self.assertEqual(display_name, invalid_displayname)
# Now export the course to a tempdir and test that it contains assets. The export should pass
root_dir = path(mkdtemp_clean())
print 'Exporting to tempdir = {0}'.format(root_dir)
export_course_to_xml(self.store, content_store, self.course.id, root_dir, 'test_export')
filesystem = OSFS(root_dir / 'test_export/static')
exported_static_files = filesystem.listdir()
# Verify that only single asset has been exported with the expected asset name.
self.assertTrue(filesystem.exists(exported_asset_name))
self.assertEqual(len(exported_static_files), 1)
# Remove tempdir
shutil.rmtree(root_dir)
def test_assets_overwrite(self):
""" Tests that assets will similar 'displayname' will be overwritten during export """
content_store = contentstore()
asset_displayname = 'Fake_asset.txt'
# Create two assets with similar 'displayname'
for i in range(2):
asset_path = 'sample_asset_{}.txt'.format(i)
asset_key = self.course.id.make_asset_key('asset', asset_path)
content = StaticContent(
asset_key, asset_displayname, 'application/text', 'test',
)
content_store.save(content)
        # Fetch the course assets and verify there are 2 of them.
assets, count = content_store.get_all_content_for_course(self.course.id)
self.assertEqual(count, 2)
        # Verify both assets have the same 'displayname' after saving.
for asset in assets:
self.assertEquals(asset['displayname'], asset_displayname)
# Now export the course to a tempdir and test that it contains assets.
root_dir = path(mkdtemp_clean())
print 'Exporting to tempdir = {0}'.format(root_dir)
export_course_to_xml(self.store, content_store, self.course.id, root_dir, 'test_export')
        # Verify that the assets have been overwritten during export.
filesystem = OSFS(root_dir / 'test_export/static')
exported_static_files = filesystem.listdir()
self.assertTrue(filesystem.exists(asset_displayname))
self.assertEqual(len(exported_static_files), 1)
# Remove tempdir
shutil.rmtree(root_dir)
def test_advanced_components_require_two_clicks(self):
self.check_components_on_page(['word_cloud'], ['Word cloud'])
def test_malformed_edit_unit_request(self):
# just pick one vertical
usage_key = self.course.id.make_usage_key('vertical', None)
resp = self.client.get_html(get_url('container_handler', usage_key))
self.assertEqual(resp.status_code, 400)
def test_edit_unit(self):
"""Verifies rendering the editor in all the verticals in the given test course"""
self._check_verticals([self.vert_loc])
def _get_draft_counts(self, item):
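        """Recursively count how many items in the subtree rooted at `item` are drafts."""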
cnt = 1 if getattr(item, 'is_draft', False) else 0
for child in item.get_children():
cnt = cnt + self._get_draft_counts(child)
return cnt
def test_get_items(self):
'''
This verifies a bug we had where the None setting in get_items() meant 'wildcard'
Unfortunately, None = published for the revision field, so get_items() would return
both draft and non-draft copies.
'''
self.store.convert_to_draft(self.problem.location, self.user.id)
# Query get_items() and find the html item. This should just return back a single item (not 2).
direct_store_items = self.store.get_items(
self.course.id, revision=ModuleStoreEnum.RevisionOption.published_only
)
items_from_direct_store = [item for item in direct_store_items if item.location == self.problem.location]
self.assertEqual(len(items_from_direct_store), 1)
self.assertFalse(getattr(items_from_direct_store[0], 'is_draft', False))
# Fetch from the draft store.
draft_store_items = self.store.get_items(
self.course.id, revision=ModuleStoreEnum.RevisionOption.draft_only
)
items_from_draft_store = [item for item in draft_store_items if item.location == self.problem.location]
self.assertEqual(len(items_from_draft_store), 1)
# TODO the below won't work for split mongo
self.assertTrue(getattr(items_from_draft_store[0], 'is_draft', False))
def test_draft_metadata(self):
'''
This verifies a bug we had where inherited metadata was getting written to the
module as 'own-metadata' when publishing. Also verifies the metadata inheritance is
properly computed
'''
# refetch course so it has all the children correct
course = self.store.update_item(self.course, self.user.id)
course.graceperiod = timedelta(days=1, hours=5, minutes=59, seconds=59)
course = self.store.update_item(course, self.user.id)
problem = self.store.get_item(self.problem.location)
self.assertEqual(problem.graceperiod, course.graceperiod)
self.assertNotIn('graceperiod', own_metadata(problem))
self.store.convert_to_draft(problem.location, self.user.id)
# refetch to check metadata
problem = self.store.get_item(problem.location)
self.assertEqual(problem.graceperiod, course.graceperiod)
self.assertNotIn('graceperiod', own_metadata(problem))
# publish module
self.store.publish(problem.location, self.user.id)
# refetch to check metadata
problem = self.store.get_item(problem.location)
self.assertEqual(problem.graceperiod, course.graceperiod)
self.assertNotIn('graceperiod', own_metadata(problem))
# put back in draft and change metadata and see if it's now marked as 'own_metadata'
self.store.convert_to_draft(problem.location, self.user.id)
problem = self.store.get_item(problem.location)
new_graceperiod = timedelta(hours=1)
self.assertNotIn('graceperiod', own_metadata(problem))
problem.graceperiod = new_graceperiod
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
problem.save()
self.assertIn('graceperiod', own_metadata(problem))
self.assertEqual(problem.graceperiod, new_graceperiod)
self.store.update_item(problem, self.user.id)
# read back to make sure it reads as 'own-metadata'
problem = self.store.get_item(problem.location)
self.assertIn('graceperiod', own_metadata(problem))
self.assertEqual(problem.graceperiod, new_graceperiod)
# republish
self.store.publish(problem.location, self.user.id)
# and re-read and verify 'own-metadata'
self.store.convert_to_draft(problem.location, self.user.id)
problem = self.store.get_item(problem.location)
self.assertIn('graceperiod', own_metadata(problem))
self.assertEqual(problem.graceperiod, new_graceperiod)
def test_get_depth_with_drafts(self):
# make sure no draft items have been returned
num_drafts = self._get_draft_counts(self.course)
self.assertEqual(num_drafts, 0)
# put into draft
self.store.convert_to_draft(self.problem.location, self.user.id)
# make sure we can query that item and verify that it is a draft
draft_problem = self.store.get_item(self.problem.location)
self.assertTrue(getattr(draft_problem, 'is_draft', False))
# now requery with depth
course = self.store.get_course(self.course.id, depth=None)
        # make sure just one draft item has been returned
num_drafts = self._get_draft_counts(course)
self.assertEqual(num_drafts, 1)
@mock.patch('xmodule.course_module.requests.get')
def test_import_textbook_as_content_element(self, mock_get):
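        # stub out the HTTP fetch of the textbook's table of contents so the test makes no real network call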
mock_get.return_value.text = dedent("""
<?xml version="1.0"?><table_of_contents>
<entry page="5" page_label="ii" name="Table of Contents"/>
</table_of_contents>
""").strip()
self.course.textbooks = [Textbook("Textbook", "https://s3.amazonaws.com/edx-textbooks/guttag_computation_v3/")]
course = self.store.update_item(self.course, self.user.id)
self.assertGreater(len(course.textbooks), 0)
def test_import_polls(self):
items = self.store.get_items(self.course.id, qualifiers={'category': 'poll_question'})
self.assertTrue(len(items) > 0)
# check that there's actually content in the 'question' field
self.assertGreater(len(items[0].question), 0)
def test_module_preview_in_whitelist(self):
"""
Tests the ajax callback to render an XModule
"""
with override_settings(COURSES_WITH_UNSAFE_CODE=[unicode(self.course.id)]):
# also try a custom response which will trigger the 'is this course in whitelist' logic
resp = self.client.get_json(
get_url('xblock_view_handler', self.vert_loc, kwargs={'view_name': 'container_preview'})
)
self.assertEqual(resp.status_code, 200)
vertical = self.store.get_item(self.vert_loc)
for child in vertical.children:
self.assertContains(resp, unicode(child))
def test_delete(self):
# make sure the parent points to the child object which is to be deleted
# need to refetch chapter b/c at the time it was assigned it had no children
chapter = self.store.get_item(self.chapter_loc)
self.assertIn(self.seq_loc, chapter.children)
self.client.delete(get_url('xblock_handler', self.seq_loc))
with self.assertRaises(ItemNotFoundError):
self.store.get_item(self.seq_loc)
chapter = self.store.get_item(self.chapter_loc)
# make sure the parent no longer points to the child object which was deleted
self.assertNotIn(self.seq_loc, chapter.children)
def test_asset_delete_and_restore(self):
'''
This test will exercise the soft delete/restore functionality of the assets
'''
asset_key = self._delete_asset_in_course()
        # now try to find it in the store; it should not be there any longer
content = contentstore().find(asset_key, throw_on_not_found=False)
self.assertIsNone(content)
        # now try to find it in the trashcan - it should be there
content = contentstore('trashcan').find(asset_key, throw_on_not_found=False)
self.assertIsNotNone(content)
# let's restore the asset
restore_asset_from_trashcan(unicode(asset_key))
# now try to find it in courseware store, and they should be back after restore
        content = contentstore().find(asset_key, throw_on_not_found=False)
self.assertIsNotNone(content)
def _delete_asset_in_course(self):
"""
        Helper method that:
        1) creates an asset in the course's contentstore
        2) soft-deletes the asset through the assets_handler view
        3) returns the asset key for further assertions
"""
asset_key = self.course.id.make_asset_key('asset', 'sample_static.txt')
content = StaticContent(
asset_key, "Fake asset", "application/text", "test",
)
contentstore().save(content)
# go through the website to do the delete, since the soft-delete logic is in the view
url = reverse_course_url(
'assets_handler',
self.course.id,
kwargs={'asset_key_string': unicode(asset_key)}
)
resp = self.client.delete(url)
self.assertEqual(resp.status_code, 204)
return asset_key
def test_empty_trashcan(self):
'''
This test will exercise the emptying of the asset trashcan
'''
self._delete_asset_in_course()
# make sure there's something in the trashcan
all_assets, __ = contentstore('trashcan').get_all_content_for_course(self.course.id)
self.assertGreater(len(all_assets), 0)
# empty the trashcan
empty_asset_trashcan([self.course.id])
# make sure trashcan is empty
all_assets, count = contentstore('trashcan').get_all_content_for_course(self.course.id)
self.assertEqual(len(all_assets), 0)
self.assertEqual(count, 0)
def test_illegal_draft_crud_ops(self):
# this test presumes old mongo and split_draft not full split
with self.assertRaises(InvalidVersionError):
self.store.convert_to_draft(self.chapter_loc, self.user.id)
chapter = self.store.get_item(self.chapter_loc)
chapter.data = 'chapter data'
self.store.update_item(chapter, self.user.id)
newobject = self.store.get_item(self.chapter_loc)
self.assertFalse(getattr(newobject, 'is_draft', False))
with self.assertRaises(InvalidVersionError):
self.store.unpublish(self.chapter_loc, self.user.id)
def test_bad_contentstore_request(self):
"""
Test that user get proper responses for urls with invalid url or
asset/course key
"""
resp = self.client.get_html('/c4x/CDX/123123/asset/&invalid.png')
self.assertEqual(resp.status_code, 400)
resp = self.client.get_html('/c4x/CDX/123123/asset/invalid.png')
self.assertEqual(resp.status_code, 404)
# Now test that 404 response is returned when user tries to access
# asset of some invalid course from split ModuleStore
with self.store.default_store(ModuleStoreEnum.Type.split):
resp = self.client.get_html('/c4x/InvalidOrg/InvalidCourse/asset/invalid.png')
self.assertEqual(resp.status_code, 404)
def test_delete_course(self):
"""
        This test creates a course, makes a draft item, and deletes the course,
        asserting that the draft content is deleted as well
"""
# add an asset
asset_key = self.course.id.make_asset_key('asset', 'sample_static.txt')
content = StaticContent(
asset_key, "Fake asset", "application/text", "test",
)
contentstore().save(content)
assets, count = contentstore().get_all_content_for_course(self.course.id)
self.assertGreater(len(assets), 0)
self.assertGreater(count, 0)
self.store.convert_to_draft(self.vert_loc, self.user.id)
# delete the course
self.store.delete_course(self.course.id, self.user.id)
        # assert that there are absolutely no modules left in the course,
        # including all draft items
items = self.store.get_items(self.course.id)
self.assertEqual(len(items), 0)
# assert that all content in the asset library is also deleted
assets, count = contentstore().get_all_content_for_course(self.course.id)
self.assertEqual(len(assets), 0)
self.assertEqual(count, 0)
def test_course_handouts_rewrites(self):
"""
Test that the xblock_handler rewrites static handout links
"""
handouts = self.store.create_item(
self.user.id, self.course.id, 'course_info', 'handouts', fields={
"data": "<a href='/static/handouts/sample_handout.txt'>Sample</a>",
}
)
# get module info (json)
resp = self.client.get(get_url('xblock_handler', handouts.location))
# make sure we got a successful response
self.assertEqual(resp.status_code, 200)
# check that /static/ has been converted to the full path
        # note, we know what the asset name should be because it is derived from the '/static/handouts/sample_handout.txt' link above
asset_key = self.course.id.make_asset_key('asset', 'handouts_sample_handout.txt')
self.assertContains(resp, unicode(asset_key))
def test_prefetch_children(self):
# make sure we haven't done too many round trips to DB:
# 1) the course,
# 2 & 3) for the chapters and sequentials
# Because we're querying from the top of the tree, we cache information needed for inheritance,
# so we don't need to make an extra query to compute it.
# set the branch to 'publish' in order to prevent extra lookups of draft versions
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, self.course.id):
with check_mongo_calls(3):
course = self.store.get_course(self.course.id, depth=2)
# make sure we pre-fetched a known sequential which should be at depth=2
self.assertIn(self.seq_loc, course.system.module_data)
# make sure we don't have a specific vertical which should be at depth=3
self.assertNotIn(self.vert_loc, course.system.module_data)
# Now, test with the branch set to draft. No extra round trips b/c it doesn't go deep enough to get
# beyond direct only categories
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, self.course.id):
with check_mongo_calls(3):
self.store.get_course(self.course.id, depth=2)
def _check_verticals(self, locations):
""" Test getting the editing HTML for each vertical. """
# Assert is here to make sure that the course being tested actually has verticals (units) to check.
self.assertGreater(len(locations), 0)
for loc in locations:
resp = self.client.get_html(get_url('container_handler', loc))
self.assertEqual(resp.status_code, 200)
@ddt.ddt
class ContentStoreTest(ContentStoreTestCase, XssTestMixin):
"""
Tests for the CMS ContentStore application.
"""
def setUp(self):
super(ContentStoreTest, self).setUp()
self.course_data = {
'org': 'MITx',
'number': '111',
'display_name': 'Robot Super Course',
'run': '2013_Spring'
}
def assert_created_course(self, number_suffix=None):
"""
Checks that the course was created properly.
"""
test_course_data = {}
test_course_data.update(self.course_data)
if number_suffix:
test_course_data['number'] = '{0}_{1}'.format(test_course_data['number'], number_suffix)
course_key = _get_course_id(self.store, test_course_data)
_create_course(self, course_key, test_course_data)
# Verify that the creator is now registered in the course.
self.assertTrue(CourseEnrollment.is_enrolled(self.user, course_key))
return test_course_data
def assert_create_course_failed(self, error_message):
"""
        Checks that the course was not created.
"""
resp = self.client.ajax_post('/course/', self.course_data)
self.assertEqual(resp.status_code, 400)
data = parse_json(resp)
self.assertEqual(data['error'], error_message)
def test_create_course(self):
"""Test new course creation - happy path"""
self.assert_created_course()
@override_settings(DEFAULT_COURSE_LANGUAGE='hr')
def test_create_course_default_language(self):
"""Test new course creation and verify default language"""
test_course_data = self.assert_created_course()
course_id = _get_course_id(self.store, test_course_data)
course_module = self.store.get_course(course_id)
self.assertEquals(course_module.language, 'hr')
def test_create_course_with_dots(self):
"""Test new course creation with dots in the name"""
self.course_data['org'] = 'org.foo.bar'
self.course_data['number'] = 'course.number'
self.course_data['run'] = 'run.name'
self.assert_created_course()
def test_create_course_check_forum_seeding(self):
"""Test new course creation and verify forum seeding """
test_course_data = self.assert_created_course(number_suffix=uuid4().hex)
self.assertTrue(are_permissions_roles_seeded(_get_course_id(self.store, test_course_data)))
def test_forum_unseeding_on_delete(self):
"""Test new course creation and verify forum unseeding """
test_course_data = self.assert_created_course(number_suffix=uuid4().hex)
course_id = _get_course_id(self.store, test_course_data)
self.assertTrue(are_permissions_roles_seeded(course_id))
delete_course_and_groups(course_id, self.user.id)
# should raise an exception for checking permissions on deleted course
with self.assertRaises(ItemNotFoundError):
are_permissions_roles_seeded(course_id)
def test_forum_unseeding_with_multiple_courses(self):
"""Test new course creation and verify forum unseeding when there are multiple courses"""
test_course_data = self.assert_created_course(number_suffix=uuid4().hex)
second_course_data = self.assert_created_course(number_suffix=uuid4().hex)
# unseed the forums for the first course
course_id = _get_course_id(self.store, test_course_data)
delete_course_and_groups(course_id, self.user.id)
# should raise an exception for checking permissions on deleted course
with self.assertRaises(ItemNotFoundError):
are_permissions_roles_seeded(course_id)
second_course_id = _get_course_id(self.store, second_course_data)
# permissions should still be there for the other course
self.assertTrue(are_permissions_roles_seeded(second_course_id))
def test_course_enrollments_and_roles_on_delete(self):
"""
Test that course deletion doesn't remove course enrollments or user's roles
"""
test_course_data = self.assert_created_course(number_suffix=uuid4().hex)
course_id = _get_course_id(self.store, test_course_data)
        # test that a user gets enrolled and gets the 'Student' forum role by default on creating a course
self.assertTrue(CourseEnrollment.is_enrolled(self.user, course_id))
self.assertTrue(self.user.roles.filter(name="Student", course_id=course_id))
delete_course_and_groups(course_id, self.user.id)
# check that user's enrollment for this course is not deleted
self.assertTrue(CourseEnrollment.is_enrolled(self.user, course_id))
        # check that the user still has the forum role "Student" for this course even after deleting it
self.assertTrue(self.user.roles.filter(name="Student", course_id=course_id))
def test_course_access_groups_on_delete(self):
"""
Test that course deletion removes users from 'instructor' and 'staff' groups of this course
of all format e.g, 'instructor_edX/Course/Run', 'instructor_edX.Course.Run', 'instructor_Course'
"""
test_course_data = self.assert_created_course(number_suffix=uuid4().hex)
course_id = _get_course_id(self.store, test_course_data)
        # Add the user to the instructor group and check that the user is in the instructor group of this course
instructor_role = CourseInstructorRole(course_id)
auth.add_users(self.user, instructor_role, self.user)
self.assertTrue(len(instructor_role.users_with_role()) > 0)
        # Now delete the course and check that the user is no longer in its instructor group
delete_course_and_groups(course_id, self.user.id)
# Update our cached user since its roles have changed
self.user = User.objects.get_by_natural_key(self.user.natural_key()[0])
self.assertFalse(instructor_role.has_user(self.user))
self.assertEqual(len(instructor_role.users_with_role()), 0)
def test_create_course_after_delete(self):
"""
Test that course creation works after deleting a course with the same URL
"""
test_course_data = self.assert_created_course()
course_id = _get_course_id(self.store, test_course_data)
delete_course_and_groups(course_id, self.user.id)
self.assert_created_course()
def test_create_course_duplicate_course(self):
"""Test new course creation - error path"""
self.client.ajax_post('/course/', self.course_data)
self.assert_course_creation_failed('There is already a course defined with the same organization and course number. Please change either organization or course number to be unique.')
def assert_course_creation_failed(self, error_message):
"""
Checks that the course did not get created
"""
test_enrollment = False
try:
course_id = _get_course_id(self.store, self.course_data)
initially_enrolled = CourseEnrollment.is_enrolled(self.user, course_id)
test_enrollment = True
except InvalidKeyError:
# b/c the intent of the test with bad chars isn't to test auth but to test the handler, ignore
pass
resp = self.client.ajax_post('/course/', self.course_data)
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertRegexpMatches(data['ErrMsg'], error_message)
if test_enrollment:
# One test case involves trying to create the same course twice. Hence for that course,
# the user will be enrolled. In the other cases, initially_enrolled will be False.
self.assertEqual(initially_enrolled, CourseEnrollment.is_enrolled(self.user, course_id))
def test_create_course_duplicate_number(self):
"""Test new course creation - error path"""
self.client.ajax_post('/course/', self.course_data)
self.course_data['display_name'] = 'Robot Super Course Two'
self.course_data['run'] = '2013_Summer'
self.assert_course_creation_failed('There is already a course defined with the same organization and course number. Please change either organization or course number to be unique.')
def test_create_course_case_change(self):
"""Test new course creation - error path due to case insensitive name equality"""
self.course_data['number'] = 'capital'
self.client.ajax_post('/course/', self.course_data)
cache_current = self.course_data['org']
self.course_data['org'] = self.course_data['org'].lower()
self.assert_course_creation_failed('There is already a course defined with the same organization and course number. Please change either organization or course number to be unique.')
self.course_data['org'] = cache_current
self.client.ajax_post('/course/', self.course_data)
cache_current = self.course_data['number']
self.course_data['number'] = self.course_data['number'].upper()
self.assert_course_creation_failed('There is already a course defined with the same organization and course number. Please change either organization or course number to be unique.')
def test_course_substring(self):
"""
Test that a new course can be created whose name is a substring of an existing course
"""
self.client.ajax_post('/course/', self.course_data)
cache_current = self.course_data['number']
self.course_data['number'] = '{}a'.format(self.course_data['number'])
resp = self.client.ajax_post('/course/', self.course_data)
self.assertEqual(resp.status_code, 200)
self.course_data['number'] = cache_current
self.course_data['org'] = 'a{}'.format(self.course_data['org'])
resp = self.client.ajax_post('/course/', self.course_data)
self.assertEqual(resp.status_code, 200)
def test_create_course_with_bad_organization(self):
"""Test new course creation - error path for bad organization name"""
self.course_data['org'] = 'University of California, Berkeley'
self.assert_course_creation_failed(r"(?s)Unable to create course 'Robot Super Course'.*")
def test_create_course_with_course_creation_disabled_staff(self):
"""Test new course creation -- course creation disabled, but staff access."""
with mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_COURSE_CREATION': True}):
self.assert_created_course()
def test_create_course_with_course_creation_disabled_not_staff(self):
"""Test new course creation -- error path for course creation disabled, not staff access."""
with mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_COURSE_CREATION': True}):
self.user.is_staff = False
self.user.save()
self.assert_course_permission_denied()
def test_create_course_no_course_creators_staff(self):
"""Test new course creation -- course creation group enabled, staff, group is empty."""
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True}):
self.assert_created_course()
def test_create_course_no_course_creators_not_staff(self):
"""Test new course creation -- error path for course creator group enabled, not staff, group is empty."""
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
self.user.is_staff = False
self.user.save()
self.assert_course_permission_denied()
def test_create_course_with_course_creator(self):
"""Test new course creation -- use course creator group"""
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
auth.add_users(self.user, CourseCreatorRole(), self.user)
self.assert_created_course()
def test_create_course_with_unicode_in_id_disabled(self):
"""
Test new course creation with feature setting: ALLOW_UNICODE_COURSE_ID disabled.
"""
with mock.patch.dict('django.conf.settings.FEATURES', {'ALLOW_UNICODE_COURSE_ID': False}):
error_message = "Special characters not allowed in organization, course number, and course run."
self.course_data['org'] = u'��������������'
self.assert_create_course_failed(error_message)
            self.course_data['number'] = u'échantillon'
self.assert_create_course_failed(error_message)
self.course_data['run'] = u'����������'
self.assert_create_course_failed(error_message)
def assert_course_permission_denied(self):
"""
Checks that the course did not get created due to a PermissionError.
"""
resp = self.client.ajax_post('/course/', self.course_data)
self.assertEqual(resp.status_code, 403)
def test_course_index_view_with_no_courses(self):
"""Test viewing the index page with no courses"""
resp = self.client.get_html('/home/')
self.assertContains(
resp,
'<h1 class="page-header">Studio Home</h1>',
status_code=200,
html=True
)
def test_course_factory(self):
"""Test that the course factory works correctly."""
course = CourseFactory.create()
self.assertIsInstance(course, CourseDescriptor)
def test_item_factory(self):
"""Test that the item factory works correctly."""
course = CourseFactory.create()
item = ItemFactory.create(parent_location=course.location)
self.assertIsInstance(item, SequenceDescriptor)
def test_course_index_view_with_course(self):
"""Test viewing the index page with an existing course"""
CourseFactory.create(display_name='Robot Super Educational Course')
resp = self.client.get_html('/home/')
self.assertContains(
resp,
'<h3 class="course-title">Robot Super Educational Course</h3>',
status_code=200,
html=True
)
def test_course_index_view_xss(self):
"""Test that the index page correctly escapes course names with script
tags."""
CourseFactory.create(
display_name='<script>alert("course XSS")</script>'
)
LibraryFactory.create(display_name='<script>alert("library XSS")</script>')
resp = self.client.get_html('/home/')
for xss in ('course', 'library'):
html = '<script>alert("{name} XSS")</script>'.format(
name=xss
)
self.assert_no_xss(resp, html)
def test_course_overview_view_with_course(self):
"""Test viewing the course overview page with an existing course"""
course = CourseFactory.create()
resp = self._show_course_overview(course.id)
self.assertContains(
resp,
'<article class="outline outline-complex outline-course" data-locator="{locator}" data-course-key="{course_key}">'.format(
locator=unicode(course.location),
course_key=unicode(course.id),
),
status_code=200,
html=True
)
def test_create_item(self):
"""Test creating a new xblock instance."""
course = CourseFactory.create()
section_data = {
'parent_locator': unicode(course.location),
'category': 'chapter',
'display_name': 'Section One',
}
resp = self.client.ajax_post(reverse_url('xblock_handler'), section_data)
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
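        # the new block_id is auto-generated, so match it with a hex-string regex rather than an exact value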
retarget = unicode(course.id.make_usage_key('chapter', 'REPLACE')).replace('REPLACE', r'([0-9]|[a-f]){3,}')
self.assertRegexpMatches(data['locator'], retarget)
def test_capa_module(self):
"""Test that a problem treats markdown specially."""
course = CourseFactory.create()
problem_data = {
'parent_locator': unicode(course.location),
'category': 'problem'
}
resp = self.client.ajax_post(reverse_url('xblock_handler'), problem_data)
self.assertEqual(resp.status_code, 200)
payload = parse_json(resp)
problem_loc = UsageKey.from_string(payload['locator'])
problem = self.store.get_item(problem_loc)
# should be a CapaDescriptor
self.assertIsInstance(problem, CapaDescriptor, "New problem is not a CapaDescriptor")
context = problem.get_context()
self.assertIn('markdown', context, "markdown is missing from context")
self.assertNotIn('markdown', problem.editable_metadata_fields, "Markdown slipped into the editable metadata fields")
def test_cms_imported_course_walkthrough(self):
"""
        Import and walk through some common URL endpoints. This just verifies that they
        do not return a 500; it checks no other behavior, so it is not a deep test
"""
def test_get_html(handler):
# Helper function for getting HTML for a page in Studio and
# checking that it does not error.
resp = self.client.get_html(
get_url(handler, course_key, 'course_key_string')
)
self.assertEqual(resp.status_code, 200)
course_items = import_course_from_xml(
self.store, self.user.id, TEST_DATA_DIR, ['simple'], create_if_not_present=True
)
course_key = course_items[0].id
resp = self._show_course_overview(course_key)
self.assertEqual(resp.status_code, 200)
self.assertContains(resp, 'Chapter 2')
# go to various pages
test_get_html('import_handler')
test_get_html('export_handler')
test_get_html('course_team_handler')
test_get_html('course_info_handler')
test_get_html('assets_handler')
test_get_html('tabs_handler')
test_get_html('settings_handler')
test_get_html('grading_handler')
test_get_html('advanced_settings_handler')
test_get_html('textbooks_list_handler')
# go look at the Edit page
unit_key = course_key.make_usage_key('vertical', 'test_vertical')
resp = self.client.get_html(get_url('container_handler', unit_key))
self.assertEqual(resp.status_code, 200)
def delete_item(category, name):
""" Helper method for testing the deletion of an xblock item. """
item_key = course_key.make_usage_key(category, name)
resp = self.client.delete(get_url('xblock_handler', item_key))
self.assertEqual(resp.status_code, 204)
# delete a component
delete_item(category='html', name='test_html')
# delete a unit
delete_item(category='vertical', name='test_vertical')
# delete a unit
delete_item(category='sequential', name='test_sequence')
# delete a chapter
delete_item(category='chapter', name='chapter_2')
def test_import_into_new_course_id(self):
target_id = _get_course_id(self.store, self.course_data)
_create_course(self, target_id, self.course_data)
import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], target_id=target_id)
modules = self.store.get_items(target_id)
# we should have a number of modules in there
# we can't specify an exact number since it'll always be changing
self.assertGreater(len(modules), 10)
#
# test various re-namespacing elements
#
# first check PDF textbooks, to make sure the url paths got updated
course_module = self.store.get_course(target_id)
self.assertEqual(len(course_module.pdf_textbooks), 1)
self.assertEqual(len(course_module.pdf_textbooks[0]["chapters"]), 2)
self.assertEqual(course_module.pdf_textbooks[0]["chapters"][0]["url"], '/static/Chapter1.pdf')
self.assertEqual(course_module.pdf_textbooks[0]["chapters"][1]["url"], '/static/Chapter2.pdf')
def test_import_into_new_course_id_wiki_slug_renamespacing(self):
# If reimporting into the same course do not change the wiki_slug.
target_id = self.store.make_course_key('edX', 'toy', '2012_Fall')
course_data = {
'org': target_id.org,
'number': target_id.course,
'display_name': 'Robot Super Course',
'run': target_id.run
}
_create_course(self, target_id, course_data)
course_module = self.store.get_course(target_id)
course_module.wiki_slug = 'toy'
course_module.save()
# Import a course with wiki_slug == location.course
import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], target_id=target_id)
course_module = self.store.get_course(target_id)
self.assertEquals(course_module.wiki_slug, 'toy')
# But change the wiki_slug if it is a different course.
target_id = self.store.make_course_key('MITx', '111', '2013_Spring')
course_data = {
'org': target_id.org,
'number': target_id.course,
'display_name': 'Robot Super Course',
'run': target_id.run
}
_create_course(self, target_id, course_data)
# Import a course with wiki_slug == location.course
import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], target_id=target_id)
course_module = self.store.get_course(target_id)
self.assertEquals(course_module.wiki_slug, 'MITx.111.2013_Spring')
# Now try importing a course with wiki_slug == '{0}.{1}.{2}'.format(location.org, location.course, location.run)
import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['two_toys'], target_id=target_id)
course_module = self.store.get_course(target_id)
self.assertEquals(course_module.wiki_slug, 'MITx.111.2013_Spring')
def test_import_metadata_with_attempts_empty_string(self):
import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['simple'], create_if_not_present=True)
did_load_item = False
try:
course_key = self.store.make_course_key('edX', 'simple', 'problem')
usage_key = course_key.make_usage_key('problem', 'ps01-simple')
self.store.get_item(usage_key)
did_load_item = True
except ItemNotFoundError:
pass
# make sure we found the item (e.g. it didn't error while loading)
self.assertTrue(did_load_item)
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_forum_id_generation(self, default_store):
"""
Test that a discussion item, even if it doesn't set its discussion_id,
consistently generates the same one
"""
course = CourseFactory.create(default_store=default_store)
# create a discussion item
discussion_item = self.store.create_item(self.user.id, course.id, 'discussion', 'new_component')
# now fetch it from the modulestore to instantiate its descriptor
fetched = self.store.get_item(discussion_item.location)
# refetch it to be safe
refetched = self.store.get_item(discussion_item.location)
# and make sure the same discussion items have the same discussion ids
self.assertEqual(fetched.discussion_id, discussion_item.discussion_id)
self.assertEqual(fetched.discussion_id, refetched.discussion_id)
# and make sure that the id isn't the old "$$GUID$$"
self.assertNotEqual(discussion_item.discussion_id, '$$GUID$$')
def test_metadata_inheritance(self):
course_items = import_course_from_xml(
self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True
)
course = course_items[0]
verticals = self.store.get_items(course.id, qualifiers={'category': 'vertical'})
# let's assert on the metadata_inheritance on an existing vertical
for vertical in verticals:
self.assertEqual(course.xqa_key, vertical.xqa_key)
self.assertEqual(course.start, vertical.start)
self.assertGreater(len(verticals), 0)
        # create a new module and add it as a child to a vertical
parent = verticals[0]
new_block = self.store.create_child(
self.user.id, parent.location, 'html', 'new_component'
)
# flush the cache
new_block = self.store.get_item(new_block.location)
# check for grace period definition which should be defined at the course level
self.assertEqual(parent.graceperiod, new_block.graceperiod)
self.assertEqual(parent.start, new_block.start)
self.assertEqual(course.start, new_block.start)
self.assertEqual(course.xqa_key, new_block.xqa_key)
#
# now let's define an override at the leaf node level
#
new_block.graceperiod = timedelta(1)
self.store.update_item(new_block, self.user.id)
# flush the cache and refetch
new_block = self.store.get_item(new_block.location)
self.assertEqual(timedelta(1), new_block.graceperiod)
def test_default_metadata_inheritance(self):
course = CourseFactory.create()
vertical = ItemFactory.create(parent_location=course.location)
course.children.append(vertical)
# in memory
self.assertIsNotNone(course.start)
self.assertEqual(course.start, vertical.start)
self.assertEqual(course.textbooks, [])
self.assertIn('GRADER', course.grading_policy)
self.assertIn('GRADE_CUTOFFS', course.grading_policy)
# by fetching
fetched_course = self.store.get_item(course.location)
fetched_item = self.store.get_item(vertical.location)
self.assertIsNotNone(fetched_course.start)
self.assertEqual(course.start, fetched_course.start)
self.assertEqual(fetched_course.start, fetched_item.start)
self.assertEqual(course.textbooks, fetched_course.textbooks)
def test_image_import(self):
"""Test backwards compatibilty of course image."""
content_store = contentstore()
# Use conditional_and_poll, as it's got an image already
courses = import_course_from_xml(
self.store,
self.user.id,
TEST_DATA_DIR,
['conditional_and_poll'],
static_content_store=content_store,
create_if_not_present=True
)
course = courses[0]
# Make sure the course image is set to the right place
self.assertEqual(course.course_image, 'images_course_image.jpg')
# Ensure that the imported course image is present -- this shouldn't raise an exception
asset_key = course.id.make_asset_key('asset', course.course_image)
content_store.find(asset_key)
def _show_course_overview(self, course_key):
"""
Show the course overview page.
"""
resp = self.client.get_html(get_url('course_handler', course_key, 'course_key_string'))
return resp
def test_wiki_slug(self):
"""When creating a course a unique wiki_slug should be set."""
course_key = _get_course_id(self.store, self.course_data)
_create_course(self, course_key, self.course_data)
course_module = self.store.get_course(course_key)
self.assertEquals(course_module.wiki_slug, 'MITx.111.2013_Spring')
def test_course_handler_with_invalid_course_key_string(self):
"""Test viewing the course overview page with invalid course id"""
response = self.client.get_html('/course/edX/test')
self.assertEquals(response.status_code, 404)
class MetadataSaveTestCase(ContentStoreTestCase):
"""Test that metadata is correctly cached and decached."""
def setUp(self):
super(MetadataSaveTestCase, self).setUp()
course = CourseFactory.create()
video_sample_xml = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
from="00:00:01"
to="00:01:00">
<source src="http://www.example.com/file.mp4"/>
<track src="http://www.example.com/track"/>
</video>
'''
self.video_descriptor = ItemFactory.create(
parent_location=course.location, category='video',
data={'data': video_sample_xml}
)
def test_metadata_not_persistence(self):
"""
        Test that metadata fields set by a descriptor's constructor are
        correctly removed from persisted metadata when deleted.
"""
self.assertIn('html5_sources', own_metadata(self.video_descriptor))
attrs_to_strip = {
'show_captions',
'youtube_id_1_0',
'youtube_id_0_75',
'youtube_id_1_25',
'youtube_id_1_5',
'start_time',
'end_time',
'source',
'html5_sources',
'track'
}
location = self.video_descriptor.location
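        # deleting each field removes its explicitly-set value, so it should no longer show up in own_metadata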
for field_name in attrs_to_strip:
delattr(self.video_descriptor, field_name)
self.assertNotIn('html5_sources', own_metadata(self.video_descriptor))
self.store.update_item(self.video_descriptor, self.user.id)
module = self.store.get_item(location)
self.assertNotIn('html5_sources', own_metadata(module))
def test_metadata_persistence(self):
# TODO: create the same test as `test_metadata_not_persistence`,
# but check persistence for some other module.
pass
class RerunCourseTest(ContentStoreTestCase):
"""
Tests for Rerunning a course via the view handler
"""
def setUp(self):
super(RerunCourseTest, self).setUp()
self.destination_course_data = {
'org': 'MITx',
'number': '111',
'display_name': 'Robot Super Course',
'run': '2013_Spring'
}
def post_rerun_request(
self, source_course_key, destination_course_data=None, response_code=200, expect_error=False
):
"""Create and send an ajax post for the rerun request"""
# create data to post
rerun_course_data = {'source_course_key': unicode(source_course_key)}
if not destination_course_data:
destination_course_data = self.destination_course_data
rerun_course_data.update(destination_course_data)
destination_course_key = _get_course_id(self.store, destination_course_data)
# post the request
course_url = get_url('course_handler', destination_course_key, 'course_key_string')
response = self.client.ajax_post(course_url, rerun_course_data)
# verify response
self.assertEqual(response.status_code, response_code)
if not expect_error:
json_resp = parse_json(response)
self.assertNotIn('ErrMsg', json_resp)
destination_course_key = CourseKey.from_string(json_resp['destination_course_key'])
return destination_course_key
def get_course_listing_elements(self, html, course_key):
"""Returns the elements in the course listing section of html that have the given course_key"""
return html.cssselect('.course-item[data-course-key="{}"]'.format(unicode(course_key)))
def get_unsucceeded_course_action_elements(self, html, course_key):
"""Returns the elements in the unsucceeded course action section that have the given course_key"""
return html.cssselect('.courses-processing li[data-course-key="{}"]'.format(unicode(course_key)))
def assertInCourseListing(self, course_key):
"""
Asserts that the given course key is in the accessible course listing section of the html
and NOT in the unsucceeded course action section of the html.
"""
course_listing = lxml.html.fromstring(self.client.get_html('/home/').content)
self.assertEqual(len(self.get_course_listing_elements(course_listing, course_key)), 1)
self.assertEqual(len(self.get_unsucceeded_course_action_elements(course_listing, course_key)), 0)
def assertInUnsucceededCourseActions(self, course_key):
"""
Asserts that the given course key is in the unsucceeded course action section of the html
and NOT in the accessible course listing section of the html.
"""
course_listing = lxml.html.fromstring(self.client.get_html('/home/').content)
self.assertEqual(len(self.get_course_listing_elements(course_listing, course_key)), 0)
self.assertEqual(len(self.get_unsucceeded_course_action_elements(course_listing, course_key)), 1)
def verify_rerun_course(self, source_course_key, destination_course_key, destination_display_name):
"""
Verify the contents of the course rerun action
"""
rerun_state = CourseRerunState.objects.find_first(course_key=destination_course_key)
expected_states = {
'state': CourseRerunUIStateManager.State.SUCCEEDED,
'display_name': destination_display_name,
'source_course_key': source_course_key,
'course_key': destination_course_key,
'should_display': True,
}
for field_name, expected_value in expected_states.iteritems():
self.assertEquals(getattr(rerun_state, field_name), expected_value)
# Verify that the creator is now enrolled in the course.
self.assertTrue(CourseEnrollment.is_enrolled(self.user, destination_course_key))
# Verify both courses are in the course listing section
self.assertInCourseListing(source_course_key)
self.assertInCourseListing(destination_course_key)
def test_rerun_course_no_videos_in_val(self):
"""
Test when rerunning a course with no videos, VAL copies nothing
"""
source_course = CourseFactory.create()
destination_course_key = self.post_rerun_request(source_course.id)
self.verify_rerun_course(source_course.id, destination_course_key, self.destination_course_data['display_name'])
videos = list(get_videos_for_course(destination_course_key))
self.assertEqual(0, len(videos))
self.assertInCourseListing(destination_course_key)
def test_rerun_course_success(self):
source_course = CourseFactory.create()
create_video(
dict(
edx_video_id="tree-hugger",
courses=[source_course.id],
status='test',
duration=2,
encoded_videos=[]
)
)
destination_course_key = self.post_rerun_request(source_course.id)
self.verify_rerun_course(source_course.id, destination_course_key, self.destination_course_data['display_name'])
# Verify that the VAL copies videos to the rerun
source_videos = list(get_videos_for_course(source_course.id))
target_videos = list(get_videos_for_course(destination_course_key))
self.assertEqual(1, len(source_videos))
self.assertEqual(source_videos, target_videos)
def test_rerun_course_resets_advertised_date(self):
source_course = CourseFactory.create(advertised_start="01-12-2015")
destination_course_key = self.post_rerun_request(source_course.id)
destination_course = self.store.get_course(destination_course_key)
self.assertEqual(None, destination_course.advertised_start)
def test_rerun_of_rerun(self):
source_course = CourseFactory.create()
rerun_course_key = self.post_rerun_request(source_course.id)
rerun_of_rerun_data = {
'org': rerun_course_key.org,
'number': rerun_course_key.course,
'display_name': 'rerun of rerun',
'run': 'rerun2'
}
rerun_of_rerun_course_key = self.post_rerun_request(rerun_course_key, rerun_of_rerun_data)
self.verify_rerun_course(rerun_course_key, rerun_of_rerun_course_key, rerun_of_rerun_data['display_name'])
def test_rerun_course_fail_no_source_course(self):
existent_course_key = CourseFactory.create().id
non_existent_course_key = CourseLocator("org", "non_existent_course", "non_existent_run")
destination_course_key = self.post_rerun_request(non_existent_course_key)
# Verify that the course rerun action is marked failed
rerun_state = CourseRerunState.objects.find_first(course_key=destination_course_key)
self.assertEquals(rerun_state.state, CourseRerunUIStateManager.State.FAILED)
self.assertIn("Cannot find a course at", rerun_state.message)
# Verify that the creator is not enrolled in the course.
self.assertFalse(CourseEnrollment.is_enrolled(self.user, non_existent_course_key))
# Verify that the existing course continues to be in the course listings
self.assertInCourseListing(existent_course_key)
# Verify that the failed course is NOT in the course listings
self.assertInUnsucceededCourseActions(destination_course_key)
def test_rerun_course_fail_duplicate_course(self):
existent_course_key = CourseFactory.create().id
destination_course_data = {
'org': existent_course_key.org,
'number': existent_course_key.course,
'display_name': 'existing course',
'run': existent_course_key.run
}
destination_course_key = self.post_rerun_request(
existent_course_key, destination_course_data, expect_error=True
)
# Verify that the course rerun action doesn't exist
with self.assertRaises(CourseActionStateItemNotFoundError):
CourseRerunState.objects.find_first(course_key=destination_course_key)
# Verify that the existing course continues to be in the course listing
self.assertInCourseListing(existent_course_key)
def test_rerun_with_permission_denied(self):
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
source_course = CourseFactory.create()
auth.add_users(self.user, CourseCreatorRole(), self.user)
self.user.is_staff = False
self.user.save()
self.post_rerun_request(source_course.id, response_code=403, expect_error=True)
def test_rerun_error(self):
error_message = "Mock Error Message"
with mock.patch(
'xmodule.modulestore.mixed.MixedModuleStore.clone_course',
mock.Mock(side_effect=Exception(error_message))
):
source_course = CourseFactory.create()
destination_course_key = self.post_rerun_request(source_course.id)
rerun_state = CourseRerunState.objects.find_first(course_key=destination_course_key)
self.assertEquals(rerun_state.state, CourseRerunUIStateManager.State.FAILED)
self.assertIn(error_message, rerun_state.message)
def test_rerun_error_trunc_message(self):
"""
CourseActionUIState.message is sometimes populated with the contents
of Python tracebacks. This test ensures we don't crash when attempting
to insert a value exceeding its max_length (note that sqlite does not
complain if this happens, but MySQL throws an error).
"""
with mock.patch(
'xmodule.modulestore.mixed.MixedModuleStore.clone_course',
mock.Mock(side_effect=Exception()),
):
source_course = CourseFactory.create()
message_too_long = "traceback".rjust(CourseRerunState.MAX_MESSAGE_LENGTH * 2, '-')
with mock.patch('traceback.format_exc', return_value=message_too_long):
destination_course_key = self.post_rerun_request(source_course.id)
rerun_state = CourseRerunState.objects.find_first(course_key=destination_course_key)
self.assertEquals(rerun_state.state, CourseRerunUIStateManager.State.FAILED)
self.assertTrue(rerun_state.message.endswith("traceback"))
self.assertEqual(len(rerun_state.message), CourseRerunState.MAX_MESSAGE_LENGTH)
def test_rerun_course_wiki_slug(self):
"""
Test that unique wiki_slug is assigned to rerun course.
"""
course_data = {
'org': 'edX',
'number': '123',
'display_name': 'Rerun Course',
'run': '2013'
}
source_wiki_slug = '{0}.{1}.{2}'.format(course_data['org'], course_data['number'], course_data['run'])
source_course_key = _get_course_id(self.store, course_data)
_create_course(self, source_course_key, course_data)
source_course = self.store.get_course(source_course_key)
# Verify created course's wiki_slug.
self.assertEquals(source_course.wiki_slug, source_wiki_slug)
destination_course_data = course_data
destination_course_data['run'] = '2013_Rerun'
destination_course_key = self.post_rerun_request(
source_course.id, destination_course_data=destination_course_data
)
self.verify_rerun_course(source_course.id, destination_course_key, destination_course_data['display_name'])
destination_course = self.store.get_course(destination_course_key)
destination_wiki_slug = '{0}.{1}.{2}'.format(
destination_course.id.org, destination_course.id.course, destination_course.id.run
)
# Verify rerun course's wiki_slug.
self.assertEquals(destination_course.wiki_slug, destination_wiki_slug)
class ContentLicenseTest(ContentStoreTestCase):
"""
Tests around content licenses
"""
def test_course_license_export(self):
content_store = contentstore()
root_dir = path(mkdtemp_clean())
self.course.license = "creative-commons: BY SA"
self.store.update_item(self.course, None)
export_course_to_xml(self.store, content_store, self.course.id, root_dir, 'test_license')
fname = "{block}.xml".format(block=self.course.scope_ids.usage_id.block_id)
run_file_path = root_dir / "test_license" / "course" / fname
run_xml = etree.parse(run_file_path.open())
self.assertEqual(run_xml.getroot().get("license"), "creative-commons: BY SA")
def test_video_license_export(self):
content_store = contentstore()
root_dir = path(mkdtemp_clean())
video_descriptor = ItemFactory.create(
parent_location=self.course.location, category='video',
license="all-rights-reserved"
)
export_course_to_xml(self.store, content_store, self.course.id, root_dir, 'test_license')
fname = "{block}.xml".format(block=video_descriptor.scope_ids.usage_id.block_id)
video_file_path = root_dir / "test_license" / "video" / fname
video_xml = etree.parse(video_file_path.open())
self.assertEqual(video_xml.getroot().get("license"), "all-rights-reserved")
def test_license_import(self):
course_items = import_course_from_xml(
self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True
)
course = course_items[0]
self.assertEqual(course.license, "creative-commons: BY")
videos = self.store.get_items(course.id, qualifiers={'category': 'video'})
self.assertEqual(videos[0].license, "all-rights-reserved")
class EntryPageTestCase(TestCase):
"""
Tests entry pages that aren't specific to a course.
"""
def setUp(self):
super(EntryPageTestCase, self).setUp()
self.client = AjaxEnabledTestClient()
def _test_page(self, page, status_code=200):
resp = self.client.get_html(page)
self.assertEqual(resp.status_code, status_code)
def test_how_it_works(self):
self._test_page("/howitworks")
def test_signup(self):
self._test_page("/signup")
def test_login(self):
self._test_page("/signin")
def test_logout(self):
# Logout redirects.
self._test_page("/logout", 302)
class SigninPageTestCase(TestCase):
"""
Tests that the CSRF token is directly included in the signin form. This is
important to make sure that the script is functional independently of any
other script.
"""
def test_csrf_token_is_present_in_form(self):
# Expected html:
# <form>
# ...
# <fieldset>
# ...
# <input name="csrfmiddlewaretoken" value="...">
# ...
# </fieldset>
# ...
#</form>
response = self.client.get("/signin")
csrf_token = response.cookies.get("csrftoken")
form = lxml.html.fromstring(response.content).get_element_by_id("login_form")
csrf_input_field = form.find(".//input[@name='csrfmiddlewaretoken']")
self.assertIsNotNone(csrf_token)
self.assertIsNotNone(csrf_token.value)
self.assertIsNotNone(csrf_input_field)
self.assertEqual(csrf_token.value, csrf_input_field.attrib["value"])
def _create_course(test, course_key, course_data):
"""
Creates a course via an AJAX request and verifies the URL returned in the response.
"""
course_url = get_url('course_handler', course_key, 'course_key_string')
response = test.client.ajax_post(course_url, course_data)
test.assertEqual(response.status_code, 200)
data = parse_json(response)
test.assertNotIn('ErrMsg', data)
test.assertEqual(data['url'], course_url)
def _get_course_id(store, course_data):
"""Returns the course ID."""
return store.make_course_key(course_data['org'], course_data['number'], course_data['run'])
| agpl-3.0 | -166,267,854,494,191,520 | 42.28024 | 190 | 0.645101 | false |
SUSE/azure-storage-python | tests/test_table_batch.py | 3 | 21414 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import unittest
from datetime import datetime
from dateutil.tz import tzutc
from azure.storage.table import (
Entity,
EntityProperty,
TableService,
TableBatch,
EdmType,
AzureBatchOperationError,
AzureBatchValidationError,
)
from tests.testcase import (
StorageTestCase,
record,
)
#------------------------------------------------------------------------------
TEST_TABLE_PREFIX = 'table'
#------------------------------------------------------------------------------
class StorageTableBatchTest(StorageTestCase):
def setUp(self):
super(StorageTableBatchTest, self).setUp()
self.ts = self._create_storage_service(TableService, self.settings)
self.test_tables = []
self.table_name = self._get_table_reference()
if not self.is_playback():
self.ts.create_table(self.table_name)
def tearDown(self):
if not self.is_playback():
for table_name in self.test_tables:
try:
self.ts.delete_table(table_name)
except:
pass
return super(StorageTableBatchTest, self).tearDown()
#--Helpers-----------------------------------------------------------------
def _get_table_reference(self, prefix=TEST_TABLE_PREFIX):
table_name = self.get_resource_name(prefix)
self.test_tables.append(table_name)
return table_name
def _create_default_entity_dict(self, partition=None, row=None):
'''
Creates a dictionary-based entity with fixed values, using all
of the supported data types.
'''
partition = partition if partition is not None else self.get_resource_name('pk')
row = row if row is not None else self.get_resource_name('rk')
return {'PartitionKey': partition,
'RowKey': row,
'age': 39,
'sex': 'male',
'married': True,
'deceased': False,
'optional': None,
'ratio': 3.1,
'evenratio': 3.0,
'large': 933311100,
'Birthday': datetime(1973, 10, 4),
'birthday': datetime(1970, 10, 4),
'other': EntityProperty(EdmType.INT32, 20),
'clsid': EntityProperty(
EdmType.GUID,
'c9da6455-213d-42c9-9a79-3e9149a57833')}
def _create_updated_entity_dict(self, partition, row):
'''
Creates a dictionary-based entity with fixed values, with a
different set of values than the default entity. It
adds fields, changes field values, changes field types,
and removes fields when compared to the default entity.
'''
return {'PartitionKey': partition,
'RowKey': row,
'age': 'abc',
'sex': 'female',
'sign': 'aquarius',
'birthday': datetime(1991, 10, 4)}
def _assert_default_entity(self, entity):
'''
Asserts that the entity passed in matches the default entity.
'''
self.assertEqual(entity.age, 39)
self.assertEqual(entity.sex, 'male')
self.assertEqual(entity.married, True)
self.assertEqual(entity.deceased, False)
self.assertFalse(hasattr(entity, "optional"))
self.assertFalse(hasattr(entity, "aquarius"))
self.assertEqual(entity.ratio, 3.1)
self.assertEqual(entity.evenratio, 3.0)
self.assertEqual(entity.large, 933311100)
self.assertEqual(entity.Birthday, datetime(1973, 10, 4, tzinfo=tzutc()))
self.assertEqual(entity.birthday, datetime(1970, 10, 4, tzinfo=tzutc()))
self.assertIsInstance(entity.other, EntityProperty)
self.assertEqual(entity.other.type, EdmType.INT32)
self.assertEqual(entity.other.value, 20)
self.assertIsInstance(entity.clsid, EntityProperty)
self.assertEqual(entity.clsid.type, EdmType.GUID)
self.assertEqual(entity.clsid.value,
'c9da6455-213d-42c9-9a79-3e9149a57833')
self.assertTrue(hasattr(entity, "Timestamp"))
self.assertIsInstance(entity.Timestamp, datetime)
self.assertIsNotNone(entity.etag)
def _assert_updated_entity(self, entity):
'''
Asserts that the entity passed in matches the updated entity.
'''
self.assertEqual(entity.age, 'abc')
self.assertEqual(entity.sex, 'female')
self.assertFalse(hasattr(entity, "married"))
self.assertFalse(hasattr(entity, "deceased"))
self.assertEqual(entity.sign, 'aquarius')
self.assertFalse(hasattr(entity, "optional"))
self.assertFalse(hasattr(entity, "ratio"))
self.assertFalse(hasattr(entity, "evenratio"))
self.assertFalse(hasattr(entity, "large"))
self.assertFalse(hasattr(entity, "Birthday"))
self.assertEqual(entity.birthday, datetime(1991, 10, 4, tzinfo=tzutc()))
self.assertFalse(hasattr(entity, "other"))
self.assertFalse(hasattr(entity, "clsid"))
self.assertTrue(hasattr(entity, "Timestamp"))
self.assertIsNotNone(entity.etag)
#--Test cases for batch ---------------------------------------------
@record
def test_batch_insert(self):
# Arrange
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert'
entity.test = EntityProperty(EdmType.BOOLEAN, 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(EdmType.INT64, '1234567890')
entity.test5 = datetime.utcnow()
batch = TableBatch()
batch.insert_entity(entity)
resp = self.ts.commit_batch(self.table_name, batch)
# Assert
self.assertIsNotNone(resp)
result = self.ts.get_entity(self.table_name, '001', 'batch_insert')
self.assertEqual(resp[0], result.etag)
@record
def test_batch_update(self):
# Arrange
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_update'
entity.test = EntityProperty(EdmType.BOOLEAN, 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(EdmType.INT64, '1234567890')
entity.test5 = datetime.utcnow()
self.ts.insert_entity(self.table_name, entity)
entity = self.ts.get_entity(self.table_name, '001', 'batch_update')
self.assertEqual(3, entity.test3)
entity.test2 = 'value1'
batch = TableBatch()
batch.update_entity(entity)
resp = self.ts.commit_batch(self.table_name, batch)
# Assert
self.assertIsNotNone(resp)
entity = self.ts.get_entity(self.table_name, '001', 'batch_update')
self.assertEqual('value1', entity.test2)
self.assertEqual(resp[0], entity.etag)
@record
def test_batch_merge(self):
# Arrange
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_merge'
entity.test = EntityProperty(EdmType.BOOLEAN, 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(EdmType.INT64, '1234567890')
entity.test5 = datetime.utcnow()
self.ts.insert_entity(self.table_name, entity)
entity = self.ts.get_entity(self.table_name, '001', 'batch_merge')
self.assertEqual(3, entity.test3)
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_merge'
entity.test2 = 'value1'
batch = TableBatch()
batch.merge_entity(entity)
resp = self.ts.commit_batch(self.table_name, batch)
# Assert
self.assertIsNotNone(resp)
entity = self.ts.get_entity(self.table_name, '001', 'batch_merge')
self.assertEqual('value1', entity.test2)
self.assertEqual(1234567890, entity.test4)
self.assertEqual(resp[0], entity.etag)
@record
def test_batch_update_if_match(self):
# Arrange
entity = self._create_default_entity_dict()
etag = self.ts.insert_entity(self.table_name, entity)
# Act
sent_entity = self._create_updated_entity_dict(entity['PartitionKey'], entity['RowKey'])
batch = TableBatch()
batch.update_entity(sent_entity, etag)
resp = self.ts.commit_batch(self.table_name, batch)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(self.table_name, entity['PartitionKey'], entity['RowKey'])
self._assert_updated_entity(received_entity)
self.assertEqual(resp[0], received_entity.etag)
@record
def test_batch_update_if_doesnt_match(self):
# Arrange
entity = self._create_default_entity_dict()
self.ts.insert_entity(self.table_name, entity)
# Act
sent_entity1 = self._create_updated_entity_dict(entity['PartitionKey'], entity['RowKey'])
batch = TableBatch()
batch.update_entity(
sent_entity1,
if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"')
try:
self.ts.commit_batch(self.table_name, batch)
except AzureBatchOperationError as error:
self.assertEqual(error.code, 'UpdateConditionNotSatisfied')
self.assertTrue(str(error).startswith('The update condition specified in the request was not satisfied.'))
else:
self.fail('AzureBatchOperationError was expected')
# Assert
received_entity = self.ts.get_entity(self.table_name, entity['PartitionKey'], entity['RowKey'])
self._assert_default_entity(received_entity)
@record
def test_batch_insert_replace(self):
# Arrange
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert_replace'
entity.test = EntityProperty(EdmType.BOOLEAN, 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(EdmType.INT64, '1234567890')
entity.test5 = datetime.utcnow()
batch = TableBatch()
batch.insert_or_replace_entity(entity)
resp = self.ts.commit_batch(self.table_name, batch)
# Assert
self.assertIsNotNone(resp)
entity = self.ts.get_entity(
self.table_name, '001', 'batch_insert_replace')
self.assertIsNotNone(entity)
self.assertEqual('value', entity.test2)
self.assertEqual(1234567890, entity.test4)
self.assertEqual(resp[0], entity.etag)
@record
def test_batch_insert_merge(self):
# Arrange
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert_merge'
entity.test = EntityProperty(EdmType.BOOLEAN, 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(EdmType.INT64, '1234567890')
entity.test5 = datetime.utcnow()
batch = TableBatch()
batch.insert_or_merge_entity(entity)
resp = self.ts.commit_batch(self.table_name, batch)
# Assert
self.assertIsNotNone(resp)
entity = self.ts.get_entity(
self.table_name, '001', 'batch_insert_merge')
self.assertIsNotNone(entity)
self.assertEqual('value', entity.test2)
self.assertEqual(1234567890, entity.test4)
self.assertEqual(resp[0], entity.etag)
@record
def test_batch_delete(self):
# Arrange
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_delete'
entity.test = EntityProperty(EdmType.BOOLEAN, 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(EdmType.INT64, '1234567890')
entity.test5 = datetime.utcnow()
self.ts.insert_entity(self.table_name, entity)
entity = self.ts.get_entity(self.table_name, '001', 'batch_delete')
self.assertEqual(3, entity.test3)
batch = TableBatch()
batch.delete_entity('001', 'batch_delete')
resp = self.ts.commit_batch(self.table_name, batch)
# Assert
self.assertIsNotNone(resp)
self.assertIsNone(resp[0])
@record
def test_batch_inserts(self):
# Arrange
# Act
entity = Entity()
entity.PartitionKey = 'batch_inserts'
entity.test = EntityProperty(EdmType.BOOLEAN, 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(EdmType.INT64, '1234567890')
batch = TableBatch()
for i in range(100):
entity.RowKey = str(i)
batch.insert_entity(entity)
self.ts.commit_batch(self.table_name, batch)
entities = list(self.ts.query_entities(self.table_name, "PartitionKey eq 'batch_inserts'", ''))
# Assert
self.assertIsNotNone(entities)
self.assertEqual(100, len(entities))
@record
def test_batch_all_operations_together(self):
# Arrange
# Act
entity = Entity()
entity.PartitionKey = '003'
entity.RowKey = 'batch_all_operations_together-1'
entity.test = EntityProperty(EdmType.BOOLEAN, 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(EdmType.INT64, '1234567890')
entity.test5 = datetime.utcnow()
self.ts.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-2'
self.ts.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-3'
self.ts.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-4'
self.ts.insert_entity(self.table_name, entity)
batch = TableBatch()
entity.RowKey = 'batch_all_operations_together'
batch.insert_entity(entity)
entity.RowKey = 'batch_all_operations_together-1'
batch.delete_entity(entity.PartitionKey, entity.RowKey)
entity.RowKey = 'batch_all_operations_together-2'
entity.test3 = 10
batch.update_entity(entity)
entity.RowKey = 'batch_all_operations_together-3'
entity.test3 = 100
batch.merge_entity(entity)
entity.RowKey = 'batch_all_operations_together-4'
entity.test3 = 10
batch.insert_or_replace_entity(entity)
entity.RowKey = 'batch_all_operations_together-5'
batch.insert_or_merge_entity(entity)
resp = self.ts.commit_batch(self.table_name, batch)
# Assert
self.assertEqual(6, len(resp))
entities = list(self.ts.query_entities(self.table_name, "PartitionKey eq '003'", ''))
self.assertEqual(5, len(entities))
@record
def test_batch_all_operations_together_context_manager(self):
# Arrange
# Act
entity = Entity()
entity.PartitionKey = '003'
entity.RowKey = 'batch_all_operations_together-1'
entity.test = EntityProperty(EdmType.BOOLEAN, 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(EdmType.INT64, '1234567890')
entity.test5 = datetime.utcnow()
self.ts.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-2'
self.ts.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-3'
self.ts.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-4'
self.ts.insert_entity(self.table_name, entity)
with self.ts.batch(self.table_name) as batch:
entity.RowKey = 'batch_all_operations_together'
batch.insert_entity(entity)
entity.RowKey = 'batch_all_operations_together-1'
batch.delete_entity(entity.PartitionKey, entity.RowKey)
entity.RowKey = 'batch_all_operations_together-2'
entity.test3 = 10
batch.update_entity(entity)
entity.RowKey = 'batch_all_operations_together-3'
entity.test3 = 100
batch.merge_entity(entity)
entity.RowKey = 'batch_all_operations_together-4'
entity.test3 = 10
batch.insert_or_replace_entity(entity)
entity.RowKey = 'batch_all_operations_together-5'
batch.insert_or_merge_entity(entity)
# Assert
entities = list(self.ts.query_entities(self.table_name, "PartitionKey eq '003'", ''))
self.assertEqual(5, len(entities))
@record
def test_batch_reuse(self):
# Arrange
table2 = self._get_table_reference('table2')
self.ts.create_table(table2)
# Act
entity = Entity()
entity.PartitionKey = '003'
entity.RowKey = 'batch_all_operations_together-1'
entity.test = EntityProperty(EdmType.BOOLEAN, 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(EdmType.INT64, '1234567890')
entity.test5 = datetime.utcnow()
batch = TableBatch()
batch.insert_entity(entity)
entity.RowKey = 'batch_all_operations_together-2'
batch.insert_entity(entity)
entity.RowKey = 'batch_all_operations_together-3'
batch.insert_entity(entity)
entity.RowKey = 'batch_all_operations_together-4'
batch.insert_entity(entity)
self.ts.commit_batch(self.table_name, batch)
self.ts.commit_batch(table2, batch)
batch = TableBatch()
entity.RowKey = 'batch_all_operations_together'
batch.insert_entity(entity)
entity.RowKey = 'batch_all_operations_together-1'
batch.delete_entity(entity.PartitionKey, entity.RowKey)
entity.RowKey = 'batch_all_operations_together-2'
entity.test3 = 10
batch.update_entity(entity)
entity.RowKey = 'batch_all_operations_together-3'
entity.test3 = 100
batch.merge_entity(entity)
entity.RowKey = 'batch_all_operations_together-4'
entity.test3 = 10
batch.insert_or_replace_entity(entity)
entity.RowKey = 'batch_all_operations_together-5'
batch.insert_or_merge_entity(entity)
self.ts.commit_batch(self.table_name, batch)
resp = self.ts.commit_batch(table2, batch)
# Assert
self.assertEqual(6, len(resp))
entities = list(self.ts.query_entities(self.table_name, "PartitionKey eq '003'", ''))
self.assertEqual(5, len(entities))
@record
def test_batch_same_row_operations_fail(self):
# Arrange
entity = self._create_default_entity_dict('001', 'batch_negative_1')
self.ts.insert_entity(self.table_name, entity)
# Act
with self.assertRaises(AzureBatchValidationError):
batch = TableBatch()
entity = self._create_updated_entity_dict(
'001', 'batch_negative_1')
batch.update_entity(entity)
entity = self._create_default_entity_dict(
'001', 'batch_negative_1')
batch.merge_entity(entity)
# Assert
@record
def test_batch_different_partition_operations_fail(self):
# Arrange
entity = self._create_default_entity_dict('001', 'batch_negative_1')
self.ts.insert_entity(self.table_name, entity)
# Act
with self.assertRaises(AzureBatchValidationError):
batch = TableBatch()
entity = self._create_updated_entity_dict(
'001', 'batch_negative_1')
batch.update_entity(entity)
entity = self._create_default_entity_dict(
'002', 'batch_negative_1')
batch.insert_entity(entity)
# Assert
@record
def test_batch_too_many_ops(self):
# Arrange
entity = self._create_default_entity_dict('001', 'batch_negative_1')
self.ts.insert_entity(self.table_name, entity)
# Act
with self.assertRaises(AzureBatchValidationError):
batch = TableBatch()
for i in range(0, 101):
entity = Entity()
entity.PartitionKey = 'large'
entity.RowKey = 'item{0}'.format(i)
batch.insert_entity(entity)
self.ts.commit_batch(self.table_name, batch)
# Assert
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -2,093,046,100,258,508,000 | 35.664384 | 118 | 0.599664 | false |
TomTranter/OpenPNM | openpnm/io/MAT.py | 1 | 3499 | import scipy.io as spio
from flatdict import FlatDict
from openpnm.io import GenericIO, Dict
from openpnm.utils import sanitize_dict, logging, Workspace
logger = logging.getLogger(__name__)
ws = Workspace()
class MAT(GenericIO):
r"""
MAT files are a format used by Matlab
Notes
-----
The 'mat' file must contain data formatted as follows:
1. The file can contain either or both pore and throat data.
2. The property names should be in the format of ``pore_volume`` or
``throat_surface_area``. In OpenPNM the first '_' will be replaced by
a '.' to give ``'pore.volume'`` or ``'throat.surface_area'``.
3. Boolean data represented as 1's and 0's will be converted to the
Python boolean True and False. These will become \'labels\' in
OpenPNM.
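    A minimal sketch, following the naming convention in the notes above
    (the variable names and array sizes are illustrative assumptions only),
    of a compliant file written from Python with scipy::
        import numpy as np
        import scipy.io as spio
        spio.savemat('network.mat', {
            'pore_volume': np.random.rand(27),          # becomes 'pore.volume'
            'throat_surface_area': np.random.rand(54),  # becomes 'throat.surface_area'
            'pore_boundary': np.zeros(27),              # 1's/0's become the 'pore.boundary' label
        })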
"""
@classmethod
def save(cls, network, phases=[], filename=''):
r"""
Write Network to a Mat file for exporting to Matlab.
Parameters
----------
network : OpenPNM Network Object
filename : string
Desired file name, defaults to network name if not given
phases : list of phase objects ([])
Phases that have properties we want to write to file
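        Examples
        --------
        A hedged usage sketch; the network and file names are assumptions::
            >>> import openpnm as op
            >>> pn = op.network.Cubic(shape=[5, 5, 5])
            >>> op.io.MAT.save(network=pn, filename='my_network')
        This writes 'my_network.mat', with keys flattened to underscore form
        by the key rewriting below.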
"""
project, network, phases = cls._parse_args(network=network,
phases=phases)
network = network[0]
# Write to file
if filename == '':
filename = project.name
filename = cls._parse_filename(filename=filename, ext='mat')
d = Dict.to_dict(network=network, phases=phases, interleave=True)
d = FlatDict(d, delimiter='|')
d = sanitize_dict(d)
new_d = {}
for key in list(d.keys()):
new_key = key.replace('|', '_').replace('.', '_')
new_d[new_key] = d.pop(key)
spio.savemat(file_name=filename, mdict=new_d)
@classmethod
def load(cls, filename, project=None):
r"""
Loads data onto the given network from an appropriately formatted
'mat' file (i.e. MatLAB output).
Parameters
----------
filename : string (optional)
The name of the file containing the data to import. The formatting
of this file is outlined below.
project : OpenPNM Project object
A GenericNetwork is created and added to the specified Project.
If no Project object is supplied then one will be created and
returned.
Returns
-------
If no project object is supplied then one will be created and returned.
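        Examples
        --------
        A hedged usage sketch; the file name is an assumption::
            >>> import openpnm as op
            >>> proj = op.io.MAT.load('my_network.mat')
        The returned project contains a network built from the file's arrays.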
"""
filename = cls._parse_filename(filename=filename, ext='mat')
data = spio.loadmat(filename)
# Reinsert the '.' separator into the array names
for item in list(data.keys()):
if item in ['__header__', '__version__', '__globals__']:
data.pop(item)
continue
elif '_pore_' in item:
path, prop = item.split('_pore_')
new_key = path + '|pore.' + prop
elif '_throat_' in item:
path, prop = item.split('_throat_')
new_key = path + '|throat.' + prop
data[new_key] = data.pop(item)
if project is None:
project = ws.new_project()
project = Dict.from_dict(data, project=project, delim='|')
project = cls._convert_data(project)
return project
| mit | 1,862,196,102,815,398,400 | 32.32381 | 79 | 0.570163 | false |
ImageEngine/gaffer | python/GafferImageTest/ResizeTest.py | 5 | 11709 | ##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class ResizeTest( GafferImageTest.ImageTestCase ) :
def testDefaultFormat( self ) :
r = GafferImage.Resize()
self.assertTrue( r["format"].getValue().getDisplayWindow().isEmpty() )
f1 = GafferImage.Format( imath.Box2i( imath.V2i( 1, 2 ), imath.V2i( 11, 12 ) ), 1 )
f2 = GafferImage.Format( imath.Box2i( imath.V2i( 100, 200 ), imath.V2i( 1100, 1200 ) ), 1 )
c1 = Gaffer.Context()
c2 = Gaffer.Context()
GafferImage.FormatPlug.setDefaultFormat( c1, f1 )
GafferImage.FormatPlug.setDefaultFormat( c2, f2 )
with c1 :
self.assertEqual( r["out"]["format"].getValue(), f1 )
with c2 :
self.assertEqual( r["out"]["format"].getValue(), f2 )
def testChannelDataPassThrough( self ) :
# Resize to the same size as the input image.
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 512 ) ), 1 ) )
c["color"].setValue( imath.Color4f( 0.25, 0.5, 0.75, 1 ) )
r = GafferImage.Resize()
r["in"].setInput( c["out"] )
r["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 512 ) ), 1 ) )
# Assert that the pixel data is passed clean through, even
# if we request a new pixel aspect ratio.
for pixelAspect in ( 0.5, 1, 2 ) :
r["format"]["pixelAspect"].setValue( pixelAspect )
for channel in [ "R", "G", "B", "A" ] :
self.assertEqual(
c["out"].channelDataHash( channel, imath.V2i( 0 ) ),
r["out"].channelDataHash( channel, imath.V2i( 0 ) ),
)
self.assertTrue(
c["out"].channelData( channel, imath.V2i( 0 ), _copy = False ).isSame(
r["out"].channelData( channel, imath.V2i( 0 ), _copy = False )
)
)
	# Tests that the node raises for deep (non-flat) input data
def testNonFlatThrows( self ) :
resize = GafferImage.Resize()
resize["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 1024 ) ), 1 ) )
self.assertRaisesDeepNotSupported( resize )
def testFit( self ) :
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 256, 128 ) ), 1 ) )
c["color"].setValue( imath.Color4f( 0.25, 0.5, 0.75, 1 ) )
r = GafferImage.Resize()
r["in"].setInput( c["out"] )
r["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 1024, 256 ) ), 1 ) )
self.assertEqual( r["fitMode"].getValue(), r.FitMode.Horizontal )
horizontalDataWindow = r["out"]["dataWindow"].getValue()
displayWindow = r["format"].getValue().getDisplayWindow()
self.assertEqual( horizontalDataWindow.min().x, displayWindow.min().x )
self.assertEqual( horizontalDataWindow.max().x, displayWindow.max().x )
self.assertTrue( horizontalDataWindow.min().y < displayWindow.min().y )
self.assertTrue( horizontalDataWindow.max().y > displayWindow.max().y )
r["fitMode"].setValue( r.FitMode.Vertical )
verticalDataWindow = r["out"]["dataWindow"].getValue()
self.assertTrue( verticalDataWindow.min().x > displayWindow.min().x )
self.assertTrue( verticalDataWindow.max().x < displayWindow.max().x )
self.assertEqual( verticalDataWindow.min().y, displayWindow.min().y )
self.assertEqual( verticalDataWindow.max().y, displayWindow.max().y )
r["fitMode"].setValue( r.FitMode.Fit )
self.assertEqual( r["out"]["dataWindow"].getValue(), verticalDataWindow )
r["fitMode"].setValue( r.FitMode.Fill )
self.assertEqual( r["out"]["dataWindow"].getValue(), horizontalDataWindow )
r["fitMode"].setValue( r.FitMode.Distort )
self.assertEqual( r["out"]["dataWindow"].getValue(), displayWindow )
def testMismatchedDataWindow( self ) :
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 256, 256 ) ), 1 ) )
crop = GafferImage.Crop()
crop["in"].setInput( constant["out"] )
crop["areaSource"].setValue( crop.AreaSource.Area )
crop["area"].setValue( imath.Box2i( imath.V2i( 64 ), imath.V2i( 128 ) ) )
crop["affectDisplayWindow"].setValue( False )
crop["affectDataWindow"].setValue( True )
resize = GafferImage.Resize()
resize["in"].setInput( crop["out"] )
resize["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 512, 512 ) ), 1 ) )
self.assertEqual(
resize["out"]["dataWindow"].getValue(),
imath.Box2i( imath.V2i( 128 ), imath.V2i( 256 ) )
)
def testDataWindowRounding( self ) :
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 200, 150 ) ), 1 ) )
resize = GafferImage.Resize()
resize["in"].setInput( constant["out"] )
for width in range( 1, 2000 ) :
resize["format"].setValue( GafferImage.Format( width, 150, 1 ) )
dataWindow = resize["out"]["dataWindow"].getValue()
self.assertEqual( dataWindow.min().x, 0 )
self.assertEqual( dataWindow.max().x, width )
resize["fitMode"].setValue( resize.FitMode.Vertical )
for height in range( 1, 2000 ) :
resize["format"].setValue( GafferImage.Format( 200, height, 1 ) )
dataWindow = resize["out"]["dataWindow"].getValue()
self.assertEqual( dataWindow.min().y, 0 )
self.assertEqual( dataWindow.max().y, height )
def testFilterAffectsChannelData( self ) :
r = GafferImage.Resize()
cs = GafferTest.CapturingSlot( r.plugDirtiedSignal() )
r["filter"].setValue( "gaussian" )
self.assertTrue( r["out"]["channelData"] in set( c[0] for c in cs ) )
def testSamplerBoundsViolationCrash( self ) :
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 3792, 3160 ) )
r = GafferImage.Resize()
r["in"].setInput( c["out"] )
r["format"].setValue( GafferImage.Format( 1920, 1080 ) )
r["fitMode"].setValue( r.FitMode.Vertical )
GafferImageTest.processTiles( r["out"] )
def testDownsizingSamplerBounds( self ) :
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 50, 53 ) )
r = GafferImage.Resize()
r["in"].setInput( c["out"] )
r["fitMode"].setValue( r.FitMode.Distort )
# Downsize to every single size smaller than the input,
# to check for sampler bounds violations similar to those
# which motivated the test above.
for width in range( 1, 50 ) :
for height in range( 1, 53 ) :
r["format"].setValue( GafferImage.Format( width, height ) )
GafferImageTest.processTiles( r["out"] )
def testFormatDependencies( self ) :
r = GafferImage.Resize()
cs = GafferTest.CapturingSlot( r.plugDirtiedSignal() )
r["format"].setValue( GafferImage.Format( 100, 200, 2 ) )
dirtiedPlugs = set( c[0] for c in cs )
self.assertTrue( r["out"]["format"] in dirtiedPlugs )
self.assertTrue( r["out"]["dataWindow"] in dirtiedPlugs )
def testDisable( self ) :
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 100, 100 ) )
r = GafferImage.Resize()
r["in"].setInput( c["out"] )
r["format"].setValue( GafferImage.Format( 200, 200 ) )
self.assertEqual( r["out"]["format"].getValue(), GafferImage.Format( 200, 200 ) )
self.assertEqual( r["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 200 ) ) )
r["enabled"].setValue( False )
self.assertEqual( r["out"]["format"].getValue(), GafferImage.Format( 100, 100 ) )
self.assertEqual( r["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 100 ) ) )
def testEmptyDataWindow( self ) :
e = self.emptyImage()
r = GafferImage.Resize()
r["in"].setInput( e["out"] )
r["format"].setValue( GafferImage.Format( 2121, 1012 ) )
self.assertEqual( r["out"]["dataWindow"].getValue(), imath.Box2i() )
def testPixelAspectRatio( self ) :
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 1000, 1000 ) )
r = GafferImage.Resize()
r["in"].setInput( c["out"] )
r["format"].setValue( GafferImage.Format( 1500, 1000 ) )
for fitMode in r.FitMode.values :
r["fitMode"].setValue( fitMode )
for inputPixelAspect in ( 0.5, 1, 2 ) :
c["format"]["pixelAspect"].setValue( inputPixelAspect )
for outputPixelAspect in ( 0.5, 1, 2 ) :
r["format"]["pixelAspect"].setValue( outputPixelAspect )
if fitMode == r.FitMode.Horizontal :
self.assertEqual( r["out"]["dataWindow"].getValue().min().x, r["out"]["format"].getValue().getDisplayWindow().min().x )
self.assertEqual( r["out"]["dataWindow"].getValue().max().x, r["out"]["format"].getValue().getDisplayWindow().max().x )
elif fitMode == r.FitMode.Vertical :
self.assertEqual( r["out"]["dataWindow"].getValue().min().y, r["out"]["format"].getValue().getDisplayWindow().min().y )
self.assertEqual( r["out"]["dataWindow"].getValue().max().y, r["out"]["format"].getValue().getDisplayWindow().max().y )
if fitMode != r.FitMode.Distort :
# All fit modes other than Distort should ensure that the aspect
# ratio of the output data window is the same as the aspect ratio
# of the input data window.
inputDataWindow = r["in"]["dataWindow"].getValue()
inputFormat = r["in"]["format"].getValue()
inputAspect = (inputDataWindow.size().x) * inputFormat.getPixelAspect() / (inputDataWindow.size().y)
outputDataWindow = r["out"]["dataWindow"].getValue()
outputFormat = r["out"]["format"].getValue()
outputAspect = (outputDataWindow.size().x) * outputFormat.getPixelAspect() / (outputDataWindow.size().y)
# `delta` accounts for the fact that we're comparing integer data windows
# which have been expanded to enclose "fractional" pixels.
self.assertAlmostEqual( outputAspect, inputAspect, delta = 0.01 )
else :
# Distort mode - data window fills output format.
self.assertEqual( r["out"]["dataWindow"].getValue(), r["out"]["format"].getValue().getDisplayWindow() )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -3,489,131,475,276,243,000 | 36.770968 | 125 | 0.668631 | false |
Edraak/edraak-platform | openedx/core/djangoapps/theming/storage.py | 11 | 13777 | """
Comprehensive Theming support for Django's collectstatic functionality.
See https://docs.djangoproject.com/en/1.8/ref/contrib/staticfiles/
"""
import os.path
import posixpath
import re
from django.conf import settings
from django.contrib.staticfiles.finders import find
from django.contrib.staticfiles.storage import CachedFilesMixin, StaticFilesStorage
from django.utils._os import safe_join
from django.utils.six.moves.urllib.parse import ( # pylint: disable=no-name-in-module, import-error
unquote,
urldefrag,
urlsplit
)
from pipeline.storage import PipelineMixin
from openedx.core.djangoapps.theming.helpers import (
get_current_theme,
get_project_root_name,
get_theme_base_dir,
get_themes,
is_comprehensive_theming_enabled
)
class ThemeStorage(StaticFilesStorage):
"""
Comprehensive theme aware Static files storage.
"""
# prefix for file path, this prefix is added at the beginning of file path before saving static files during
# collectstatic command.
# e.g. having "edx.org" as prefix will cause files to be saved as "edx.org/images/logo.png"
# instead of "images/logo.png"
prefix = None
def __init__(self, location=None, base_url=None, file_permissions_mode=None,
directory_permissions_mode=None, prefix=None):
self.prefix = prefix
super(ThemeStorage, self).__init__(
location=location,
base_url=base_url,
file_permissions_mode=file_permissions_mode,
directory_permissions_mode=directory_permissions_mode,
)
def url(self, name):
"""
Returns url of the asset, themed url will be returned if the asset is themed otherwise default
asset url will be returned.
Args:
name: name of the asset, e.g. 'images/logo.png'
Returns:
url of the asset, e.g. '/static/red-theme/images/logo.png' if current theme is red-theme and logo
is provided by red-theme otherwise '/static/images/logo.png'
"""
prefix = ''
theme = get_current_theme()
        # get theme prefix from site address if asset is accessed via a url
if theme:
prefix = theme.theme_dir_name
# get theme prefix from storage class, if asset is accessed during collectstatic run
elif self.prefix:
prefix = self.prefix
# join theme prefix with asset name if theme is applied and themed asset exists
if prefix and self.themed(name, prefix):
name = os.path.join(prefix, name)
return super(ThemeStorage, self).url(name)
def themed(self, name, theme):
"""
Returns True if given asset override is provided by the given theme otherwise returns False.
Args:
name: asset name e.g. 'images/logo.png'
theme: theme name e.g. 'red-theme', 'edx.org'
Returns:
True if given asset override is provided by the given theme otherwise returns False
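        Example:
            Assuming ``storage`` is an instance of this class and a
            'red-theme' that ships its own logo override::
                storage.themed('images/logo.png', 'red-theme')     # True
                storage.themed('images/missing.png', 'red-theme')  # False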
"""
if not is_comprehensive_theming_enabled():
return False
# in debug mode check static asset from within the project directory
if settings.DEBUG:
themes_location = get_theme_base_dir(theme, suppress_error=True)
# Nothing can be themed if we don't have a theme location or required params.
if not all((themes_location, theme, name)):
return False
themed_path = "/".join([
themes_location,
theme,
get_project_root_name(),
"static/"
])
name = name[1:] if name.startswith("/") else name
path = safe_join(themed_path, name)
return os.path.exists(path)
# in live mode check static asset in the static files dir defined by "STATIC_ROOT" setting
else:
return self.exists(os.path.join(theme, name))
class ThemeCachedFilesMixin(CachedFilesMixin):
"""
Comprehensive theme aware CachedFilesMixin.
Main purpose of subclassing CachedFilesMixin is to override the following methods.
1 - _url
2 - url_converter
_url:
This method takes asset name as argument and is responsible for adding hash to the name to support caching.
This method is called during both collectstatic command and live server run.
        When called during the collectstatic command, the name argument will be the asset name inside STATIC_ROOT;
        for non themed assets it will be the usual path (e.g. 'images/logo.png') but for themed assets it will
        also contain the themes dir prefix (e.g. 'red-theme/images/logo.png'). So, here we check whether the themed asset
        exists or not: if it exists we pass the same name up the MRO chain for further processing, and if it does not
        exist we strip the theme name and pass the new asset name up the MRO chain for further processing.
When called during server run, we get the theme dir for the current site using `get_current_theme` and
make sure to prefix theme dir to the asset name. This is done to ensure the usage of correct hash in file name.
e.g. if our red-theme overrides 'images/logo.png' and we do not prefix theme dir to the asset name, the hash for
'{platform-dir}/lms/static/images/logo.png' would be used instead of
'{themes_base_dir}/red-theme/images/logo.png'
url_converter:
This function returns another function that is responsible for hashing urls that appear inside assets
        (e.g. url("images/logo.png") inside css). The method defined in the superclass adds a hash to the file and returns
        the relative url of the file.
e.g. for url("../images/logo.png") it would return url("../images/logo.790c9a5340cb.png"). However we would
want it to return absolute url (e.g. url("/static/images/logo.790c9a5340cb.png")) so that it works properly
with themes.
        The overridden method here simply comments out the line that converts the absolute url to a relative url,
hence absolute urls are used instead of relative urls.
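    Illustration (the paths and hashes below are made up, purely for example):
    with 'red-theme' active, a request for 'images/logo.png' is hashed as
    'red-theme/images/logo.png' when the themed copy exists, yielding e.g.
    '/static/red-theme/images/logo.1a2b3c4d5e6f.png'; otherwise the theme
    prefix is stripped and the default asset's hashed url is returned.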
"""
def _processed_asset_name(self, name):
"""
Returns either a themed or unthemed version of the given asset name,
depending on several factors.
See the class docstring for more info.
"""
theme = get_current_theme()
if theme and theme.theme_dir_name not in name:
# during server run, append theme name to the asset name if it is not already there
            # this is to ensure that the correct hash is created and the default asset is not always
# used to create hash of themed assets.
name = os.path.join(theme.theme_dir_name, name)
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
asset_name = name
if not self.exists(clean_name):
            # if themed asset does not exist then use default asset
theme = name.split("/", 1)[0]
# verify that themed asset was accessed
if theme in [theme.theme_dir_name for theme in get_themes()]:
asset_name = "/".join(name.split("/")[1:])
return asset_name
def _url(self, hashed_name_func, name, force=False, hashed_files=None):
"""
This override method swaps out `name` with a processed version.
See the class docstring for more info.
"""
processed_asset_name = self._processed_asset_name(name)
return super(ThemeCachedFilesMixin, self)._url(hashed_name_func, processed_asset_name, force, hashed_files)
def url_converter(self, name, hashed_files, template=None):
"""
This is an override of url_converter from CachedFilesMixin.
It changes one line near the end of the method (see the NOTE) in order
to return absolute urls instead of relative urls. This behavior is
necessary for theme overrides, as we get 404 on assets with relative
urls on a themed site.
"""
if template is None:
template = self.default_template
def converter(matchobj):
"""
Convert the matched URL to a normalized and hashed URL.
This requires figuring out which files the matched URL resolves
to and calling the url() method of the storage.
"""
matched, url = matchobj.groups()
# Ignore absolute/protocol-relative and data-uri URLs.
if re.match(r'^[a-z]+:', url):
return matched
# Ignore absolute URLs that don't point to a static file (dynamic
# CSS / JS?). Note that STATIC_URL cannot be empty.
if url.startswith('/') and not url.startswith(settings.STATIC_URL):
return matched
# Strip off the fragment so a path-like fragment won't interfere.
url_path, fragment = urldefrag(url)
if url_path.startswith('/'):
# Otherwise the condition above would have returned prematurely.
assert url_path.startswith(settings.STATIC_URL)
target_name = url_path[len(settings.STATIC_URL):]
else:
# We're using the posixpath module to mix paths and URLs conveniently.
source_name = name if os.sep == '/' else name.replace(os.sep, '/')
target_name = posixpath.join(posixpath.dirname(source_name), url_path)
# Determine the hashed name of the target file with the storage backend.
hashed_url = self._url(
self._stored_name, unquote(target_name),
force=True, hashed_files=hashed_files,
)
# NOTE:
# The line below was commented out so that absolute urls are used instead of relative urls to make themed
# assets work correctly.
#
# The line is commented and not removed to make future django upgrade easier and show exactly what is
# changed in this method override
#
#transformed_url = '/'.join(url_path.split('/')[:-1] + hashed_url.split('/')[-1:])
transformed_url = hashed_url # This line was added.
# Restore the fragment that was stripped off earlier.
if fragment:
transformed_url += ('?#' if '?#' in url else '#') + fragment
# Return the hashed version to the file
return template % unquote(transformed_url)
return converter
class ThemePipelineMixin(PipelineMixin):
"""
Mixin to make sure themed assets are also packaged and used along with non themed assets.
    If a source asset for a particular package is not present then the default asset is used.
e.g. in the following package and for 'red-theme'
'style-vendor': {
'source_filenames': [
'js/vendor/afontgarde/afontgarde.css',
'css/vendor/font-awesome.css',
'css/vendor/jquery.qtip.min.css',
'css/vendor/responsive-carousel/responsive-carousel.css',
'css/vendor/responsive-carousel/responsive-carousel.slide.css',
],
'output_filename': 'css/lms-style-vendor.css'
}
    'red-theme/css/vendor/responsive-carousel/responsive-carousel.css' will be used if it exists, otherwise
'css/vendor/responsive-carousel/responsive-carousel.css' will be used to create 'red-theme/css/lms-style-vendor.css'
"""
packing = True
def post_process(self, paths, dry_run=False, **options):
"""
This post_process hook is used to package all themed assets.
"""
if dry_run:
return
themes = get_themes()
for theme in themes:
css_packages = self.get_themed_packages(theme.theme_dir_name, settings.PIPELINE_CSS)
from pipeline.packager import Packager
packager = Packager(storage=self, css_packages=css_packages)
for package_name in packager.packages['css']:
package = packager.package_for('css', package_name)
output_file = package.output_filename
if self.packing:
packager.pack_stylesheets(package)
paths[output_file] = (self, output_file)
yield output_file, output_file, True
super_class = super(ThemePipelineMixin, self)
if hasattr(super_class, 'post_process'):
for name, hashed_name, processed in super_class.post_process(paths.copy(), dry_run, **options):
yield name, hashed_name, processed
@staticmethod
def get_themed_packages(prefix, packages):
"""
Update paths with the themed assets,
Args:
prefix: theme prefix for which to update asset paths e.g. 'red-theme', 'edx.org' etc.
packages: packages to update
        Returns: dict of packages whose 'source_filenames' and 'output_filename' have been updated to use themed assets where they exist
"""
themed_packages = {}
for name in packages:
# collect source file names for the package
source_files = []
for path in packages[name].get('source_filenames', []):
# if themed asset exists use that, otherwise use default asset.
if find(os.path.join(prefix, path)):
source_files.append(os.path.join(prefix, path))
else:
source_files.append(path)
themed_packages[name] = {
'output_filename': os.path.join(prefix, packages[name].get('output_filename', '')),
'source_filenames': source_files,
}
return themed_packages
| agpl-3.0 | -9,073,491,955,328,966,000 | 42.053125 | 120 | 0.628221 | false |
ijstokes/sql-analysis-with-django-orm | datasnoop/employees/admin_filter.py | 1 | 1857 | from django.contrib import admin
# Register your models here.
from datasnoop.employees.models import Departments, DeptEmp, DeptManager, Employees, Salaries, Titles
#for cls in (Departments, DeptEmp, DeptManager, Employees, Salaries, Titles):
# admin.site.register(cls)
class DepartmentsAdmin(admin.ModelAdmin):
list_display = ('dept_name', 'dept_no')
search_fields = ['dept_name']
admin.site.register(Departments, DepartmentsAdmin)
class DeptEmpAdmin(admin.ModelAdmin):
list_display = ('emp_no', 'dept_no', 'from_date', 'to_date')
list_filter = ('dept_no', 'from_date', 'to_date')
search_fields = ['emp_no__last_name', 'dept_no__dept_name']
date_hierarchy = 'from_date'
admin.site.register(DeptEmp, DeptEmpAdmin)
class DeptManagerAdmin(admin.ModelAdmin):
list_display = ('emp_no', 'dept_no', 'from_date', 'to_date')
list_filter = ('dept_no', 'from_date', 'to_date')
search_fields = ['emp_no__last_name', 'dept_no__dept_name']
date_hierarchy = 'from_date'
admin.site.register(DeptManager, DeptManagerAdmin)
class EmployeesAdmin(admin.ModelAdmin):
list_display = ('emp_no', 'last_name', 'first_name', 'gender', 'birth_date', 'hire_date')
list_filter = ('gender', 'birth_date', 'hire_date')
search_fields = ['emp_no__last_name']
date_hierarchy = 'birth_date'
admin.site.register(Employees, EmployeesAdmin)
class SalariesAdmin(admin.ModelAdmin):
list_display = ('emp_no', 'salary', 'from_date', 'to_date')
list_filter = ('from_date', 'to_date')
search_fields = ['emp_no__last_name']
date_hierarchy = 'from_date'
admin.site.register(Salaries, SalariesAdmin)
class TitlesAdmin(admin.ModelAdmin):
list_display = ('emp_no', 'title', 'from_date', 'to_date')
search_fields = ['emp_no__last_name']
date_hierarchy = 'from_date'
admin.site.register(Titles, TitlesAdmin)
| bsd-3-clause | 2,275,079,886,573,565,200 | 38.510638 | 101 | 0.686053 | false |
vauxoo-dev/stock-logistics-warehouse | stock_inventory_preparation_filter/models/stock_inventory.py | 10 | 5857 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api, _
class StockInventoryEmptyLines(models.Model):
_name = 'stock.inventory.line.empty'
product_code = fields.Char(
string='Product Code', size=64, required=True)
product_qty = fields.Float(
string='Quantity', required=True, default=1.0)
inventory_id = fields.Many2one(
comodel_name='stock.inventory', string='Inventory',
required=True, ondelete="cascade")
class StockInventoryFake(object):
def __init__(self, inventory, product=None, lot=None):
self.id = inventory.id
self.location_id = inventory.location_id
self.product_id = product
self.lot_id = lot
self.partner_id = inventory.partner_id
self.package_id = inventory.package_id
class StockInventory(models.Model):
_inherit = 'stock.inventory'
@api.model
def _get_available_filters(self):
"""This function will return the list of filters allowed according to
the options checked in 'Settings/Warehouse'.
:return: list of tuple
"""
res_filters = super(StockInventory, self)._get_available_filters()
res_filters.append(('categories', _('Selected Categories')))
res_filters.append(('products', _('Selected Products')))
for res_filter in res_filters:
if res_filter[0] == 'lot':
res_filters.append(('lots', _('Selected Lots')))
res_filters.append(('empty', _('Empty list')))
return res_filters
filter = fields.Selection(
selection=_get_available_filters, string='Selection Filter',
required=True)
categ_ids = fields.Many2many(
comodel_name='product.category', relation='rel_inventories_categories',
column1='inventory_id', column2='category_id', string='Categories')
product_ids = fields.Many2many(
comodel_name='product.product', relation='rel_inventories_products',
column1='inventory_id', column2='product_id', string='Products')
lot_ids = fields.Many2many(
comodel_name='stock.production.lot', relation='rel_inventories_lots',
column1='inventory_id', column2='lot_id', string='Lots')
empty_line_ids = fields.One2many(
comodel_name='stock.inventory.line.empty', inverse_name='inventory_id',
string='Capture Lines')
@api.model
def _get_inventory_lines(self, inventory):
vals = []
product_tmpl_obj = self.env['product.template']
product_obj = self.env['product.product']
if inventory.filter in ('categories', 'products'):
products = product_obj
if inventory.filter == 'categories':
product_tmpls = product_tmpl_obj.search(
[('categ_id', 'in', inventory.categ_ids.ids)])
products = product_obj.search(
[('product_tmpl_id', 'in', product_tmpls.ids)])
elif inventory.filter == 'products':
products = inventory.product_ids
for product in products:
fake_inventory = StockInventoryFake(inventory, product=product)
vals += super(StockInventory, self)._get_inventory_lines(
fake_inventory)
elif inventory.filter == 'lots':
for lot in inventory.lot_ids:
fake_inventory = StockInventoryFake(inventory, lot=lot)
vals += super(StockInventory, self)._get_inventory_lines(
fake_inventory)
elif inventory.filter == 'empty':
tmp_lines = {}
empty_line_obj = self.env['stock.inventory.line.empty']
for line in inventory.empty_line_ids:
if line.product_code in tmp_lines:
tmp_lines[line.product_code] += line.product_qty
else:
tmp_lines[line.product_code] = line.product_qty
inventory.empty_line_ids.unlink()
for product_code in tmp_lines.keys():
products = product_obj.search(
[('default_code', '=', product_code)])
if products:
product = products[0]
fake_inventory = StockInventoryFake(
inventory, product=product)
values = super(StockInventory, self)._get_inventory_lines(
fake_inventory)
if values:
values[0]['product_qty'] = tmp_lines[product_code]
else:
empty_line_obj.create(
{
'product_code': product_code,
'product_qty': tmp_lines[product_code],
'inventory_id': inventory.id,
})
vals += values
else:
vals = super(StockInventory, self)._get_inventory_lines(
inventory)
return vals
| agpl-3.0 | 4,326,832,313,444,453,400 | 43.037594 | 79 | 0.569746 | false |
kiddinn/plaso | tests/parsers/sqlite_plugins/twitter_android.py | 3 | 3470 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for Twitter on Android plugin."""
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import twitter_android
from tests.parsers.sqlite_plugins import test_lib
class TwitterAndroidTest(test_lib.SQLitePluginTestCase):
"""Tests for Twitter on Android database plugin."""
def testProcess(self):
"""Test the Process function on a Twitter Android file."""
plugin = twitter_android.TwitterAndroidPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['twitter_android.db'], plugin)
# We should have 850 events in total.
self.assertEqual(storage_writer.number_of_events, 850)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetSortedEvents())
# Test a status event.
expected_event_values = {
'author_identifier': 2730978846,
'content': (
'@CarolMovie wins BEST PICTURE at #NYFCC!!! CONGRATS #TeamCarol!!! '
'Love love! #carolfilm https://t.co/ycy9cHPLZ7'),
'data_type': 'twitter:android:status',
'date_time': '2015-12-02 17:47:17.000',
'favorited': 0,
'identifier': 4,
'retweeted': 0,
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION,
'username': 'CarolMovieFans'}
self.CheckEventValues(storage_writer, events[482], expected_event_values)
# Test a search event.
expected_event_values = {
'data_type': 'twitter:android:search',
'date_time': '2015-12-02 20:49:38.153',
'name': 'rosegold',
'search_query': 'rosegold',
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION}
self.CheckEventValues(storage_writer, events[837], expected_event_values)
# Test a profile creation event.
expected_event_values = {
'data_type': 'twitter:android:contact',
'date_time': '2008-06-03 18:30:55.000',
'description': (
'Started in a San Francisco by bike messenger Rob Honeycutt, '
'Timbuk2 has been making tough as hell messenger bags, backpacks '
'and travel bags since 1989.'),
'followers': 23582,
'friends': 2725,
'identifier': 62,
'image_url': (
'https://pbs.twimg.com/profile_images/461846147129024512/'
'FOKZJ7hB_normal.jpeg'),
'location': 'San Francisco, CA',
'name': 'Timbuk2',
'statuses': 18937,
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION,
'user_identifier': 14995801,
'username': 'timbuk2',
'web_url': 'http://t.co/Z0MZo7f2ne'}
self.CheckEventValues(storage_writer, events[24], expected_event_values)
# Test a friended event.
expected_event_values = {
'data_type': 'twitter:android:contact',
'date_time': '2015-12-02 20:48:32.382',
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
self.CheckEventValues(storage_writer, events[581], expected_event_values)
# Test a profile update event.
expected_event_values = {
'data_type': 'twitter:android:contact',
'date_time': '2015-12-02 20:49:33.349',
'timestamp_desc': definitions.TIME_DESCRIPTION_UPDATE}
self.CheckEventValues(storage_writer, events[806], expected_event_values)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -1,690,875,949,172,419,800 | 34.773196 | 80 | 0.645533 | false |
duhzecca/cinder | cinder/tests/unit/api/contrib/test_types_extra_specs.py | 20 | 11529 | # Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
# Copyright 2011 University of Southern California
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import mock
import webob
from cinder.api.contrib import types_extra_specs
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
import cinder.wsgi
def return_create_volume_type_extra_specs(context, volume_type_id,
extra_specs):
return stub_volume_type_extra_specs()
def return_volume_type_extra_specs(context, volume_type_id):
return stub_volume_type_extra_specs()
def return_empty_volume_type_extra_specs(context, volume_type_id):
return {}
def delete_volume_type_extra_specs(context, volume_type_id, key):
pass
def delete_volume_type_extra_specs_not_found(context, volume_type_id, key):
raise exception.VolumeTypeExtraSpecsNotFound("Not Found")
def stub_volume_type_extra_specs():
specs = {"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
return specs
def volume_type_get(context, id, inactive=False, expected_fields=None):
pass
class VolumeTypesExtraSpecsTest(test.TestCase):
def setUp(self):
super(VolumeTypesExtraSpecsTest, self).setUp()
self.flags(host='fake')
self.stubs.Set(cinder.db, 'volume_type_get', volume_type_get)
self.api_path = '/v2/fake/os-volume-types/1/extra_specs'
self.controller = types_extra_specs.VolumeTypeExtraSpecsController()
"""to reset notifier drivers left over from other api/contrib tests"""
def test_index(self):
self.stubs.Set(cinder.db, 'volume_type_extra_specs_get',
return_volume_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.index(req, 1)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
def test_index_no_data(self):
self.stubs.Set(cinder.db, 'volume_type_extra_specs_get',
return_empty_volume_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.index(req, 1)
self.assertEqual(0, len(res_dict['extra_specs']))
def test_show(self):
self.stubs.Set(cinder.db, 'volume_type_extra_specs_get',
return_volume_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path + '/key5')
res_dict = self.controller.show(req, 1, 'key5')
self.assertEqual('value5', res_dict['key5'])
def test_show_spec_not_found(self):
self.stubs.Set(cinder.db, 'volume_type_extra_specs_get',
return_empty_volume_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path + '/key6')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, 1, 'key6')
def test_delete(self):
self.stubs.Set(cinder.db, 'volume_type_extra_specs_delete',
delete_volume_type_extra_specs)
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path + '/key5')
self.controller.delete(req, 1, 'key5')
self.assertEqual(1, len(self.notifier.notifications))
def test_delete_not_found(self):
self.stubs.Set(cinder.db, 'volume_type_extra_specs_delete',
delete_volume_type_extra_specs_not_found)
req = fakes.HTTPRequest.blank(self.api_path + '/key6')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, 1, 'key6')
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_string_length')
def test_create(self, mock_validate):
self.stubs.Set(cinder.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"extra_specs": {"key1": "value1"}}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.create(req, 1, body)
self.assertEqual(1, len(self.notifier.notifications))
self.assertTrue(mock_validate.called)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
@mock.patch.object(cinder.db, 'volume_type_extra_specs_update_or_create')
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_string_length')
def test_create_key_allowed_chars(
self, mock_validate, volume_type_extra_specs_update_or_create):
mock_return_value = {"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
volume_type_extra_specs_update_or_create.\
return_value = mock_return_value
body = {"extra_specs": {"other_alphanum.-_:": "value1"}}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.create(req, 1, body)
self.assertEqual(1, len(self.notifier.notifications))
self.assertTrue(mock_validate.called)
self.assertEqual('value1',
res_dict['extra_specs']['other_alphanum.-_:'])
@mock.patch.object(cinder.db, 'volume_type_extra_specs_update_or_create')
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_string_length')
def test_create_too_many_keys_allowed_chars(
self, mock_validate, volume_type_extra_specs_update_or_create):
mock_return_value = {"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
volume_type_extra_specs_update_or_create.\
return_value = mock_return_value
body = {"extra_specs": {"other_alphanum.-_:": "value1",
"other2_alphanum.-_:": "value2",
"other3_alphanum.-_:": "value3"}}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.create(req, 1, body)
self.assertEqual(1, len(self.notifier.notifications))
self.assertTrue(mock_validate.called)
self.assertEqual('value1',
res_dict['extra_specs']['other_alphanum.-_:'])
self.assertEqual('value2',
res_dict['extra_specs']['other2_alphanum.-_:'])
self.assertEqual('value3',
res_dict['extra_specs']['other3_alphanum.-_:'])
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_string_length')
def test_update_item(self, mock_validate):
self.stubs.Set(cinder.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"key1": "value1"}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path + '/key1')
res_dict = self.controller.update(req, 1, 'key1', body)
self.assertEqual(1, len(self.notifier.notifications))
self.assertTrue(mock_validate.called)
self.assertEqual('value1', res_dict['key1'])
def test_update_item_too_many_keys(self):
self.stubs.Set(cinder.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"key1": "value1", "key2": "value2"}
req = fakes.HTTPRequest.blank(self.api_path + '/key1')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'key1', body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(cinder.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"key1": "value1"}
req = fakes.HTTPRequest.blank(self.api_path + '/bad')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'bad', body)
def _extra_specs_empty_update(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs')
req.method = 'POST'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '1', body)
def test_update_no_body(self):
self._extra_specs_empty_update(body=None)
def test_update_empty_body(self):
self._extra_specs_empty_update(body={})
def _extra_specs_create_bad_body(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs')
req.method = 'POST'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, '1', body)
def test_create_no_body(self):
self._extra_specs_create_bad_body(body=None)
def test_create_missing_volume(self):
body = {'foo': {'a': 'b'}}
self._extra_specs_create_bad_body(body=body)
def test_create_malformed_entity(self):
body = {'extra_specs': 'string'}
self._extra_specs_create_bad_body(body=body)
def test_create_invalid_key(self):
body = {"extra_specs": {"ke/y1": "value1"}}
self._extra_specs_create_bad_body(body=body)
def test_create_invalid_too_many_key(self):
body = {"key1": "value1", "ke/y2": "value2", "key3": "value3"}
self._extra_specs_create_bad_body(body=body)
class VolumeTypeExtraSpecsSerializerTest(test.TestCase):
def test_index_create_serializer(self):
serializer = types_extra_specs.VolumeTypeExtraSpecsTemplate()
# Just getting some input data
extra_specs = stub_volume_type_extra_specs()
text = serializer.serialize(dict(extra_specs=extra_specs))
tree = etree.fromstring(text)
self.assertEqual('extra_specs', tree.tag)
self.assertEqual(len(extra_specs), len(tree))
seen = set(extra_specs.keys())
for child in tree:
self.assertIn(child.tag, seen)
self.assertEqual(extra_specs[child.tag], child.text)
seen.remove(child.tag)
self.assertEqual(0, len(seen))
def test_update_show_serializer(self):
serializer = types_extra_specs.VolumeTypeExtraSpecTemplate()
exemplar = dict(key1='value1')
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('key1', tree.tag)
self.assertEqual('value1', tree.text)
self.assertEqual(0, len(tree))
| apache-2.0 | -7,949,947,533,359,578,000 | 37.43 | 78 | 0.610634 | false |
chatelak/RMG-Py | rmgpy/data/thermo.py | 2 | 61952 | #!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green ([email protected]) and the
# RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
"""
import os.path
import re
import math
import logging
import numpy
from copy import deepcopy
from base import Database, Entry, makeLogicNode, DatabaseError
import rmgpy.constants as constants
from rmgpy.thermo import NASAPolynomial, NASA, ThermoData, Wilhoit
from rmgpy.molecule import Molecule, Atom, Bond, Group
import rmgpy.molecule
from rmgpy.species import Species
#: This dictionary is used to add multiplicity to species label
_multiplicity_labels = {1:'S',2:'D',3:'T',4:'Q',5:'V',}
################################################################################
def saveEntry(f, entry):
"""
Write a Pythonic string representation of the given `entry` in the thermo
database to the file object `f`.
"""
f.write('entry(\n')
f.write(' index = {0:d},\n'.format(entry.index))
f.write(' label = "{0}",\n'.format(entry.label))
if isinstance(entry.item, Molecule):
f.write(' molecule = \n')
f.write('"""\n')
f.write(entry.item.toAdjacencyList(removeH=False))
f.write('""",\n')
elif isinstance(entry.item, Group):
f.write(' group = \n')
f.write('"""\n')
f.write(entry.item.toAdjacencyList())
f.write('""",\n')
else:
f.write(' group = "{0}",\n'.format(entry.item))
if isinstance(entry.data, ThermoData):
f.write(' thermo = ThermoData(\n')
f.write(' Tdata = {0!r},\n'.format(entry.data.Tdata))
f.write(' Cpdata = {0!r},\n'.format(entry.data.Cpdata))
f.write(' H298 = {0!r},\n'.format(entry.data.H298))
f.write(' S298 = {0!r},\n'.format(entry.data.S298))
if entry.data.Tmin is not None: f.write(' Tmin = {0!r},\n'.format(entry.data.Tmin))
if entry.data.Tmax is not None: f.write(' Tmax = {0!r},\n'.format(entry.data.Tmax))
f.write(' ),\n')
elif isinstance(entry.data, Wilhoit):
f.write(' thermo = Wilhoit(\n')
f.write(' cp0 = {0!r},\n'.format(entry.data.cp0))
f.write(' cpInf = {0!r},\n'.format(entry.data.cpInf))
f.write(' a0 = {0:g},\n'.format(entry.data.a0))
f.write(' a1 = {0:g},\n'.format(entry.data.a1))
f.write(' a2 = {0:g},\n'.format(entry.data.a2))
f.write(' a3 = {0:g},\n'.format(entry.data.a3))
f.write(' B = {0!r},\n'.format(entry.data.B))
f.write(' H0 = {0!r},\n'.format(entry.data.H0))
f.write(' S0 = {0!r},\n'.format(entry.data.S0))
if entry.data.Tmin is not None: f.write(' Tmin = {0!r},\n'.format(entry.data.Tmin))
if entry.data.Tmax is not None: f.write(' Tmax = {0!r},\n'.format(entry.data.Tmax))
f.write(' ),\n')
elif isinstance(entry.data, NASA):
f.write(' thermo = NASA(\n')
f.write(' polynomials = [\n')
for poly in entry.data.polynomials:
f.write(' {0!r},\n'.format(poly))
f.write(' ],\n')
if entry.data.Tmin is not None: f.write(' Tmin = {0!r},\n'.format(entry.data.Tmin))
if entry.data.Tmax is not None: f.write(' Tmax = {0!r},\n'.format(entry.data.Tmax))
f.write(' ),\n')
else:
f.write(' thermo = {0!r},\n'.format(entry.data))
if entry.reference is not None: f.write(' reference = {0!r},\n'.format(entry.reference))
if entry.referenceType != "": f.write(' referenceType = "{0}",\n'.format(entry.referenceType))
f.write(' shortDesc = u"""')
try:
f.write(entry.shortDesc.encode('utf-8'))
except (UnicodeEncodeError, UnicodeDecodeError):
f.write(entry.shortDesc.strip().encode('ascii', 'replace'))
f.write('""",\n')
f.write(' longDesc = \n')
f.write('u"""\n')
try:
f.write(entry.longDesc.strip().encode('utf-8') + "\n")
except (UnicodeEncodeError, UnicodeDecodeError):
f.write(entry.longDesc.strip().encode('ascii', 'replace') + "\n")
f.write('""",\n')
f.write(')\n\n')
def generateOldLibraryEntry(data):
"""
Return a list of values used to save entries to the old-style RMG
thermo database based on the thermodynamics object `data`.
"""
if isinstance(data, ThermoData):
return '{0:9g} {1:9g} {2:9g} {3:9g} {4:9g} {5:9g} {6:9g} {7:9g} {8:9g} {9:9g} {10:9g} {11:9g}'.format(
data.H298.value_si/4184.,
data.S298.value_si/4.184,
data.Cpdata.value_si[0]/4.184,
data.Cpdata.value_si[1]/4.184,
data.Cpdata.value_si[2]/4.184,
data.Cpdata.value_si[3]/4.184,
data.Cpdata.value_si[4]/4.184,
data.Cpdata.value_si[5]/4.184,
data.Cpdata.value_si[6]/4.184,
data.H298.uncertainty/4184.,
data.S298.uncertainty/4.184,
max(data.Cpdata.uncertainty)/4.184,
)
elif isinstance(data, basestring):
return data
else:
return '{0:9g} {1:9g} {2:9g} {3:9g} {4:9g} {5:9g} {6:9g} {7:9g} {8:9g} {9:9g} {10:9g} {11:9g}'.format(
data.getEnthalpy(298)/4184.,
data.getEntropy(298)/4.184,
data.getHeatCapacity(300)/4.184,
data.getHeatCapacity(400)/4.184,
data.getHeatCapacity(500)/4.184,
data.getHeatCapacity(600)/4.184,
data.getHeatCapacity(800)/4.184,
data.getHeatCapacity(1000)/4.184,
data.getHeatCapacity(1500)/4.184,
0,
0,
0,
)
def processOldLibraryEntry(data):
"""
Process a list of parameters `data` as read from an old-style RMG
thermo database, returning the corresponding thermodynamics object.
"""
return ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],"K"),
Cpdata = ([float(d) for d in data[2:9]],"cal/(mol*K)","+|-",float(data[11])),
H298 = (float(data[0]),"kcal/mol","+|-",float(data[9])),
S298 = (float(data[1]),"cal/(mol*K)","+|-",float(data[10])),
)
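# Illustrative example (not part of the original module): the old-style RMG
# format stores twelve numbers per entry -- H298 (kcal/mol), S298 (cal/mol/K),
# seven heat capacities at 300, 400, 500, 600, 800, 1000 and 1500 K
# (cal/mol/K), and three uncertainties. A hypothetical row such as
#     data = ['-17.9', '46.4', '8.3', '9.4', '10.5', '11.5', '13.3', '14.8',
#             '17.2', '0.1', '0.2', '0.3']
# would be turned by processOldLibraryEntry(data) into a ThermoData object
# with H298 = -17.9 kcal/mol, S298 = 46.4 cal/(mol*K) and Cpdata defined at
# those seven fixed temperatures.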
def addThermoData(thermoData1, thermoData2, groupAdditivity=False):
"""
Add the thermodynamic data `thermoData2` to the data `thermoData1`,
and return `thermoData1`.
If `groupAdditivity` is True, append comments related to group additivity estimation
"""
if len(thermoData1.Tdata.value_si) != len(thermoData2.Tdata.value_si) or any([T1 != T2 for T1, T2 in zip(thermoData1.Tdata.value_si, thermoData2.Tdata.value_si)]):
raise Exception('Cannot add these ThermoData objects due to their having different temperature points.')
for i in range(thermoData1.Tdata.value_si.shape[0]):
thermoData1.Cpdata.value_si[i] += thermoData2.Cpdata.value_si[i]
thermoData1.H298.value_si += thermoData2.H298.value_si
thermoData1.S298.value_si += thermoData2.S298.value_si
if groupAdditivity:
if thermoData1.comment:
thermoData1.comment += ' + {0}'.format(thermoData2.comment)
else:
thermoData1.comment = 'Thermo group additivity estimation: ' + thermoData2.comment
return thermoData1
def removeThermoData(thermoData1, thermoData2):
"""
Remove the thermodynamic data `thermoData2` from the data `thermoData1`,
and return `thermoData1`.
"""
if len(thermoData1.Tdata.value_si) != len(thermoData2.Tdata.value_si) or any([T1 != T2 for T1, T2 in zip(thermoData1.Tdata.value_si, thermoData2.Tdata.value_si)]):
raise Exception('Cannot take the difference between these ThermoData objects due to their having different temperature points.')
for i in range(thermoData1.Tdata.value_si.shape[0]):
thermoData1.Cpdata.value_si[i] -= thermoData2.Cpdata.value_si[i]
thermoData1.H298.value_si -= thermoData2.H298.value_si
thermoData1.S298.value_si -= thermoData2.S298.value_si
return thermoData1
def averageThermoData(thermoDataList=[]):
"""
Average a list of thermoData values together.
    Sets uncertainty values to be approximately the 95% confidence interval, equivalent to
    2 standard deviations calculated using the sample standard deviation:
    Uncertainty = 2s
    s = sqrt( sum(abs(x - x.mean())^2) / (N - 1) ) where N is the number of values averaged
Note that uncertainties are only computed when number of values is greater than 1.
"""
import copy
numValues = len(thermoDataList)
if numValues == 0:
raise Exception('No thermo data values were inputted to be averaged.')
else:
        logging.info('Averaging thermo data over {0} value(s).'.format(numValues))
if numValues == 1:
return copy.deepcopy(thermoDataList[0])
else:
averagedThermoData = copy.deepcopy(thermoDataList[0])
for thermoData in thermoDataList[1:]:
averagedThermoData = addThermoData(averagedThermoData, thermoData)
for i in range(averagedThermoData.Tdata.value_si.shape[0]):
averagedThermoData.Cpdata.value_si[i] /= numValues
cpData = [thermoData.Cpdata.value_si[i] for thermoData in thermoDataList]
averagedThermoData.Cpdata.uncertainty[i] = 2*numpy.std(cpData, ddof=1)
HData = [thermoData.H298.value_si for thermoData in thermoDataList]
averagedThermoData.H298.value_si /= numValues
averagedThermoData.H298.uncertainty_si = 2*numpy.std(HData, ddof=1)
SData = [thermoData.S298.value_si for thermoData in thermoDataList]
averagedThermoData.S298.value_si /= numValues
averagedThermoData.S298.uncertainty_si = 2*numpy.std(SData, ddof=1)
return averagedThermoData
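# Illustrative example (not part of the original module): averaging two
# hypothetical ThermoData objects whose H298 values are 100.0 and 110.0 kJ/mol
# gives a mean of 105.0 kJ/mol, and the reported uncertainty is twice the
# sample standard deviation, 2 * numpy.std([100.0e3, 110.0e3], ddof=1) ~= 14.1
# kJ/mol; a single-element list is simply returned as a deep copy with no
# uncertainty update.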
################################################################################
class ThermoDepository(Database):
"""
A class for working with the RMG thermodynamics depository.
"""
def __init__(self, label='', name='', shortDesc='', longDesc=''):
Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)
def loadEntry(self, index, label, molecule, thermo, reference=None, referenceType='', shortDesc='', longDesc='', rank=None):
entry = Entry(
index = index,
label = label,
item = Molecule().fromAdjacencyList(molecule),
data = thermo,
reference = reference,
referenceType = referenceType,
shortDesc = shortDesc,
longDesc = longDesc.strip(),
rank = rank,
)
self.entries[label] = entry
return entry
def saveEntry(self, f, entry):
"""
Write the given `entry` in the thermo database to the file object `f`.
"""
return saveEntry(f, entry)
################################################################################
class ThermoLibrary(Database):
"""
A class for working with a RMG thermodynamics library.
"""
def __init__(self, label='', name='',solvent=None, shortDesc='', longDesc=''):
Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)
def loadEntry(self,
index,
label,
molecule,
thermo,
reference=None,
referenceType='',
shortDesc='',
longDesc='',
rank=None,
):
molecule = Molecule().fromAdjacencyList(molecule)
# Internal checks for adding entry to the thermo library
if label in self.entries.keys():
raise DatabaseError('Found a duplicate molecule with label {0} in the thermo library. Please correct your library.'.format(label))
for entry in self.entries.values():
if molecule.isIsomorphic(entry.item):
if molecule.multiplicity == entry.item.multiplicity:
raise DatabaseError('Adjacency list and multiplicity of {0} matches that of existing molecule {1} in thermo library. Please correct your library.'.format(label, entry.label))
self.entries[label] = Entry(
index = index,
label = label,
item = molecule,
data = thermo,
reference = reference,
referenceType = referenceType,
shortDesc = shortDesc,
longDesc = longDesc.strip(),
rank = rank,
)
def saveEntry(self, f, entry):
"""
Write the given `entry` in the thermo database to the file object `f`.
"""
return saveEntry(f, entry)
def generateOldLibraryEntry(self, data):
"""
Return a list of values used to save entries to the old-style RMG
thermo database based on the thermodynamics object `data`.
"""
return generateOldLibraryEntry(data)
def processOldLibraryEntry(self, data):
"""
Process a list of parameters `data` as read from an old-style RMG
thermo database, returning the corresponding thermodynamics object.
"""
return processOldLibraryEntry(data)
################################################################################
class ThermoGroups(Database):
"""
A class for working with an RMG thermodynamics group additivity database.
"""
def __init__(self, label='', name='', shortDesc='', longDesc=''):
Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)
def loadEntry(self,
index,
label,
group,
thermo,
reference=None,
referenceType='',
shortDesc='',
longDesc='',
rank=3,
):
"""
Default rank for thermo groups is 3.
"""
if group[0:3].upper() == 'OR{' or group[0:4].upper() == 'AND{' or group[0:7].upper() == 'NOT OR{' or group[0:8].upper() == 'NOT AND{':
item = makeLogicNode(group)
else:
item = Group().fromAdjacencyList(group)
self.entries[label] = Entry(
index = index,
label = label,
item = item,
data = thermo,
reference = reference,
referenceType = referenceType,
shortDesc = shortDesc,
longDesc = longDesc.strip(),
rank = rank,
)
def saveEntry(self, f, entry):
"""
Write the given `entry` in the thermo database to the file object `f`.
"""
return saveEntry(f, entry)
def generateOldLibraryEntry(self, data):
"""
Return a list of values used to save entries to the old-style RMG
thermo database based on the thermodynamics object `data`.
"""
return generateOldLibraryEntry(data)
def processOldLibraryEntry(self, data):
"""
Process a list of parameters `data` as read from an old-style RMG
thermo database, returning the corresponding thermodynamics object.
"""
return processOldLibraryEntry(data)
################################################################################
class ThermoDatabase(object):
"""
A class for working with the RMG thermodynamics database.
"""
def __init__(self):
self.depository = {}
self.libraries = {}
self.groups = {}
self.libraryOrder = []
self.local_context = {
'ThermoData': ThermoData,
'Wilhoit': Wilhoit,
'NASAPolynomial': NASAPolynomial,
'NASA': NASA,
}
self.global_context = {}
def __reduce__(self):
"""
A helper function used when pickling a ThermoDatabase object.
"""
d = {
'depository': self.depository,
'libraries': self.libraries,
'groups': self.groups,
'libraryOrder': self.libraryOrder,
}
return (ThermoDatabase, (), d)
def __setstate__(self, d):
"""
A helper function used when unpickling a ThermoDatabase object.
"""
self.depository = d['depository']
self.libraries = d['libraries']
self.groups = d['groups']
self.libraryOrder = d['libraryOrder']
def load(self, path, libraries=None, depository=True):
"""
Load the thermo database from the given `path` on disk, where `path`
points to the top-level folder of the thermo database.
"""
if depository:
self.loadDepository(os.path.join(path, 'depository'))
else:
self.depository = {}
self.loadLibraries(os.path.join(path, 'libraries'), libraries)
self.loadGroups(os.path.join(path, 'groups'))
def loadDepository(self, path):
"""
Load the thermo database from the given `path` on disk, where `path`
points to the top-level folder of the thermo database.
"""
self.depository = {}
self.depository['stable'] = ThermoDepository().load(os.path.join(path, 'stable.py'), self.local_context, self.global_context)
self.depository['radical'] = ThermoDepository().load(os.path.join(path, 'radical.py'), self.local_context, self.global_context)
def loadLibraries(self, path, libraries=None):
"""
Load the thermo database from the given `path` on disk, where `path`
points to the top-level folder of the thermo database.
"""
self.libraries = {}; self.libraryOrder = []
for (root, dirs, files) in os.walk(os.path.join(path)):
for f in files:
name, ext = os.path.splitext(f)
if ext.lower() == '.py' and (libraries is None or name in libraries):
logging.info('Loading thermodynamics library from {0} in {1}...'.format(f, root))
library = ThermoLibrary()
library.load(os.path.join(root, f), self.local_context, self.global_context)
library.label = os.path.splitext(f)[0]
self.libraries[library.label] = library
self.libraryOrder.append(library.label)
if libraries is not None:
self.libraryOrder = libraries
def loadGroups(self, path):
"""
Load the thermo database from the given `path` on disk, where `path`
points to the top-level folder of the thermo database.
"""
logging.info('Loading thermodynamics group database from {0}...'.format(path))
self.groups = {}
self.groups['group'] = ThermoGroups(label='group').load(os.path.join(path, 'group.py' ), self.local_context, self.global_context)
self.groups['gauche'] = ThermoGroups(label='gauche').load(os.path.join(path, 'gauche.py' ), self.local_context, self.global_context)
self.groups['int15'] = ThermoGroups(label='int15').load(os.path.join(path, 'int15.py' ), self.local_context, self.global_context)
self.groups['ring'] = ThermoGroups(label='ring').load(os.path.join(path, 'ring.py' ), self.local_context, self.global_context)
self.groups['radical'] = ThermoGroups(label='radical').load(os.path.join(path, 'radical.py'), self.local_context, self.global_context)
self.groups['polycyclic'] = ThermoGroups(label='polycyclic').load(os.path.join(path, 'polycyclic.py'), self.local_context, self.global_context)
self.groups['other'] = ThermoGroups(label='other').load(os.path.join(path, 'other.py' ), self.local_context, self.global_context)
def save(self, path):
"""
Save the thermo database to the given `path` on disk, where `path`
points to the top-level folder of the thermo database.
"""
path = os.path.abspath(path)
if not os.path.exists(path): os.mkdir(path)
self.saveDepository(os.path.join(path, 'depository'))
self.saveLibraries(os.path.join(path, 'libraries'))
self.saveGroups(os.path.join(path, 'groups'))
def saveDepository(self, path):
"""
Save the thermo depository to the given `path` on disk, where `path`
points to the top-level folder of the thermo depository.
"""
if not os.path.exists(path): os.mkdir(path)
for depo in self.depository.keys():
self.depository[depo].save(os.path.join(path, depo+'.py'))
def saveLibraries(self, path):
"""
Save the thermo libraries to the given `path` on disk, where `path`
points to the top-level folder of the thermo libraries.
"""
if not os.path.exists(path): os.mkdir(path)
for library in self.libraries.values():
library.save(os.path.join(path, '{0}.py'.format(library.label)))
def saveGroups(self, path):
"""
Save the thermo groups to the given `path` on disk, where `path`
points to the top-level folder of the thermo groups.
"""
if not os.path.exists(path): os.mkdir(path)
for group in self.groups.keys():
self.groups[group].save(os.path.join(path, group+'.py'))
def loadOld(self, path):
"""
Load the old RMG thermo database from the given `path` on disk, where
`path` points to the top-level folder of the old RMG database.
"""
# The old database does not have a depository, so create an empty one
self.depository = {}
self.depository['stable'] = ThermoDepository(label='stable', name='Stable Molecules')
self.depository['radical'] = ThermoDepository(label='radical', name='Radical Molecules')
for (root, dirs, files) in os.walk(os.path.join(path, 'thermo_libraries')):
if os.path.exists(os.path.join(root, 'Dictionary.txt')) and os.path.exists(os.path.join(root, 'Library.txt')):
library = ThermoLibrary(label=os.path.basename(root), name=os.path.basename(root))
library.loadOld(
dictstr = os.path.join(root, 'Dictionary.txt'),
treestr = '',
libstr = os.path.join(root, 'Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = False,
)
library.label = os.path.basename(root)
self.libraries[library.label] = library
self.groups = {}
self.groups['group'] = ThermoGroups(label='group', name='Functional Group Additivity Values').loadOld(
dictstr = os.path.join(path, 'thermo_groups', 'Group_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', 'Group_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', 'Group_Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = True,
)
self.groups['gauche'] = ThermoGroups(label='gauche', name='Gauche Interaction Corrections').loadOld(
dictstr = os.path.join(path, 'thermo_groups', 'Gauche_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', 'Gauche_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', 'Gauche_Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = True,
)
self.groups['int15'] = ThermoGroups(label='int15', name='1,5-Interaction Corrections').loadOld(
dictstr = os.path.join(path, 'thermo_groups', '15_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', '15_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', '15_Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = True,
)
self.groups['radical'] = ThermoGroups(label='radical', name='Radical Corrections').loadOld(
dictstr = os.path.join(path, 'thermo_groups', 'Radical_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', 'Radical_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', 'Radical_Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = True,
)
self.groups['ring'] = ThermoGroups(label='ring', name='Ring Corrections').loadOld(
dictstr = os.path.join(path, 'thermo_groups', 'Ring_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', 'Ring_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', 'Ring_Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = True,
)
        self.groups['polycyclic'] = ThermoGroups(label='polycyclic', name='Polycyclic Ring Corrections').loadOld(
dictstr = os.path.join(path, 'thermo_groups', 'Polycyclic_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', 'Polycyclic_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', 'Polycyclic_Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = True,
)
self.groups['other'] = ThermoGroups(label='other', name='Other Corrections').loadOld(
dictstr = os.path.join(path, 'thermo_groups', 'Other_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', 'Other_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', 'Other_Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = True,
)
def pruneHeteroatoms(self, allowed=['C','H','O','S']):
"""
Remove all species from thermo libraries that contain atoms other than those allowed.
This is useful before saving the database for use in RMG-Java
"""
allowedElements = [rmgpy.molecule.element.getElement(label) for label in allowed]
for library in self.libraries.values():
logging.info("Removing hetoroatoms from thermo library '{0}'".format(library.name))
toDelete = []
for entry in library.entries.values():
for atom in entry.item.atoms:
if atom.element not in allowedElements:
toDelete.append(entry.label)
break
for label in toDelete:
logging.info(" {0}".format(label))
library.entries.pop(label)
def saveOld(self, path):
"""
Save the old RMG thermo database to the given `path` on disk, where
`path` points to the top-level folder of the old RMG database.
"""
# Depository not used in old database, so it is not saved
librariesPath = os.path.join(path, 'thermo_libraries')
if not os.path.exists(librariesPath): os.mkdir(librariesPath)
for library in self.libraries.values():
libraryPath = os.path.join(librariesPath, library.label)
if not os.path.exists(libraryPath): os.mkdir(libraryPath)
library.saveOld(
dictstr = os.path.join(libraryPath, 'Dictionary.txt'),
treestr = '',
libstr = os.path.join(libraryPath, 'Library.txt'),
)
groupsPath = os.path.join(path, 'thermo_groups')
if not os.path.exists(groupsPath): os.mkdir(groupsPath)
self.groups['group'].saveOld(
dictstr = os.path.join(groupsPath, 'Group_Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Group_Tree.txt'),
libstr = os.path.join(groupsPath, 'Group_Library.txt'),
)
self.groups['gauche'].saveOld(
dictstr = os.path.join(groupsPath, 'Gauche_Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Gauche_Tree.txt'),
libstr = os.path.join(groupsPath, 'Gauche_Library.txt'),
)
self.groups['int15'].saveOld(
dictstr = os.path.join(groupsPath, '15_Dictionary.txt'),
treestr = os.path.join(groupsPath, '15_Tree.txt'),
libstr = os.path.join(groupsPath, '15_Library.txt'),
)
self.groups['radical'].saveOld(
dictstr = os.path.join(groupsPath, 'Radical_Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Radical_Tree.txt'),
libstr = os.path.join(groupsPath, 'Radical_Library.txt'),
)
self.groups['ring'].saveOld(
dictstr = os.path.join(groupsPath, 'Ring_Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Ring_Tree.txt'),
libstr = os.path.join(groupsPath, 'Ring_Library.txt'),
)
self.groups['polycyclic'].saveOld(
dictstr = os.path.join(groupsPath, 'Polycyclic_Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Polycyclic_Tree.txt'),
libstr = os.path.join(groupsPath, 'Polycyclic_Library.txt'),
)
self.groups['other'].saveOld(
dictstr = os.path.join(groupsPath, 'Other_Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Other_Tree.txt'),
libstr = os.path.join(groupsPath, 'Other_Library.txt'),
)
def getThermoData(self, species, trainingSet=None, quantumMechanics=None):
"""
Return the thermodynamic parameters for a given :class:`Species`
object `species`. This function first searches the loaded libraries
in order, returning the first match found, before falling back to
estimation via group additivity.
Returns: ThermoData
"""
thermo0 = None
thermo0 = self.getThermoDataFromLibraries(species)
if thermo0 is not None:
logging.info("Found thermo for {0} in {1}".format(species.label,thermo0[0].comment.lower()))
assert len(thermo0) == 3, "thermo0 should be a tuple at this point: (thermoData, library, entry)"
thermo0 = thermo0[0]
elif quantumMechanics:
original_molecule = species.molecule[0]
if quantumMechanics.settings.onlyCyclics and not original_molecule.isCyclic():
pass
else: # try a QM calculation
if original_molecule.getRadicalCount() > quantumMechanics.settings.maxRadicalNumber:
# Too many radicals for direct calculation: use HBI.
logging.info("{0} radicals on {1} exceeds limit of {2}. Using HBI method.".format(
original_molecule.getRadicalCount(),
species.label,
quantumMechanics.settings.maxRadicalNumber,
))
# Need to estimate thermo via each resonance isomer
thermo = []
for molecule in species.molecule:
molecule.clearLabeledAtoms()
# Try to see if the saturated molecule can be found in the libraries
tdata = self.estimateRadicalThermoViaHBI(molecule, self.getThermoDataFromLibraries)
priority = 1
if tdata is None:
# Then attempt quantum mechanics job on the saturated molecule
tdata = self.estimateRadicalThermoViaHBI(molecule, quantumMechanics.getThermoData)
priority = 2
if tdata is None:
# Fall back to group additivity
tdata = self.estimateThermoViaGroupAdditivity(molecule)
priority = 3
thermo.append((priority, tdata.getEnthalpy(298.), molecule, tdata))
if len(thermo) > 1:
# Sort thermo first by the priority, then by the most stable H298 value
thermo = sorted(thermo, key=lambda x: (x[0], x[1]))
for i in range(len(thermo)):
logging.info("Resonance isomer {0} {1} gives H298={2:.0f} J/mol".format(i+1, thermo[i][2].toSMILES(), thermo[i][1]))
# Save resonance isomers reordered by their thermo
species.molecule = [item[2] for item in thermo]
original_molecule = species.molecule[0]
thermo0 = thermo[0][3]
# If priority == 2
if thermo[0][0] == 2:
# Write the QM molecule thermo to a library so that can be used in future RMG jobs. (Do this only if it came from a QM calculation)
quantumMechanics.database.loadEntry(index = len(quantumMechanics.database.entries) + 1,
label = original_molecule.toSMILES() + '_({0})'.format(_multiplicity_labels[original_molecule.multiplicity]),
molecule = original_molecule.toAdjacencyList(),
thermo = thermo0,
shortDesc = thermo0.comment
)
# # For writing thermodata HBI check for QM molecules
# with open('thermoHBIcheck.txt','a') as f:
# f.write('// {0!r}\n'.format(thermo0).replace('),','),\n// '))
# f.write('{0}\n'.format(original_molecule.toSMILES()))
# f.write('{0}\n\n'.format(original_molecule.toAdjacencyList(removeH=False)))
else: # Not too many radicals: do a direct calculation.
thermo0 = quantumMechanics.getThermoData(original_molecule) # returns None if it fails
if thermo0 is not None:
# Write the QM molecule thermo to a library so that can be used in future RMG jobs.
quantumMechanics.database.loadEntry(index = len(quantumMechanics.database.entries) + 1,
label = original_molecule.toSMILES() + '_({0})'.format(_multiplicity_labels[original_molecule.multiplicity]),
molecule = original_molecule.toAdjacencyList(),
thermo = thermo0,
shortDesc = thermo0.comment
)
if thermo0 is None:
# Use group additivity methods to determine thermo for molecule (or if QM fails completely)
original_molecule = species.molecule[0]
if original_molecule.getRadicalCount() > 0:
# Molecule is a radical, use the HBI method
thermo = []
for molecule in species.molecule:
molecule.clearLabeledAtoms()
                    # First see if the saturated molecule is in the libraries
tdata = self.estimateRadicalThermoViaHBI(molecule, self.getThermoDataFromLibraries)
priority = 1
if tdata is None:
# Otherwise use normal group additivity to obtain the thermo for the molecule
tdata = self.estimateThermoViaGroupAdditivity(molecule)
priority = 2
thermo.append((priority, tdata.getEnthalpy(298.), molecule, tdata))
if len(thermo) > 1:
# Sort thermo first by the priority, then by the most stable H298 value
thermo = sorted(thermo, key=lambda x: (x[0], x[1]))
for i in range(len(thermo)):
logging.info("Resonance isomer {0} {1} gives H298={2:.0f} J/mol".format(i+1, thermo[i][2].toSMILES(), thermo[i][1]))
# Save resonance isomers reordered by their thermo
species.molecule = [item[2] for item in thermo]
thermo0 = thermo[0][3]
else:
# Saturated molecule, does not need HBI method
thermo0 = self.getThermoDataFromGroups(species)
# Make sure to calculate Cp0 and CpInf if it wasn't done already
self.findCp0andCpInf(species, thermo0)
# Return the resulting thermo parameters
return thermo0
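    # Illustrative usage sketch (not part of the original class): a typical
    # caller builds a Species and lets this method fall through the
    # library -> QM -> group additivity cascade described above, e.g.
    #     spc = Species(molecule=[Molecule().fromSMILES('CC')])
    #     thermo = database.getThermoData(spc, quantumMechanics=None)
    #     h298 = thermo.getEnthalpy(298.)   # J/mol
    # where `database` is assumed to be a loaded ThermoDatabase instance.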
def getThermoDataFromLibraries(self, species, trainingSet=None):
"""
Return the thermodynamic parameters for a given :class:`Species`
object `species`. This function first searches the loaded libraries
in order, returning the first match found, before failing and returning None.
`trainingSet` is used to identify if function is called during training set or not.
During training set calculation we want to use gas phase thermo to not affect reverse
rate calculation.
Returns: ThermoData or None
"""
import rmgpy.rmg.main
thermoData = None
#chatelak 11/15/14: modification to introduce liquid phase thermo libraries
libraryList=deepcopy(self.libraryOrder) #copy the value to not affect initial object
if rmgpy.rmg.main.solvent is not None:
liqLibraries=[]
#Liquid phase simulation part:
#This bloc "for": Identify liquid phase libraries and store them in liqLibraries
for iterLib in libraryList:
if self.libraries[iterLib].solvent:
liqLibraries.append(iterLib)
#Check in liqLibraries if thermo for species exists and return the first match. Only if function not called by trainingSet
if liqLibraries and trainingSet is None:
for label in liqLibraries:
thermoData = self.getThermoDataFromLibrary(species, self.libraries[label])
if thermoData is not None:
assert len(thermoData) == 3, "thermoData should be a tuple at this point"
                    # Watch out: this comment text is used later to decide whether to apply solvation to species matching this thermo. If required, modify it carefully.
thermoData[0].comment += 'Liquid thermo library: ' + label
return thermoData
            # Remove liqLibraries from libraryList if called for the training set (trainingSet=True) or if no thermo was found in liqLibraries.
            # If no liquid library was found this does nothing.
for libIter in liqLibraries:
libraryList.remove(libIter)
        # This part is executed for: gas phase simulations, training set calculations, or liquid phase simulations where no liquid libraries were found or no matching species was found in the liquid libraries.
        # For gas phase simulations libraryList = self.libraryOrder (just like before these modifications) and the libraries are all gas phase, as already checked by the checkLibraries function in database.load()
# Check the libraries in order; return the first successful match
for label in libraryList:
thermoData = self.getThermoDataFromLibrary(species, self.libraries[label])
if thermoData is not None:
assert len(thermoData) == 3, "thermoData should be a tuple at this point"
if rmgpy.rmg.main.solvent is not None and trainingSet is None:
thermoData[0].comment += 'Thermo library corrected for liquid phase: ' + label
else:
thermoData[0].comment += 'Thermo library: ' + label
return thermoData
return None
def findCp0andCpInf(self, species, thermoData):
"""
Calculate the Cp0 and CpInf values, and add them to the thermoData object.
Modifies thermoData in place and doesn't return anything
"""
        if not isinstance(thermoData, ThermoData):
            # Only plain ThermoData objects need Cp0/CpInf filled in here;
            # other thermo classes (e.g. Wilhoit, NASA) are simply skipped.
            return
if thermoData.Cp0 is None:
Cp0 = species.calculateCp0()
thermoData.Cp0 = (Cp0,"J/(mol*K)")
if thermoData.CpInf is None:
CpInf = species.calculateCpInf()
thermoData.CpInf = (CpInf,"J/(mol*K)")
def getAllThermoData(self, species):
"""
Return all possible sets of thermodynamic parameters for a given
:class:`Species` object `species`. The hits from the depository come
first, then the libraries (in order), and then the group additivity
estimate. This method is useful for a generic search job.
Returns: a list of tuples (ThermoData, source, entry)
(Source is a library or depository, or None)
"""
thermoDataList = []
# Data from depository comes first
thermoDataList.extend(self.getThermoDataFromDepository(species))
# Data from libraries comes second
for label in self.libraryOrder:
data = self.getThermoDataFromLibrary(species, self.libraries[label])
if data:
assert len(data) == 3, "thermoData should be a tuple at this point"
data[0].comment += label
thermoDataList.append(data)
# Last entry is always the estimate from group additivity
# Make it a tuple
data = (self.getThermoDataFromGroups(species), None, None)
thermoDataList.append(data)
# Return all of the resulting thermo parameters
return thermoDataList
def getThermoDataFromDepository(self, species):
"""
Return all possible sets of thermodynamic parameters for a given
:class:`Species` object `species` from the depository. If no
depository is loaded, a :class:`DatabaseError` is raised.
Returns: a list of tuples (thermoData, depository, entry) without any Cp0 or CpInf data.
"""
items = []
for label, entry in self.depository['stable'].entries.iteritems():
for molecule in species.molecule:
if molecule.isIsomorphic(entry.item):
items.append((deepcopy(entry.data), self.depository['stable'], entry))
break
for label, entry in self.depository['radical'].entries.iteritems():
for molecule in species.molecule:
if molecule.isIsomorphic(entry.item):
items.append((deepcopy(entry.data), self.depository['radical'], entry))
break
return items
def getThermoDataFromLibrary(self, species, library):
"""
Return the set of thermodynamic parameters corresponding to a given
:class:`Species` object `species` from the specified thermodynamics
`library`. If `library` is a string, the list of libraries is searched
for a library with that name. If no match is found in that library,
``None`` is returned. If no corresponding library is found, a
:class:`DatabaseError` is raised.
Returns a tuple: (ThermoData, library, entry) or None.
"""
for label, entry in library.entries.iteritems():
for molecule in species.molecule:
if molecule.isIsomorphic(entry.item) and entry.data is not None:
thermoData = deepcopy(entry.data)
self.findCp0andCpInf(species, thermoData)
return (thermoData, library, entry)
return None
def getThermoDataFromGroups(self, species):
"""
Return the set of thermodynamic parameters corresponding to a given
:class:`Species` object `species` by estimation using the group
additivity values. If no group additivity values are loaded, a
:class:`DatabaseError` is raised.
The resonance isomer (molecule) with the lowest H298 is used, and as a side-effect
the resonance isomers (items in `species.molecule` list) are sorted in ascending order.
Returns: ThermoData
"""
thermo = []
for molecule in species.molecule:
molecule.clearLabeledAtoms()
molecule.updateAtomTypes()
tdata = self.estimateThermoViaGroupAdditivity(molecule)
thermo.append(tdata)
indices = self.prioritizeThermo(species, thermo)
species.molecule = [species.molecule[ind] for ind in indices]
thermoData = thermo[indices[0]]
self.findCp0andCpInf(species, thermoData)
return thermoData
def prioritizeThermo(self, species, thermoDataList):
"""
Use some metrics to reorder a list of thermo data from best to worst.
Return a list of indices with the desired order associated with the index of thermo from the data list.
"""
if len(species.molecule) > 1:
# Go further only if there is more than one isomer
if species.molecule[0].isCyclic():
# Special treatment for cyclic compounds
entries = []
for thermo in thermoDataList:
ringGroups, polycyclicGroups = self.getRingGroupsFromComments(thermo)
# Use rank as a metric for prioritizing thermo.
# The smaller the rank, the better.
sumRank = numpy.sum([entry.rank for entry in ringGroups + polycyclicGroups])
entries.append((thermo, sumRank))
# Sort first by rank, then by enthalpy at 298 K
entries = sorted(entries, key=lambda entry: (entry[1], entry[0].getEnthalpy(298.)))
indices = [thermoDataList.index(entry[0]) for entry in entries]
else:
# For noncyclics, default to original algorithm of ordering thermo based on the most stable enthalpy
H298 = numpy.array([t.getEnthalpy(298.) for t in thermoDataList])
indices = H298.argsort()
else:
indices = [0]
return indices
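    # Illustrative sketch (not part of the original class): given a cyclic
    # species with two resonance isomers whose estimates used ring/polycyclic
    # groups of summed rank 3 and 5 respectively, the rank-3 estimate is
    # placed first even if its H298 is slightly higher; for acyclic species
    # only the H298 ordering (most stable first) is used, and the returned
    # index list is what the caller uses to reorder species.molecule.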
def estimateRadicalThermoViaHBI(self, molecule, stableThermoEstimator ):
"""
Estimate the thermodynamics of a radical by saturating it,
applying the provided stableThermoEstimator method on the saturated species,
then applying hydrogen bond increment corrections for the radical
site(s) and correcting for the symmetry.
"""
assert molecule.isRadical(), "Method only valid for radicals."
saturatedStruct = molecule.copy(deep=True)
added = saturatedStruct.saturate()
saturatedStruct.props['saturated'] = True
# Get thermo estimate for saturated form of structure
if stableThermoEstimator == self.getThermoDataFromLibraries:
# Get data from libraries
saturatedSpec = Species(molecule=[saturatedStruct])
thermoData_sat = stableThermoEstimator(saturatedSpec)
if thermoData_sat:
assert len(thermoData_sat) == 3, "thermoData should be a tuple at this point: (thermoData, library, entry)"
thermoData_sat = thermoData_sat[0]
else:
thermoData_sat = stableThermoEstimator(saturatedStruct)
if thermoData_sat is None:
# logging.info("Thermo data of saturated {0} of molecule {1} is None.".format(saturatedStruct, molecule))
return None
assert thermoData_sat is not None, "Thermo data of saturated {0} of molecule {1} is None!".format(saturatedStruct, molecule)
# Convert to ThermoData object if necessary in order to add and subtract from enthalpy and entropy values
if not isinstance(thermoData_sat, ThermoData):
thermoData_sat = thermoData_sat.toThermoData()
if not stableThermoEstimator == self.computeGroupAdditivityThermo:
#remove the symmetry contribution to the entropy of the saturated molecule
##assumes that the thermo data comes from QMTP or from a thermolibrary
thermoData_sat.S298.value_si += constants.R * math.log(saturatedStruct.getSymmetryNumber())
thermoData = thermoData_sat
# Correct entropy for symmetry number of radical structure
thermoData.S298.value_si -= constants.R * math.log(molecule.getSymmetryNumber())
# For each radical site, get radical correction
# Only one radical site should be considered at a time; all others
# should be saturated with hydrogen atoms
for atom in added:
# Remove the added hydrogen atoms and bond and restore the radical
for H, bond in added[atom]:
saturatedStruct.removeBond(bond)
saturatedStruct.removeAtom(H)
atom.incrementRadical()
saturatedStruct.update()
try:
self.__addGroupThermoData(thermoData, self.groups['radical'], saturatedStruct, {'*':atom})
except KeyError:
logging.error("Couldn't find in radical thermo database:")
logging.error(molecule)
logging.error(molecule.toAdjacencyList())
raise
# Re-saturate
for H, bond in added[atom]:
saturatedStruct.addAtom(H)
saturatedStruct.addBond(bond)
atom.decrementRadical()
# Subtract the enthalpy of the added hydrogens
for H, bond in added[atom]:
thermoData.H298.value_si -= 52.103 * 4184
return thermoData
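    # Illustrative sketch of the HBI bookkeeping above (not part of the
    # original class), with hypothetical numbers for a mono-radical R*: the
    # saturated molecule R-H is estimated first, its symmetry entropy
    # correction is removed (if the estimate did not come from group
    # additivity), the radical-site group correction is added, the radical's
    # own symmetry correction is applied, and 52.103 kcal/mol is subtracted
    # once per hydrogen added during saturation, i.e. roughly
    #     H298(R*) ~= H298(R-H) + radical_correction - 52.103 * 4184  # J/mol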
def estimateThermoViaGroupAdditivity(self, molecule):
"""
Return the set of thermodynamic parameters corresponding to a given
:class:`Molecule` object `molecule` by estimation using the group
additivity values. If no group additivity values are loaded, a
:class:`DatabaseError` is raised.
"""
# For thermo estimation we need the atoms to already be sorted because we
# iterate over them; if the order changes during the iteration then we
# will probably not visit the right atoms, and so will get the thermo wrong
molecule.sortAtoms()
if molecule.isRadical(): # radical species
thermoData = self.estimateRadicalThermoViaHBI(molecule, self.computeGroupAdditivityThermo)
return thermoData
else: # non-radical species
thermoData = self.computeGroupAdditivityThermo(molecule)
# Correct entropy for symmetry number
if not 'saturated' in molecule.props:
thermoData.S298.value_si -= constants.R * math.log(molecule.getSymmetryNumber())
return thermoData
def computeGroupAdditivityThermo(self, molecule):
"""
Return the set of thermodynamic parameters corresponding to a given
:class:`Molecule` object `molecule` by estimation using the group
additivity values. If no group additivity values are loaded, a
:class:`DatabaseError` is raised.
The entropy is not corrected for the symmetry of the molecule.
This should be done later by the calling function.
"""
assert not molecule.isRadical(), "This method is only for saturated non-radical species."
# For thermo estimation we need the atoms to already be sorted because we
# iterate over them; if the order changes during the iteration then we
# will probably not visit the right atoms, and so will get the thermo wrong
molecule.sortAtoms()
# Create the ThermoData object
thermoData = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],"K"),
Cpdata = ([0.0,0.0,0.0,0.0,0.0,0.0,0.0],"J/(mol*K)"),
H298 = (0.0,"kJ/mol"),
S298 = (0.0,"J/(mol*K)"),
)
cyclic = molecule.isCyclic()
# Generate estimate of thermodynamics
for atom in molecule.atoms:
# Iterate over heavy (non-hydrogen) atoms
if atom.isNonHydrogen():
# Get initial thermo estimate from main group database
try:
self.__addGroupThermoData(thermoData, self.groups['group'], molecule, {'*':atom})
except KeyError:
logging.error("Couldn't find in main thermo database:")
logging.error(molecule)
logging.error(molecule.toAdjacencyList())
raise
# Correct for gauche and 1,5- interactions
if not cyclic:
try:
self.__addGroupThermoData(thermoData, self.groups['gauche'], molecule, {'*':atom})
except KeyError: pass
try:
self.__addGroupThermoData(thermoData, self.groups['int15'], molecule, {'*':atom})
except KeyError: pass
try:
self.__addGroupThermoData(thermoData, self.groups['other'], molecule, {'*':atom})
except KeyError: pass
# Do ring corrections separately because we only want to match
# each ring one time
if cyclic:
monorings, polyrings = molecule.getDisparateRings()
for ring in monorings:
# Make a temporary structure containing only the atoms in the ring
# NB. if any of the ring corrections depend on ligands not in the ring, they will not be found!
try:
self.__addRingCorrectionThermoData(thermoData, self.groups['ring'], molecule, ring)
except KeyError:
logging.error("Couldn't find a match in the monocyclic ring database even though monocyclic rings were found.")
logging.error(molecule)
logging.error(molecule.toAdjacencyList())
raise
for ring in polyrings:
# Make a temporary structure containing only the atoms in the ring
# NB. if any of the ring corrections depend on ligands not in the ring, they will not be found!
try:
self.__addRingCorrectionThermoData(thermoData, self.groups['polycyclic'], molecule, ring)
except KeyError:
logging.error("Couldn't find a match in the polycyclic ring database even though polycyclic rings were found.")
logging.error(molecule)
logging.error(molecule.toAdjacencyList())
raise
return thermoData
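    # Illustrative sketch (not part of the original class): for a saturated
    # acyclic molecule such as propane, the loop above adds one 'group'
    # contribution per heavy atom (two methyl-type carbon groups and one
    # secondary carbon group, with whatever labels the database uses), plus
    # any matching gauche, 1,5- and 'other' corrections; ring and polycyclic
    # corrections are only consulted for cyclic molecules, once per ring.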
def __addRingCorrectionThermoData(self, thermoData, ring_database, molecule, ring):
"""
Determine the ring correction group additivity thermodynamic data for the given
`ring` in the `molecule`, and add it to the existing thermo data
`thermoData`.
"""
matchedRingEntries = []
# label each atom in the ring individually to try to match the group
# for each ring, save only the ring that is matches the most specific leaf in the tree.
for atom in ring:
atoms = {'*':atom}
entry = ring_database.descendTree(molecule, atoms)
matchedRingEntries.append(entry)
        if all(entry is None for entry in matchedRingEntries):
raise KeyError('Node not found in database.')
# Decide which group to keep
depthList = [len(ring_database.ancestors(entry)) for entry in matchedRingEntries]
mostSpecificMatchIndices = [i for i, x in enumerate(depthList) if x == max(depthList)]
mostSpecificMatchedEntries = [matchedRingEntries[idx] for idx in mostSpecificMatchIndices]
if len(set(mostSpecificMatchedEntries)) != 1:
logging.warning('More than one type of node was found to be most specific for this ring.')
logging.warning('This is either due to a database error in the ring or polycyclic groups, or a partial match between the group and the full ring.')
logging.warning(mostSpecificMatchedEntries)
# Condense the number of most specific groups down to one
mostSpecificMatchedEntry = matchedRingEntries[mostSpecificMatchIndices[0]]
node = mostSpecificMatchedEntry
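        # If the most specific match carries no data of its own, fall up the tree until an ancestor with data is found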
        while node is not None and node.data is None:
            node = node.parent
        if node is None:
            raise DatabaseError('Unable to determine thermo parameters for {0}: no data for {1} or any of its ancestors.'.format(molecule, mostSpecificMatchedEntry))
data = node.data; comment = node.label
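        # The data may be a string label referencing another entry; follow the reference to the actual thermo data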
        while isinstance(data, basestring):
for entry in ring_database.entries.values():
if entry.label == data:
data = entry.data
comment = entry.label
break
data.comment = '{0}({1})'.format(ring_database.label, comment)
if thermoData is None:
return data
else:
return addThermoData(thermoData, data, groupAdditivity=True)
def __addGroupThermoData(self, thermoData, database, molecule, atom):
"""
Determine the group additivity thermodynamic data for the atom `atom`
in the structure `structure`, and add it to the existing thermo data
`thermoData`.
"""
node0 = database.descendTree(molecule, atom, None)
if node0 is None:
raise KeyError('Node not found in database.')
# It's possible (and allowed) that items in the tree may not be in the
# library, in which case we need to fall up the tree until we find an
# ancestor that has an entry in the library
node = node0
        while node is not None and node.data is None:
node = node.parent
if node is None:
raise DatabaseError('Unable to determine thermo parameters for {0}: no data for node {1} or any of its ancestors.'.format(molecule, node0) )
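        # The data may be a string label referencing another entry; follow the reference to the actual thermo data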
data = node.data; comment = node.label
        while isinstance(data, basestring):
for entry in database.entries.values():
if entry.label == data:
data = entry.data
comment = entry.label
break
data.comment = '{0}({1})'.format(database.label, comment)
# This code prints the hierarchy of the found node; useful for debugging
# result = ''
# while node is not None:
# result = ' -> ' + node.label + result
# node = node.parent
# print result[4:]
if thermoData is None:
return data
else:
return addThermoData(thermoData, data, groupAdditivity=True)
def getRingGroupsFromComments(self, thermoData):
"""
Takes a string of comments from group additivity estimation, and extracts the ring and polycyclic ring groups
from them, returning them as lists.
"""
tokens = thermoData.comment.split()
ringGroups = []
polycyclicGroups = []
for token in tokens:
if 'ring' in token:
                splitTokens = re.split(r"\(|\)", token)
assert len(splitTokens) == 3
groupLabel = splitTokens[1]
ringGroups.append(self.groups['ring'].entries[groupLabel])
if 'polycyclic' in token:
                splitTokens = re.split(r"\(|\)", token)
assert len(splitTokens) == 3
groupLabel = splitTokens[1]
polycyclicGroups.append(self.groups['polycyclic'].entries[groupLabel])
return ringGroups, polycyclicGroups | mit | 3,841,461,204,941,031,400 | 46.076748 | 195 | 0.581886 | false |
TheMOOCAgency/edx-platform | openedx/core/djangoapps/cors_csrf/tests/test_views.py | 16 | 2390 | """Tests for cross-domain request views. """
import ddt
import json
from django.test import TestCase
from django.core.urlresolvers import reverse, NoReverseMatch
from config_models.models import cache
from ..models import XDomainProxyConfiguration
@ddt.ddt
class XDomainProxyTest(TestCase):
"""Tests for the xdomain proxy end-point. """
def setUp(self):
"""Clear model-based config cache. """
super(XDomainProxyTest, self).setUp()
try:
self.url = reverse('xdomain_proxy')
except NoReverseMatch:
self.skipTest('xdomain_proxy URL is not configured')
cache.clear()
def test_xdomain_proxy_disabled(self):
self._configure(False)
response = self._load_page()
self.assertEqual(response.status_code, 404)
@ddt.data(None, [' '], [' ', ' '])
def test_xdomain_proxy_enabled_no_whitelist(self, whitelist):
self._configure(True, whitelist=whitelist)
response = self._load_page()
self.assertEqual(response.status_code, 404)
@ddt.data(
(['example.com'], ['example.com']),
(['example.com', 'sub.example.com'], ['example.com', 'sub.example.com']),
([' example.com '], ['example.com']),
([' ', 'example.com'], ['example.com']),
)
@ddt.unpack
def test_xdomain_proxy_enabled_with_whitelist(self, whitelist, expected_whitelist):
self._configure(True, whitelist=whitelist)
response = self._load_page()
self._check_whitelist(response, expected_whitelist)
def _configure(self, is_enabled, whitelist=None):
"""Enable or disable the end-point and configure the whitelist. """
config = XDomainProxyConfiguration.current()
config.enabled = is_enabled
if whitelist:
config.whitelist = "\n".join(whitelist)
config.save()
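        # config_models caches the active configuration; clear it so the new values take effect immediately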
cache.clear()
def _load_page(self):
"""Load the end-point. """
return self.client.get(reverse('xdomain_proxy'))
def _check_whitelist(self, response, expected_whitelist):
"""Verify that the domain whitelist is rendered on the page. """
rendered_whitelist = json.dumps({
domain: '*'
for domain in expected_whitelist
})
self.assertContains(response, 'xdomain.min.js')
self.assertContains(response, rendered_whitelist)
| agpl-3.0 | 6,803,070,102,987,931,000 | 31.739726 | 87 | 0.625941 | false |
kingvuplus/ee | lib/python/Screens/SoftwareUpdate.py | 4 | 13104 | from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.ParentalControlSetup import ProtectedScreen
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.TextBox import TextBox
#from Screens.About import CommitInfo
from Components.config import config
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Ipkg import IpkgComponent
from Components.Sources.StaticText import StaticText
from Components.Slider import Slider
from Tools.BoundFunction import boundFunction
from Tools.Directories import fileExists
from enigma import eTimer, getBoxType, eDVBDB
from urllib import urlopen
import socket
import os
import re
import time
class UpdatePlugin(Screen, ProtectedScreen):
skin = """
<screen name="UpdatePlugin" position="center,center" size="550,300">
<widget name="activityslider" position="0,0" size="550,5" />
<widget name="slider" position="0,150" size="550,30" />
<widget source="package" render="Label" position="10,30" size="540,20" font="Regular;18" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
<widget source="status" render="Label" position="10,180" size="540,100" font="Regular;20" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, *args):
Screen.__init__(self, session)
ProtectedScreen.__init__(self)
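		# Packages whose installation marks a milestone on the progress slider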
self.sliderPackages = { "dreambox-dvb-modules": 1, "enigma2": 2, "tuxbox-image-info": 3 }
self.setTitle(_("Software update"))
self.slider = Slider(0, 4)
self["slider"] = self.slider
self.activityslider = Slider(0, 100)
self["activityslider"] = self.activityslider
self.status = StaticText(_("Please wait..."))
self["status"] = self.status
self.package = StaticText(_("Package list update"))
self["package"] = self.package
self.oktext = _("Press OK on your remote control to continue.")
self.packages = 0
self.error = 0
self.processed_packages = []
self.total_packages = None
self.channellist_only = 0
self.channellist_name = ''
self.updating = False
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
self.onClose.append(self.__close)
self["actions"] = ActionMap(["WizardActions"],
{
"ok": self.exit,
"back": self.exit
}, -1)
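		# A 100 ms timer advances the activity slider to show that the update is still in progress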
self.activity = 0
self.activityTimer = eTimer()
self.activityTimer.callback.append(self.checkTraficLightBH)
self.activityTimer.callback.append(self.doActivityTimer)
self.activityTimer.start(100, True)
def isProtected(self):
return config.ParentalControl.setuppinactive.value and\
(not config.ParentalControl.config_sections.main_menu.value and not config.ParentalControl.config_sections.configuration.value or hasattr(self.session, 'infobar') and self.session.infobar is None) and\
config.ParentalControl.config_sections.software_update.value
def checkTraficLight(self):
self.activityTimer.callback.remove(self.checkTraficLight)
self.activityTimer.start(100, False)
currentTimeoutDefault = socket.getdefaulttimeout()
socket.setdefaulttimeout(3)
message = ""
picon = None
default = True
try:
# TODO: Use Twisted's URL fetcher, urlopen is evil. And it can
# run in parallel to the package update.
status = urlopen("http://openpli.org/status").read().split('!', 1)
if getBoxType() in status[0].split(','):
message = len(status) > 1 and status[1] or _("The current beta image might not be stable.\nFor more information see %s.") % ("www.openpli.org")
picon = MessageBox.TYPE_ERROR
default = False
except:
message = _("The status of the current beta image could not be checked because %s can not be reached.") % ("www.openpli.org")
picon = MessageBox.TYPE_ERROR
default = False
socket.setdefaulttimeout(currentTimeoutDefault)
if default:
self.showDisclaimer()
else:
message += "\n" + _("Do you want to update your receiver?")
self.session.openWithCallback(self.startActualUpdate, MessageBox, message, default = default, picon = picon)
def checkTraficLightBH(self):
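		# Unlike checkTraficLight, this variant skips the online status check and starts the update immediately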
self.activityTimer.callback.remove(self.checkTraficLightBH)
self.activityTimer.start(100, False)
self.startActualUpdate(True)
def showDisclaimer(self, justShow=False):
if config.usage.show_update_disclaimer.value or justShow:
message = _("The OpenPLi team would like to point out that upgrading to the latest nightly build comes not only with the latest features, but also with some risks. After the update, it is possible that your device no longer works as expected. We recommend you create backups with Autobackup or Backupsuite. This allows you to quickly and easily restore your device to its previous state, should you experience any problems. If you encounter a 'bug', please report the issue on www.openpli.org.\n\nDo you understand this?")
list = not justShow and [(_("no"), False), (_("yes"), True), (_("yes") + " " + _("and never show this message again"), "never")] or []
self.session.openWithCallback(boundFunction(self.disclaimerCallback, justShow), MessageBox, message, list=list)
else:
self.startActualUpdate(True)
def disclaimerCallback(self, justShow, answer):
if answer == "never":
config.usage.show_update_disclaimer.value = False
config.usage.show_update_disclaimer.save()
if justShow and answer:
self.ipkgCallback(IpkgComponent.EVENT_DONE, None)
else:
self.startActualUpdate(answer)
def getLatestImageTimestamp(self):
currentTimeoutDefault = socket.getdefaulttimeout()
socket.setdefaulttimeout(3)
latestImageTimestamp = ""
try:
# TODO: Use Twisted's URL fetcher, urlopen is evil. And it can
# run in parallel to the package update.
latestImageTimestamp = re.findall('<dd>(.*?)</dd>', urlopen("http://openpli.org/download/"+getBoxType()+"/").read())[0][:16]
latestImageTimestamp = time.strftime(_("%d-%b-%Y %-H:%M"), time.strptime(latestImageTimestamp, "%Y/%m/%d %H:%M"))
except:
pass
socket.setdefaulttimeout(currentTimeoutDefault)
return latestImageTimestamp
def startActualUpdate(self,answer):
if answer:
self.updating = True
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
else:
self.close()
def doActivityTimer(self):
self.activity += 1
if self.activity == 100:
self.activity = 0
self.activityslider.setValue(self.activity)
def showUpdateCompletedMessage(self):
self.setEndMessage(ngettext("Update completed, %d package was installed.", "Update completed, %d packages were installed.", self.packages) % self.packages)
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_DOWNLOAD:
self.status.setText(_("Downloading"))
elif event == IpkgComponent.EVENT_UPGRADE:
if self.sliderPackages.has_key(param):
self.slider.setValue(self.sliderPackages[param])
self.package.setText(param)
self.status.setText(_("Upgrading") + ": %s/%s" % (self.packages, self.total_packages))
if not param in self.processed_packages:
self.processed_packages.append(param)
self.packages += 1
elif event == IpkgComponent.EVENT_INSTALL:
self.package.setText(param)
self.status.setText(_("Installing"))
if not param in self.processed_packages:
self.processed_packages.append(param)
self.packages += 1
elif event == IpkgComponent.EVENT_REMOVE:
self.package.setText(param)
self.status.setText(_("Removing"))
if not param in self.processed_packages:
self.processed_packages.append(param)
self.packages += 1
elif event == IpkgComponent.EVENT_CONFIGURING:
self.package.setText(param)
self.status.setText(_("Configuring"))
elif event == IpkgComponent.EVENT_MODIFIED:
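			# A configuration file was changed locally; apply the configured policy or ask the user what to do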
if config.plugins.softwaremanager.overwriteConfigFiles.value in ("N", "Y"):
				self.ipkg.write(config.plugins.softwaremanager.overwriteConfigFiles.value)
else:
self.session.openWithCallback(
self.modificationCallback,
MessageBox,
_("A configuration file (%s) has been modified since it was installed.\nDo you want to keep your modifications?") % (param)
)
elif event == IpkgComponent.EVENT_ERROR:
self.error += 1
elif event == IpkgComponent.EVENT_DONE:
if self.updating:
self.updating = False
self.ipkg.startCmd(IpkgComponent.CMD_UPGRADE_LIST)
elif self.ipkg.currentCommand == IpkgComponent.CMD_UPGRADE_LIST:
self.total_packages = len(self.ipkg.getFetchedList())
if self.total_packages:
# latestImageTimestamp = self.getLatestImageTimestamp()
latestImageTimestamp = ""
if latestImageTimestamp:
message = _("Do you want to update your receiver to %s?") % self.getLatestImageTimestamp() + "\n"
else:
message = _("Do you want to update your receiver?") + "\n"
message += "(" + (ngettext("%s updated package available", "%s updated packages available", self.total_packages) % self.total_packages) + ")"
if self.total_packages > 150:
message += " " + _("Reflash recommended!")
choices = [(_("Update and reboot (recommended)"), "cold"),
(_("Update and ask to reboot"), "hot"),
(_("Update channel list only"), "channels"),
(_("Show packages to be upgraded"), "showlist")]
else:
message = _("No updates available")
choices = []
if fileExists("/home/root/ipkgupgrade.log"):
choices.append((_("Show latest upgrade log"), "log"))
# choices.append((_("Show latest commits on sourceforge"), "commits"))
# if not config.usage.show_update_disclaimer.value:
# choices.append((_("Show disclaimer"), "disclaimer"))
choices.append((_("Cancel"), ""))
self.session.openWithCallback(self.startActualUpgrade, ChoiceBox, title=message, list=choices)
elif self.channellist_only > 0:
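				# channellist_only is a step counter: 1 = look up the installed settings package, 2 = remove it, 3 = reinstall it, 4 = reload bouquets/services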
if self.channellist_only == 1:
self.setEndMessage(_("Could not find installed channel list."))
elif self.channellist_only == 2:
self.slider.setValue(2)
self.ipkg.startCmd(IpkgComponent.CMD_REMOVE, {'package': self.channellist_name})
self.channellist_only += 1
elif self.channellist_only == 3:
self.slider.setValue(3)
self.ipkg.startCmd(IpkgComponent.CMD_INSTALL, {'package': self.channellist_name})
self.channellist_only += 1
elif self.channellist_only == 4:
self.showUpdateCompletedMessage()
eDVBDB.getInstance().reloadBouquets()
eDVBDB.getInstance().reloadServicelist()
elif self.error == 0:
self.showUpdateCompletedMessage()
else:
self.activityTimer.stop()
self.activityslider.setValue(0)
error = _("Your receiver might be unusable now. Please consult the manual for further assistance before rebooting your receiver.")
if self.packages == 0:
error = _("No updates available. Please try again later.")
if self.updating:
error = _("Update failed. Your receiver does not have a working internet connection.")
self.status.setText(_("Error") + " - " + error)
elif event == IpkgComponent.EVENT_LISTITEM:
if 'enigma2-plugin-settings-' in param[0] and self.channellist_only > 0:
self.channellist_name = param[0]
self.channellist_only = 2
#print event, "-", param
pass
def setEndMessage(self, txt):
self.slider.setValue(4)
self.activityTimer.stop()
self.activityslider.setValue(0)
self.package.setText(txt)
self.status.setText(self.oktext)
def startActualUpgrade(self, answer):
if not answer or not answer[1]:
self.close()
return
if answer[1] == "cold":
self.session.open(TryQuitMainloop,retvalue=42)
self.close()
elif answer[1] == "channels":
self.channellist_only = 1
self.slider.setValue(1)
self.ipkg.startCmd(IpkgComponent.CMD_LIST, args = {'installed_only': True})
# elif answer[1] == "commits":
# self.session.openWithCallback(boundFunction(self.ipkgCallback, IpkgComponent.EVENT_DONE, None), CommitInfo)
elif answer[1] == "disclaimer":
self.showDisclaimer(justShow=True)
elif answer[1] == "showlist":
text = ""
for i in [x[0] for x in sorted(self.ipkg.getFetchedList(), key=lambda d: d[0])]:
text = text and text + "\n" + i or i
self.session.openWithCallback(boundFunction(self.ipkgCallback, IpkgComponent.EVENT_DONE, None), TextBox, text, _("Packages to update"))
elif answer[1] == "log":
text = ""
for i in open("/home/root/ipkgupgrade.log", "r").readlines():
text += i
			self.session.openWithCallback(boundFunction(self.ipkgCallback, IpkgComponent.EVENT_DONE, None), TextBox, text, _("Latest upgrade log"))
else:
self.ipkg.startCmd(IpkgComponent.CMD_UPGRADE, args = {'test_only': False})
def modificationCallback(self, res):
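		# res is True when the user wants to keep their modifications: answer "N" (keep the local file), otherwise "Y" (install the new version)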
self.ipkg.write(res and "N" or "Y")
def exit(self):
if not self.ipkg.isRunning():
if self.packages != 0 and self.error == 0 and self.channellist_only == 0:
self.session.openWithCallback(self.exitAnswer, MessageBox, _("Update completed. Do you want to reboot your receiver?"))
else:
self.close()
else:
if not self.updating:
self.close()
def exitAnswer(self, result):
if result is not None and result:
self.session.open(TryQuitMainloop,retvalue=2)
self.close()
def __close(self):
self.ipkg.removeCallback(self.ipkgCallback)
| gpl-2.0 | -1,112,558,620,414,389,900 | 40.732484 | 525 | 0.715583 | false |