// file: vendor.ts
// For vendors (for example jQuery, Lodash, angular2-jwt), import them here
// unless you plan on chunking vendor files for async loading. You would need
// to import the async-loaded vendors at the entry point of the async-loaded
// file. Also see custom-typings.d.ts, as you also need to run
// `typings install x` where `x` is your module.

// Angular 2
import '@angular/platform-browser';
import '@angular/platform-browser-dynamic';
import '@angular/core';
import '@angular/common';

// RxJS
import 'rxjs/add/operator/map';
import 'rxjs/add/operator/mergeMap';

// Dragula
import 'ng2-dragula/ng2-dragula';

// if ('production' === ENV) {
//   // Production
// } else {
//   // Development
// }
// file: sort-down.js
const createSortDownIcon = (button) => {
    button.firstChild.remove();
    const svg = document.createElementNS('http://www.w3.org/2000/svg', 'svg');
    svg.setAttribute('height', '12');
    svg.setAttribute('viewBox', '0 0 503 700');
    const path = document.createElementNS('http://www.w3.org/2000/svg', 'path');
    path.setAttribute('d', `M43.302,409.357h418.36c37.617,0,56.426,45.527,29.883,72.07l-209.18,209.18c-16.523,16.523-43.243,16.523-59.59,0L13.419,481.428C-13.124,454.885,5.685,409.357,43.302,409.357z`);
    svg.appendChild(path);
    button.appendChild(svg);
};

export default createSortDownIcon;
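// A minimal usage sketch for createSortDownIcon, assuming a sortable table
// header whose button should receive the arrow icon; the selector and the
// module path below are illustrative, not part of the original file.
import createSortDownIcon from './sort-down.js';

const sortButton = document.querySelector('th.sortable > button');
sortButton.addEventListener('click', () => {
    // Replaces the button's current first child with the generated SVG.
    createSortDownIcon(sortButton);
});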
// file: mod.rs
pub use self::mouse_joint::{MouseJointConfig, MouseJoint};

use std::rc::{Rc, Weak};
use std::cell::RefCell;
use std::mem;
use std::ptr;

use super::{Body, BodyHandleWeak};
use super::island::{Position, Velocity};
use ::dynamics::world::TimeStep;

mod mouse_joint;

pub type JointHandle<'a> = Rc<RefCell<Joint<'a>>>;
pub type JointHandleWeak<'a> = Weak<RefCell<Joint<'a>>>;

/// A joint edge is used to connect bodies and joints together in a joint graph where
/// each body is a node and each joint is an edge. Each joint has two joint nodes,
/// one for each attached body.
pub struct JointEdge<'a> {
    pub body: BodyHandleWeak<'a>,
    pub joint: JointHandleWeak<'a>,
}

pub enum JointType {
    Mouse(MouseJointConfig),
}

/// `JointConfig`s are used to construct joints.
pub struct JointConfig<'a> {
    pub joint_type: JointType,

    /// The first attached body.
    pub body_a: BodyHandleWeak<'a>,

    /// The second attached body.
    pub body_b: BodyHandleWeak<'a>,

    /// Set this flag to true if the attached bodies should collide.
    pub collide_connected: bool,
}

pub struct JointData<'a> {
    body_a: BodyHandleWeak<'a>,
    body_b: BodyHandleWeak<'a>,
    is_island: bool,
    is_collide_connected: bool,
}

pub enum Joint<'a> {
    Mouse(MouseJoint<'a>),
}

impl<'a> Joint<'a> {
    /*pub fn new(joint_config: &JointConfig<'a>) -> JointHandle<'a> {
        let result: JointHandle<'a>;
        unsafe {
            result = Rc::new(RefCell::new(mem::uninitialized()));
            let edge_to_a = JointEdge {
                body: joint_config.body_a.clone(),
                joint: Rc::downgrade(&result),
            };
            let edge_to_b = JointEdge {
                body: joint_config.body_b.clone(),
                joint: Rc::downgrade(&result),
            };
            let joint_data = JointData {
                edge_to_a: edge_to_a,
                edge_to_b: edge_to_b,
                is_island: false,
                is_collide_connected: joint_config.collide_connected,
            };
            match joint_config.joint_type {
                JointType::Mouse(ref joint_config) => {
                    ptr::write(&mut *result.borrow_mut(),
                               Joint::Mouse(MouseJoint::new(joint_config, joint_data)));
                }
            }
        }
        result
    }*/

    pub fn new(joint_config: &JointConfig<'a>) -> JointHandle<'a> {
        let joint_data = JointData {
            body_a: joint_config.body_a.clone(),
            body_b: joint_config.body_b.clone(),
            is_island: false,
            is_collide_connected: joint_config.collide_connected,
        };
        match joint_config.joint_type {
            JointType::Mouse(ref joint_config) => {
                Rc::new(RefCell::new(Joint::Mouse(MouseJoint::new(joint_config, joint_data))))
            }
        }
    }

    fn get_joint_data(&self) -> &JointData<'a> {
        match self {
            &Joint::Mouse(ref joint) => &joint.joint_data,
        }
    }

    fn get_joint_data_mut(&mut self) -> &mut JointData<'a> {
        match self {
            &mut Joint::Mouse(ref mut joint) => &mut joint.joint_data,
        }
    }

    pub fn get_other_body(&self, body: BodyHandleWeak<'a>) -> Option<BodyHandleWeak<'a>> {
        let b = body.upgrade().unwrap();
        let pb = &(*b) as *const RefCell<Body>;
        let b_a = self.get_joint_data().body_a.upgrade().unwrap();
        let pb_a = &(*b_a) as *const RefCell<Body>;
        if pb == pb_a {
            return Some(self.get_joint_data().body_b.clone());
        }
        let b_b = self.get_joint_data().body_b.upgrade().unwrap();
        let pb_b = &(*b_b) as *const RefCell<Body>;
        if pb == pb_b {
            return Some(self.get_joint_data().body_a.clone());
        }
        None
    }

    pub fn set_island(&mut self, is_island: bool) {
        self.get_joint_data_mut().is_island = is_island;
    }

    pub fn is_island(&self) -> bool {
        self.get_joint_data().is_island
    }

    pub fn initialize_velocity_constraints(&mut self, step: TimeStep,
                                           positions: &Vec<Position>,
                                           velocities: &mut Vec<Velocity>) {
        match self {
            &mut Joint::Mouse(ref mut joint) =>
                joint.initialize_velocity_constraints(step, positions, velocities),
        }
    }

    pub fn solve_velocity_constraints(&mut self, step: TimeStep,
                                      velocities: &mut Vec<Velocity>) {
        match self {
            &mut Joint::Mouse(ref mut joint) =>
                joint.solve_velocity_constraints(step, velocities),
        }
    }

    /// This returns true if the position errors are within tolerance.
    pub fn solve_position_constraints(&mut self, step: TimeStep,
                                      positions: &mut Vec<Position>) -> bool {
        true
    }
}
// file: examples.js
var path = require("path");

module.exports = {
    mouseposition: path.join(__dirname, "web", "client", "examples", "mouseposition", "app"),
    scalebar: path.join(__dirname, "web", "client", "examples", "scalebar", "app"),
    layertree: path.join(__dirname, "web", "client", "examples", "layertree", "app"),
    "3dviewer": path.join(__dirname, "web", "client", "examples", "3dviewer", "app"),
    queryform: path.join(__dirname, "web", "client", "examples", "queryform", "app"),
    featuregrid: path.join(__dirname, "web", "client", "examples", "featuregrid", "app"),
    print: path.join(__dirname, "web", "client", "examples", "print", "app"),
    login: path.join(__dirname, "web", "client", "examples", "login", "app"),
    plugins: path.join(__dirname, "web", "client", "examples", "plugins", "app"),
    rasterstyler: path.join(__dirname, "web", "client", "examples", "rasterstyler", "app")
    // This example is not linked and seems to cause a big slowdown with the
    // uglify plugin. Disabled temporarily.
    // styler: path.join(__dirname, "web", "client", "examples", "styler", "app")
};
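// A minimal sketch of how this entry map is typically consumed, assuming a
// webpack build; the output path and filename pattern below are illustrative
// assumptions, not part of the original file.
var path = require("path");
var examples = require("./examples.js");

module.exports = {
    // One bundle per example app, keyed by the names defined above.
    entry: examples,
    output: {
        path: path.join(__dirname, "dist", "examples"),
        filename: "[name].js"
    }
};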
# file: test_notifier.py
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
import logging
import sys
import uuid

import fixtures
from oslo_serialization import jsonutils
from oslo_utils import strutils
from oslo_utils import timeutils
from stevedore import dispatch
from stevedore import extension
import testscenarios
import yaml

import oslo_messaging
from oslo_messaging.notify import _impl_log
from oslo_messaging.notify import _impl_test
from oslo_messaging.notify import messaging
from oslo_messaging.notify import notifier as msg_notifier
from oslo_messaging import serializer as msg_serializer
from oslo_messaging.tests import utils as test_utils
from six.moves import mock

load_tests = testscenarios.load_tests_apply_scenarios


class JsonMessageMatcher(object):
    def __init__(self, message):
        self.message = message

    def __eq__(self, other):
        return self.message == jsonutils.loads(other)


class _FakeTransport(object):
    def __init__(self, conf):
        self.conf = conf

    def _send_notification(self, target, ctxt, message, version, retry=None):
        pass


class _ReRaiseLoggedExceptionsFixture(fixtures.Fixture):
    """Record logged exceptions and re-raise in cleanup.

    The notifier just logs notification send errors so, for the sake of
    debugging test failures, we record any exceptions logged and re-raise
    them during cleanup.
    """

    class FakeLogger(object):
        def __init__(self):
            self.exceptions = []

        def exception(self, msg, *args, **kwargs):
            self.exceptions.append(sys.exc_info()[1])

    def setUp(self):
        super(_ReRaiseLoggedExceptionsFixture, self).setUp()

        self.logger = self.FakeLogger()

        def reraise_exceptions():
            for ex in self.logger.exceptions:
                raise ex

        self.addCleanup(reraise_exceptions)


class TestMessagingNotifier(test_utils.BaseTestCase):

    _v1 = [
        ('v1', dict(v1=True)),
        ('not_v1', dict(v1=False)),
    ]

    _v2 = [
        ('v2', dict(v2=True)),
        ('not_v2', dict(v2=False)),
    ]

    _publisher_id = [
        ('ctor_pub_id', dict(ctor_pub_id='test', expected_pub_id='test')),
        ('prep_pub_id', dict(prep_pub_id='test.localhost',
                             expected_pub_id='test.localhost')),
        ('override', dict(ctor_pub_id='test',
                          prep_pub_id='test.localhost',
                          expected_pub_id='test.localhost')),
    ]

    _topics = [
        ('no_topics', dict(topics=[])),
        ('single_topic', dict(topics=['notifications'])),
        ('multiple_topic2', dict(topics=['foo', 'bar'])),
    ]

    _priority = [
        ('audit', dict(priority='audit')),
        ('debug', dict(priority='debug')),
        ('info', dict(priority='info')),
        ('warn', dict(priority='warn')),
        ('error', dict(priority='error')),
        ('sample', dict(priority='sample')),
        ('critical', dict(priority='critical')),
    ]

    _payload = [
        ('payload', dict(payload={'foo': 'bar'})),
    ]

    _context = [
        ('ctxt', dict(ctxt={'user': 'bob'})),
    ]

    _retry = [
        ('unconfigured', dict()),
        ('None', dict(retry=None)),
        ('0', dict(retry=0)),
        ('5', dict(retry=5)),
    ]

    @classmethod
    def generate_scenarios(cls):
        cls.scenarios = testscenarios.multiply_scenarios(cls._v1,
                                                         cls._v2,
                                                         cls._publisher_id,
                                                         cls._topics,
                                                         cls._priority,
                                                         cls._payload,
                                                         cls._context,
                                                         cls._retry)

    def setUp(self):
        super(TestMessagingNotifier, self).setUp()

        self.logger = self.useFixture(_ReRaiseLoggedExceptionsFixture()).logger
        self.stubs.Set(messaging, 'LOG', self.logger)
        self.stubs.Set(msg_notifier, '_LOG', self.logger)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_notifier(self, mock_utcnow):
        drivers = []
        if self.v1:
            drivers.append('messaging')
        if self.v2:
            drivers.append('messagingv2')

        self.config(driver=drivers,
                    topics=self.topics,
                    group='oslo_messaging_notifications')

        transport = _FakeTransport(self.conf)

        if hasattr(self, 'ctor_pub_id'):
            notifier = oslo_messaging.Notifier(transport,
                                               publisher_id=self.ctor_pub_id)
        else:
            notifier = oslo_messaging.Notifier(transport)

        prepare_kwds = {}
        if hasattr(self, 'retry'):
            prepare_kwds['retry'] = self.retry
        if hasattr(self, 'prep_pub_id'):
            prepare_kwds['publisher_id'] = self.prep_pub_id
        if prepare_kwds:
            notifier = notifier.prepare(**prepare_kwds)

        transport._send_notification = mock.Mock()

        message_id = uuid.uuid4()
        uuid.uuid4 = mock.Mock(return_value=message_id)

        mock_utcnow.return_value = datetime.datetime.utcnow()

        message = {
            'message_id': str(message_id),
            'publisher_id': self.expected_pub_id,
            'event_type': 'test.notify',
            'priority': self.priority.upper(),
            'payload': self.payload,
            'timestamp': str(timeutils.utcnow()),
        }

        sends = []
        if self.v1:
            sends.append(dict(version=1.0))
        if self.v2:
            sends.append(dict(version=2.0))

        calls = []
        for send_kwargs in sends:
            for topic in self.topics:
                if hasattr(self, 'retry'):
                    send_kwargs['retry'] = self.retry
                else:
                    send_kwargs['retry'] = None
                target = oslo_messaging.Target(topic='%s.%s' % (topic,
                                                                self.priority))
                calls.append(mock.call(target,
                                       self.ctxt,
                                       message,
                                       **send_kwargs))

        method = getattr(notifier, self.priority)
        method(self.ctxt, 'test.notify', self.payload)

        uuid.uuid4.assert_called_once_with()
        transport._send_notification.assert_has_calls(calls, any_order=True)


TestMessagingNotifier.generate_scenarios()


class TestSerializer(test_utils.BaseTestCase):

    def setUp(self):
        super(TestSerializer, self).setUp()
        self.addCleanup(_impl_test.reset)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_serializer(self, mock_utcnow):
        transport = _FakeTransport(self.conf)

        serializer = msg_serializer.NoOpSerializer()

        notifier = oslo_messaging.Notifier(transport,
                                           'test.localhost',
                                           driver='test',
                                           topic='test',
                                           serializer=serializer)

        message_id = uuid.uuid4()
        uuid.uuid4 = mock.Mock(return_value=message_id)

        mock_utcnow.return_value = datetime.datetime.utcnow()

        serializer.serialize_context = mock.Mock()
        serializer.serialize_context.return_value = dict(user='alice')
        serializer.serialize_entity = mock.Mock()
        serializer.serialize_entity.return_value = 'sbar'

        notifier.info(dict(user='bob'), 'test.notify', 'bar')

        message = {
            'message_id': str(message_id),
            'publisher_id': 'test.localhost',
            'event_type': 'test.notify',
            'priority': 'INFO',
            'payload': 'sbar',
            'timestamp': str(timeutils.utcnow()),
        }

        self.assertEqual([(dict(user='alice'), message, 'INFO', None)],
                         _impl_test.NOTIFICATIONS)

        uuid.uuid4.assert_called_once_with()

        serializer.serialize_context.assert_called_once_with(dict(user='bob'))
        serializer.serialize_entity.assert_called_once_with(dict(user='bob'),
                                                            'bar')


class TestNotifierTopics(test_utils.BaseTestCase):

    def test_topics_from_config(self):
        self.config(driver=['log'],
                    group='oslo_messaging_notifications')
        self.config(topics=['topic1', 'topic2'],
                    group='oslo_messaging_notifications')
        transport = _FakeTransport(self.conf)
        notifier = oslo_messaging.Notifier(transport, 'test.localhost')
        self.assertEqual(['topic1', 'topic2'], notifier._topics)

    def test_topics_from_kwargs(self):
        self.config(driver=['log'],
                    group='oslo_messaging_notifications')
        transport = _FakeTransport(self.conf)

        notifier = oslo_messaging.Notifier(transport, 'test.localhost',
                                           topic='topic1')
        self.assertEqual(['topic1'], notifier._topics)

        notifier = oslo_messaging.Notifier(transport, 'test.localhost',
                                           topics=['topic1', 'topic2'])
        self.assertEqual(['topic1', 'topic2'], notifier._topics)


class TestLogNotifier(test_utils.BaseTestCase):

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_notifier(self, mock_utcnow):
        self.config(driver=['log'],
                    group='oslo_messaging_notifications')

        transport = _FakeTransport(self.conf)

        notifier = oslo_messaging.Notifier(transport, 'test.localhost')

        message_id = uuid.uuid4()
        uuid.uuid4 = mock.Mock()
        uuid.uuid4.return_value = message_id

        mock_utcnow.return_value = datetime.datetime.utcnow()

        message = {
            'message_id': str(message_id),
            'publisher_id': 'test.localhost',
            'event_type': 'test.notify',
            'priority': 'INFO',
            'payload': 'bar',
            'timestamp': str(timeutils.utcnow()),
        }

        logger = mock.Mock()
        logging.getLogger = mock.Mock()
        logging.getLogger.return_value = logger

        notifier.info({}, 'test.notify', 'bar')

        uuid.uuid4.assert_called_once_with()
        logging.getLogger.assert_called_once_with('oslo.messaging.'
                                                  'notification.test.notify')

        logger.info.assert_called_once_with(JsonMessageMatcher(message))

    def test_sample_priority(self):
        # Ensure logger drops sample-level notifications.
        driver = _impl_log.LogDriver(None, None, None)

        logger = mock.Mock(spec=logging.getLogger('oslo.messaging.'
                                                  'notification.foo'))
        logger.sample = None
        logging.getLogger = mock.Mock()
        logging.getLogger.return_value = logger

        msg = {'event_type': 'foo'}
        driver.notify(None, msg, "sample", None)

        logging.getLogger.assert_called_once_with('oslo.messaging.'
                                                  'notification.foo')

    def test_mask_passwords(self):
        # Ensure that passwords are masked with notifications
        driver = _impl_log.LogDriver(None, None, None)
        logger = mock.MagicMock()
        logger.info = mock.MagicMock()
        message = {'password': 'passw0rd', 'event_type': 'foo'}
        mask_str = jsonutils.dumps(strutils.mask_dict_password(message))

        with mock.patch.object(logging, 'getLogger') as gl:
            gl.return_value = logger
            driver.notify(None, message, 'info', 0)

        logger.info.assert_called_once_with(mask_str)


class TestRoutingNotifier(test_utils.BaseTestCase):
    def setUp(self):
        super(TestRoutingNotifier, self).setUp()
        self.config(driver=['routing'],
                    group='oslo_messaging_notifications')

        transport = _FakeTransport(self.conf)
        self.notifier = oslo_messaging.Notifier(transport)
        self.router = self.notifier._driver_mgr['routing'].obj

    def _fake_extension_manager(self, ext):
        return extension.ExtensionManager.make_test_instance(
            [extension.Extension('test', None, None, ext), ])

    def _empty_extension_manager(self):
        return extension.ExtensionManager.make_test_instance([])

    def test_should_load_plugin(self):
        self.router.used_drivers = set(["zoo", "blah"])
        ext = mock.MagicMock()
        ext.name = "foo"
        self.assertFalse(self.router._should_load_plugin(ext))
        ext.name = "zoo"
        self.assertTrue(self.router._should_load_plugin(ext))

    def test_load_notifiers_no_config(self):
        # default routing_config=""
        self.router._load_notifiers()
        self.assertEqual({}, self.router.routing_groups)
        self.assertEqual(0, len(self.router.used_drivers))

    def test_load_notifiers_no_extensions(self):
        self.config(routing_config="routing_notifier.yaml",
                    group='oslo_messaging_notifications')
        routing_config = r""
        config_file = mock.MagicMock()
        config_file.return_value = routing_config

        with mock.patch.object(self.router, '_get_notifier_config_file',
                               config_file):
            with mock.patch('stevedore.dispatch.DispatchExtensionManager',
                            return_value=self._empty_extension_manager()):
                with mock.patch('oslo_messaging.notify.'
                                '_impl_routing.LOG') as mylog:
                    self.router._load_notifiers()
                    self.assertFalse(mylog.debug.called)
        self.assertEqual({}, self.router.routing_groups)

    def test_load_notifiers_config(self):
        self.config(routing_config="routing_notifier.yaml",
                    group='oslo_messaging_notifications')
        routing_config = r"""
group_1:
    rpc : foo
group_2:
    rpc : blah
        """

        config_file = mock.MagicMock()
        config_file.return_value = routing_config

        with mock.patch.object(self.router, '_get_notifier_config_file',
                               config_file):
            with mock.patch('stevedore.dispatch.DispatchExtensionManager',
                            return_value=self._fake_extension_manager(
                                mock.MagicMock())):
                self.router._load_notifiers()
                groups = list(self.router.routing_groups.keys())
                groups.sort()
                self.assertEqual(['group_1', 'group_2'], groups)

    def test_get_drivers_for_message_accepted_events(self):
        config = r"""
group_1:
    rpc:
        accepted_events:
            - foo.*
            - blah.zoo.*
            - zip
        """
        groups = yaml.safe_load(config)
        group = groups['group_1']

        # No matching event ...
        self.assertEqual([],
                         self.router._get_drivers_for_message(
                             group, "unknown", "info"))

        # Child of foo ...
        self.assertEqual(['rpc'],
                         self.router._get_drivers_for_message(
                             group, "foo.1", "info"))

        # Foo itself ...
        self.assertEqual([],
                         self.router._get_drivers_for_message(
                             group, "foo", "info"))

        # Child of blah.zoo
        self.assertEqual(['rpc'],
                         self.router._get_drivers_for_message(
                             group, "blah.zoo.zing", "info"))

    def test_get_drivers_for_message_accepted_priorities(self):
        config = r"""
group_1:
    rpc:
        accepted_priorities:
            - info
            - error
        """
        groups = yaml.safe_load(config)
        group = groups['group_1']

        # No matching priority
        self.assertEqual([],
                         self.router._get_drivers_for_message(
                             group, None, "unknown"))

        # Info ...
        self.assertEqual(['rpc'],
                         self.router._get_drivers_for_message(
                             group, None, "info"))

        # Error (to make sure the list is getting processed) ...
        self.assertEqual(['rpc'],
                         self.router._get_drivers_for_message(
                             group, None, "error"))

    def test_get_drivers_for_message_both(self):
        config = r"""
group_1:
    rpc:
        accepted_priorities:
            - info
        accepted_events:
            - foo.*
    driver_1:
        accepted_priorities:
            - info
    driver_2:
        accepted_events:
            - foo.*
        """
        groups = yaml.safe_load(config)
        group = groups['group_1']

        # Valid event, but no matching priority
        self.assertEqual(['driver_2'],
                         self.router._get_drivers_for_message(
                             group, 'foo.blah', "unknown"))

        # Valid priority, but no matching event
        self.assertEqual(['driver_1'],
                         self.router._get_drivers_for_message(
                             group, 'unknown', "info"))

        # Happy day ...
        x = self.router._get_drivers_for_message(group, 'foo.blah', "info")
        x.sort()
        self.assertEqual(['driver_1', 'driver_2', 'rpc'], x)

    def test_filter_func(self):
        ext = mock.MagicMock()
        ext.name = "rpc"

        # Good ...
        self.assertTrue(self.router._filter_func(ext, {}, {}, 'info',
                                                 None, ['foo', 'rpc']))

        # Bad
        self.assertFalse(self.router._filter_func(ext, {}, {}, 'info',
                                                  None, ['foo']))

    def test_notify(self):
        self.router.routing_groups = {'group_1': None, 'group_2': None}
        drivers_mock = mock.MagicMock()
        drivers_mock.side_effect = [['rpc'], ['foo']]

        with mock.patch.object(self.router, 'plugin_manager') as pm:
            with mock.patch.object(self.router, '_get_drivers_for_message',
                                   drivers_mock):
                self.notifier.info({}, 'my_event', {})
                self.assertEqual(sorted(['rpc', 'foo']),
                                 sorted(pm.map.call_args[0][6]))

    def test_notify_filtered(self):
        self.config(routing_config="routing_notifier.yaml",
                    group='oslo_messaging_notifications')
        routing_config = r"""
group_1:
    rpc:
        accepted_events:
            - my_event
    rpc2:
        accepted_priorities:
            - info
    bar:
        accepted_events:
            - nothing
        """
        config_file = mock.MagicMock()
        config_file.return_value = routing_config

        rpc_driver = mock.Mock()
        rpc2_driver = mock.Mock()
        bar_driver = mock.Mock()

        pm = dispatch.DispatchExtensionManager.make_test_instance(
            [extension.Extension('rpc', None, None, rpc_driver),
             extension.Extension('rpc2', None, None, rpc2_driver),
             extension.Extension('bar', None, None, bar_driver)],
        )

        with mock.patch.object(self.router, '_get_notifier_config_file',
                               config_file):
            with mock.patch('stevedore.dispatch.DispatchExtensionManager',
                            return_value=pm):
                self.notifier.info({}, 'my_event', {})

        self.assertFalse(bar_driver.info.called)
        rpc_driver.notify.assert_called_once_with(
            {}, mock.ANY, 'INFO', None)
        rpc2_driver.notify.assert_called_once_with(
            {}, mock.ANY, 'INFO', None)
// file: footer.js
(function (angular) {
    "use strict";

    var appFooter = angular.module('myApp.footer', []);

    appFooter.controller("footerCtrl", ['$scope', function ($scope) {
    }]);

    appFooter.directive("siteFooter", function () {
        return {
            restrict: 'A',
            templateUrl: 'app/components/footer/footer.html'
        };
    });
}(angular));
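// A minimal sketch of wiring the footer module into the application, assuming
// the root module is named 'myApp' (an assumption based on the 'myApp.footer'
// module name); in markup the directive is used as an attribute, for example
// <div site-footer></div>.
angular.module('myApp', ['myApp.footer']);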
<|file_name|>network_source_line_server_unittest.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2010, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Unit tests for NetworkSourceLineServer. #include <ios> #include <set> #include <string> #include "breakpad_googletest_includes.h" #include "google_breakpad/processor/code_module.h" #include "google_breakpad/processor/source_line_resolver_interface.h" #include "google_breakpad/processor/stack_frame.h" #include "google_breakpad/processor/symbol_supplier.h" #include "processor/binarystream.h" #include "processor/cfi_frame_info.h" #include "processor/network_source_line_server.h" #include "processor/network_source_line_protocol.h" #include "processor/windows_frame_info.h" namespace { using std::ios_base; using std::set; using std::string; using google_breakpad::CFIFrameInfo; using google_breakpad::CodeModule; using google_breakpad::binarystream; using google_breakpad::NetworkInterface; using google_breakpad::NetworkSourceLineServer; using google_breakpad::SourceLineResolverInterface; using google_breakpad::StackFrame; using google_breakpad::SymbolSupplier; using google_breakpad::SystemInfo; using google_breakpad::WindowsFrameInfo; using ::testing::_; using ::testing::DoAll; using ::testing::Invoke; using ::testing::Property; using ::testing::Return; using ::testing::SetArgumentPointee; // Style guide forbids "using namespace", so at least shorten it. 
namespace P = google_breakpad::source_line_protocol; class MockNetwork : public NetworkInterface { public: MockNetwork() {} MOCK_METHOD1(Init, bool(bool listen)); MOCK_METHOD2(Send, bool(const char *data, size_t length)); MOCK_METHOD1(WaitToReceive, bool(int timeout)); MOCK_METHOD3(Receive, bool(char *buffer, size_t buffer_size, ssize_t &received)); }; class MockSymbolSupplier : public SymbolSupplier { public: MockSymbolSupplier() {} MOCK_METHOD3(GetSymbolFile, SymbolResult(const CodeModule *module, const SystemInfo *system_info, string *symbol_file)); MOCK_METHOD4(GetSymbolFile, SymbolResult(const CodeModule *module, const SystemInfo *system_info, string *symbol_file, string *symbol_data)); }; class MockSourceLineResolver : public SourceLineResolverInterface { public: MockSourceLineResolver() {} virtual ~MockSourceLineResolver() {} MOCK_METHOD2(LoadModule, bool(const CodeModule *module, const string &map_file)); MOCK_METHOD2(LoadModuleUsingMapBuffer, bool(const CodeModule *module, const string &map_buffer)); MOCK_METHOD1(UnloadModule, void(const CodeModule *module)); MOCK_METHOD1(HasModule, bool(const CodeModule *module)); MOCK_METHOD1(FillSourceLineInfo, void(StackFrame *frame)); MOCK_METHOD1(FindWindowsFrameInfo, WindowsFrameInfo*(const StackFrame *frame)); MOCK_METHOD1(FindCFIFrameInfo, CFIFrameInfo*(const StackFrame *frame)); }; class TestNetworkSourceLineServer : public NetworkSourceLineServer { public: // Override visibility for testing. It's a lot easier to just // call into this method and verify the result than it would be // to mock out the calls to the NetworkInterface, even though // that would ostensibly be more correct and test the code more // thoroughly. Perhaps if someone has time and figures out a // clean way to do it this could be changed. 
using NetworkSourceLineServer::HandleRequest; TestNetworkSourceLineServer(SymbolSupplier *supplier, SourceLineResolverInterface *resolver, NetworkInterface *net, u_int64_t max_symbol_lines = 0) : NetworkSourceLineServer(supplier, resolver, net, max_symbol_lines) {} }; class NetworkSourceLineServerTest : public ::testing::Test { public: MockSymbolSupplier supplier; MockSourceLineResolver resolver; MockNetwork net; TestNetworkSourceLineServer *server; NetworkSourceLineServerTest() : server(NULL) {} void SetUp() { server = new TestNetworkSourceLineServer(&supplier, &resolver, &net); } }; TEST_F(NetworkSourceLineServerTest, TestInit) { EXPECT_CALL(net, Init(true)).WillOnce(Return(true)); EXPECT_CALL(net, WaitToReceive(0)).WillOnce(Return(false)); ASSERT_TRUE(server->Initialize()); EXPECT_FALSE(server->RunOnce(0)); } TEST_F(NetworkSourceLineServerTest, TestMalformedRequest) { binarystream request; // send a request without a full sequence number request << u_int8_t(1); binarystream response; EXPECT_FALSE(server->HandleRequest(request, response)); request.rewind(); // send a request without a command request << u_int16_t(1); EXPECT_FALSE(server->HandleRequest(request, response)); } TEST_F(NetworkSourceLineServerTest, TestUnknownCommand) { binarystream request; // send a request with an unknown command request << u_int16_t(1) << u_int8_t(100); binarystream response; ASSERT_TRUE(server->HandleRequest(request, response)); u_int16_t response_sequence; u_int8_t response_status; response >> response_sequence >> response_status; ASSERT_FALSE(response.eof()); EXPECT_EQ(u_int16_t(1), response_sequence); EXPECT_EQ(P::ERROR, int(response_status)); } TEST_F(NetworkSourceLineServerTest, TestHasBasic) { EXPECT_CALL(resolver, HasModule(_)) .WillOnce(Return(false)) .WillOnce(Return(true)); binarystream request; const u_int16_t sequence = 0xA0A0; // first request should come back as not loaded request << sequence << P::HAS << string("test.dll") << string("test.pdb") << string("ABCD1234"); binarystream response; ASSERT_TRUE(server->HandleRequest(request, response)); u_int16_t response_sequence; u_int8_t response_status, response_data; response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ(P::MODULE_NOT_LOADED, int(response_data)); // second request should come back as loaded binarystream request2; request2 << sequence << P::HAS << string("loaded.dll") << string("loaded.pdb") << string("ABCD1234"); ASSERT_TRUE(server->HandleRequest(request2, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ(P::MODULE_LOADED, int(response_data)); } TEST_F(NetworkSourceLineServerTest, TestMalformedHasRequest) { binarystream request; // send request with just command, missing all data const u_int16_t sequence = 0xA0A0; request << sequence << P::HAS; binarystream response; ASSERT_TRUE(server->HandleRequest(request, response)); u_int16_t response_sequence; u_int8_t response_status; response >> response_sequence >> response_status; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::ERROR, int(response_status)); // send request with just module name binarystream request2; request2 << sequence << P::HAS << string("test.dll"); ASSERT_TRUE(server->HandleRequest(request2, response)); response >> response_sequence >> response_status; 
ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::ERROR, int(response_status)); // send request with module name, debug file, missing debug id binarystream request3; request3 << sequence << P::HAS << string("test.dll") << string("test.pdb"); ASSERT_TRUE(server->HandleRequest(request3, response)); response >> response_sequence >> response_status; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::ERROR, int(response_status)); } TEST_F(NetworkSourceLineServerTest, TestHasLoad) { EXPECT_CALL(resolver, HasModule(_)) .WillOnce(Return(false)) .WillOnce(Return(false)) .WillOnce(Return(true)); EXPECT_CALL(resolver, LoadModuleUsingMapBuffer(_,_)) .WillOnce(Return(true)); EXPECT_CALL(supplier, GetSymbolFile(_,_,_,_)) .WillOnce(Return(SymbolSupplier::FOUND)); // verify that the module is not loaded, with a HAS request binarystream request; const u_int16_t sequence = 0xA0A0; request << sequence << P::HAS << string("found.dll") << string("found.pdb") << string("ABCD1234"); binarystream response; ASSERT_TRUE(server->HandleRequest(request, response)); u_int16_t response_sequence; u_int8_t response_status, response_data; response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(P::MODULE_NOT_LOADED, int(response_data)); // now send a load request for this module binarystream request2; const u_int16_t sequence2 = 0xB0B0; request2 << sequence2 << P::LOAD << string("found.dll") << string("found.pdb") << string("ABCD1234"); ASSERT_TRUE(server->HandleRequest(request2, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence2, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); // sending another HAS message should now show it as loaded binarystream request3; const u_int16_t sequence3 = 0xC0C0; request3 << sequence3 << P::HAS << string("found.dll") << string("found.pdb") << string("ABCD1234"); ASSERT_TRUE(server->HandleRequest(request3, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence3, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ(P::MODULE_LOADED, int(response_data)); } TEST_F(NetworkSourceLineServerTest, TestLoad) { EXPECT_CALL(resolver, HasModule(_)) .Times(3) .WillRepeatedly(Return(false)); EXPECT_CALL(resolver, LoadModuleUsingMapBuffer(_,_)) .WillOnce(Return(false)); EXPECT_CALL(supplier, GetSymbolFile(_,_,_,_)) .WillOnce(Return(SymbolSupplier::NOT_FOUND)) .WillOnce(Return(SymbolSupplier::INTERRUPT)) .WillOnce(Return(SymbolSupplier::FOUND)); // notfound.dll should return LOAD_NOT_FOUND binarystream request; const u_int16_t sequence = 0xA0A0; request << sequence << P::LOAD << string("notfound.dll") << string("notfound.pdb") << string("ABCD1234"); binarystream response; ASSERT_TRUE(server->HandleRequest(request, response)); u_int16_t response_sequence; u_int8_t response_status, response_data; response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ(int(P::LOAD_NOT_FOUND), int(response_data)); // interrupt.dll should return LOAD_INTERRUPT binarystream request2; const u_int16_t sequence2 = 0xB0B0; request2 << sequence2 << P::LOAD << string("interrupt.dll") << 
string("interrupt.pdb") << string("0000"); ASSERT_TRUE(server->HandleRequest(request2, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence2, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ(int(P::LOAD_INTERRUPT), int(response_data)); // fail.dll should return LOAD_FAIL binarystream request3; const u_int16_t sequence3 = 0xC0C0; request3 << sequence3 << P::LOAD << string("fail.dll") << string("fail.pdb") << string("FFFFFFFF"); ASSERT_TRUE(server->HandleRequest(request3, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence3, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ(int(P::LOAD_FAIL), int(response_data)); } TEST_F(NetworkSourceLineServerTest, TestMalformedLoadRequest) { binarystream request; // send request with just command, missing all data const u_int16_t sequence = 0xA0A0; request << sequence << P::LOAD; binarystream response; ASSERT_TRUE(server->HandleRequest(request, response)); u_int16_t response_sequence; u_int8_t response_status; response >> response_sequence >> response_status; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::ERROR, int(response_status)); // send request with just module name binarystream request2; request2 << sequence << P::LOAD << string("test.dll"); ASSERT_TRUE(server->HandleRequest(request2, response)); response >> response_sequence >> response_status; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::ERROR, int(response_status)); // send request with module name, debug file, missing debug id binarystream request3; request3 << sequence << P::LOAD << string("test.dll") << string("test.pdb"); ASSERT_TRUE(server->HandleRequest(request3, response)); response >> response_sequence >> response_status; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::ERROR, int(response_status)); } void FillFullSourceLineInfo(StackFrame *frame) { frame->function_name = "function1"; frame->function_base = 0x1200; frame->source_file_name = "function1.cc"; frame->source_line = 1; frame->source_line_base = 0x1230; } void FillPartialSourceLineInfo(StackFrame *frame) { frame->function_name = "function2"; frame->function_base = 0xFFF0; } TEST_F(NetworkSourceLineServerTest, TestGet) { EXPECT_CALL(resolver, FillSourceLineInfo(_)) .WillOnce(Invoke(FillFullSourceLineInfo)) .WillOnce(Invoke(FillPartialSourceLineInfo)); binarystream request; const u_int16_t sequence = 0xA0A0; request << sequence << P::GET << string("loaded.dll") << string("loaded.pdb") << string("ABCD1234") << u_int64_t(0x1000) << u_int64_t(0x1234); binarystream response; ASSERT_TRUE(server->HandleRequest(request, response)); u_int16_t response_sequence; u_int8_t response_status; string function, source_file; u_int32_t source_line; u_int64_t function_base, source_line_base; response >> response_sequence >> response_status >> function >> function_base >> source_file >> source_line >> source_line_base; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ("function1", function); EXPECT_EQ(0x1200, function_base); EXPECT_EQ("function1.cc", source_file); EXPECT_EQ(1, source_line); EXPECT_EQ(0x1230, source_line_base); binarystream request2; const u_int16_t sequence2 = 0xC0C0; request2 << sequence2 << P::GET << string("loaded.dll") << string("loaded.pdb") << string("ABCD1234") 
<< u_int64_t(0x1000) << u_int64_t(0xFFFF); ASSERT_TRUE(server->HandleRequest(request2, response)); response >> response_sequence >> response_status >> function >> function_base >> source_file >> source_line >> source_line_base; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence2, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ("function2", function); EXPECT_EQ(0xFFF0, function_base); EXPECT_EQ("", source_file); EXPECT_EQ(0, source_line); EXPECT_EQ(0, source_line_base); } WindowsFrameInfo* GetFullWindowsFrameInfo(const StackFrame *frame) { // return frame info with program string return new WindowsFrameInfo(1, 2, 3, 0xA, 0xFF, 0xF00, true, "x y ="); } WindowsFrameInfo* GetPartialWindowsFrameInfo(const StackFrame *frame) { // return frame info, no program string return new WindowsFrameInfo(1, 2, 3, 4, 5, 6, true, ""); } TEST_F(NetworkSourceLineServerTest, TestGetStackWin) { EXPECT_CALL(resolver, FindWindowsFrameInfo(_)) .WillOnce(Invoke(GetFullWindowsFrameInfo)) .WillOnce(Invoke(GetPartialWindowsFrameInfo)) .WillOnce(Return((WindowsFrameInfo*)NULL)); binarystream request; const u_int16_t sequence = 0xA0A0; request << sequence << P::GETSTACKWIN << string("loaded.dll") << string("loaded.pdb") << string("ABCD1234") << u_int64_t(0x1000) << u_int64_t(0x1234); binarystream response; ASSERT_TRUE(server->HandleRequest(request, response)); u_int16_t response_sequence; u_int8_t response_status; string stack_info; response >> response_sequence >> response_status >> stack_info; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ("0 0 0 1 2 3 a ff f00 1 x y =", stack_info); binarystream request2; const u_int16_t sequence2 = 0xB0B0; request2 << sequence2 << P::GETSTACKWIN << string("loaded.dll") << string("loaded.pdb") << string("ABCD1234") << u_int64_t(0x1000) << u_int64_t(0xABCD); ASSERT_TRUE(server->HandleRequest(request2, response)); response >> response_sequence >> response_status >> stack_info; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence2, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ("0 0 0 1 2 3 4 5 6 0 1", stack_info); binarystream request3; const u_int16_t sequence3 = 0xC0C0; request3 << sequence3 << P::GETSTACKWIN << string("loaded.dll") << string("loaded.pdb") << string("ABCD1234") << u_int64_t(0x1000) << u_int64_t(0xFFFF); ASSERT_TRUE(server->HandleRequest(request3, response)); response >> response_sequence >> response_status >> stack_info; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence3, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ("", stack_info); } CFIFrameInfo* GetCFIFrameInfoJustCFA(const StackFrame *frame) { CFIFrameInfo* cfi = new CFIFrameInfo(); cfi->SetCFARule("12345678"); return cfi; } CFIFrameInfo* GetCFIFrameInfoCFARA(const StackFrame *frame) { CFIFrameInfo* cfi = new CFIFrameInfo(); cfi->SetCFARule("12345678"); cfi->SetRARule("abcdefgh"); return cfi; } CFIFrameInfo* GetCFIFrameInfoLots(const StackFrame *frame) { CFIFrameInfo* cfi = new CFIFrameInfo(); cfi->SetCFARule("12345678"); cfi->SetRARule("abcdefgh"); cfi->SetRegisterRule("r0", "foo bar"); cfi->SetRegisterRule("b0", "123 abc +"); return cfi; } TEST_F(NetworkSourceLineServerTest, TestGetStackCFI) {<|fim▁hole|> .WillOnce(Invoke(GetCFIFrameInfoJustCFA)) .WillOnce(Invoke(GetCFIFrameInfoCFARA)) .WillOnce(Invoke(GetCFIFrameInfoLots)); binarystream request; const u_int16_t sequence = 0xA0A0; request << sequence << P::GETSTACKCFI << string("loaded.dll") << string("loaded.pdb") << 
string("ABCD1234") << u_int64_t(0x1000) << u_int64_t(0x1234); binarystream response; ASSERT_TRUE(server->HandleRequest(request, response)); u_int16_t response_sequence; u_int8_t response_status; string stack_info; response >> response_sequence >> response_status >> stack_info; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ("", stack_info); binarystream request2; const u_int16_t sequence2 = 0xB0B0; request2 << sequence2 << P::GETSTACKCFI << string("loaded.dll") << string("loaded.pdb") << string("ABCD1234") << u_int64_t(0x1000) << u_int64_t(0xABCD); ASSERT_TRUE(server->HandleRequest(request2, response)); response >> response_sequence >> response_status >> stack_info; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence2, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ(".cfa: 12345678", stack_info); binarystream request3; const u_int16_t sequence3 = 0xC0C0; request3 << sequence3 << P::GETSTACKCFI << string("loaded.dll") << string("loaded.pdb") << string("ABCD1234") << u_int64_t(0x1000) << u_int64_t(0xFFFF); ASSERT_TRUE(server->HandleRequest(request3, response)); response >> response_sequence >> response_status >> stack_info; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence3, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ(".cfa: 12345678 .ra: abcdefgh", stack_info); binarystream request4; const u_int16_t sequence4 = 0xD0D0; request4 << sequence4 << P::GETSTACKCFI << string("loaded.dll") << string("loaded.pdb") << string("ABCD1234") << u_int64_t(0x1000) << u_int64_t(0xFFFF); ASSERT_TRUE(server->HandleRequest(request4, response)); response >> response_sequence >> response_status >> stack_info; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence4, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ(".cfa: 12345678 .ra: abcdefgh b0: 123 abc + r0: foo bar", stack_info); } TEST_F(NetworkSourceLineServerTest, TestMalformedGetRequest) { //TODO } TEST(TestMissingMembers, TestServerWithoutSymbolSupplier) { // Should provide reasonable responses without a SymbolSupplier MockSourceLineResolver resolver; MockNetwork net; TestNetworkSourceLineServer server(NULL, &resolver, &net); // All LOAD requests should return LOAD_NOT_FOUND binarystream request; binarystream response; const u_int16_t sequence = 0xB0B0; u_int16_t response_sequence; u_int8_t response_status, response_data; request << sequence << P::LOAD << string("found.dll") << string("found.pdb") << string("ABCD1234"); ASSERT_TRUE(server.HandleRequest(request, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_NOT_FOUND), int(response_data)); } TEST(TestMissingMembers, TestServerWithoutResolver) { // Should provide reasonable responses without a SourceLineResolver MockSymbolSupplier supplier; MockNetwork net; TestNetworkSourceLineServer server(&supplier, NULL, &net); // GET requests should return empty info binarystream request; binarystream response; const u_int16_t sequence = 0xA0A0; u_int16_t response_sequence; u_int8_t response_status; request << sequence << P::GET << string("loaded.dll") << string("loaded.pdb") << string("ABCD1234") << u_int64_t(0x1000) << u_int64_t(0x1234); ASSERT_TRUE(server.HandleRequest(request, response)); string function, source_file; u_int32_t source_line; u_int64_t function_base, source_line_base; response >> response_sequence >> 
response_status >> function >> function_base >> source_file >> source_line >> source_line_base; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ("", function); EXPECT_EQ(0x0, function_base); EXPECT_EQ("", source_file); EXPECT_EQ(0, source_line); EXPECT_EQ(0x0, source_line_base); // GETSTACKWIN requests should return an empty string binarystream request2; const u_int16_t sequence2 = 0xB0B0; request << sequence2 << P::GETSTACKWIN << string("loaded.dll") << string("loaded.pdb") << string("ABCD1234") << u_int64_t(0x1000) << u_int64_t(0x1234); ASSERT_TRUE(server.HandleRequest(request, response)); string response_string; response >> response_sequence >> response_status >> response_string; EXPECT_EQ(sequence2, response_sequence); EXPECT_EQ(P::OK, int(response_status)); EXPECT_EQ("", response_string); } class TestModuleManagement : public ::testing::Test { public: MockSymbolSupplier supplier; MockSourceLineResolver resolver; MockNetwork net; TestNetworkSourceLineServer server; // Init server with symbol line limit of 25 TestModuleManagement() : server(&supplier, &resolver, &net, 25) {} }; TEST_F(TestModuleManagement, TestModuleUnloading) { EXPECT_CALL(supplier, GetSymbolFile(_,_,_,_)) .Times(3) .WillRepeatedly(DoAll(SetArgumentPointee<3>(string("1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n")), Return(SymbolSupplier::FOUND))); EXPECT_CALL(resolver, HasModule(_)) .Times(3) .WillRepeatedly(Return(false)); EXPECT_CALL(resolver, LoadModuleUsingMapBuffer(_,_)) .Times(3) .WillRepeatedly(Return(true)); EXPECT_CALL(resolver, UnloadModule(Property(&CodeModule::code_file, string("one.dll|one.pdb|1111")))) .Times(1); // load three modules, each with 10 lines of symbols. // the third module will overflow the server's symbol line limit, // and should cause the first module to be unloaded. 
binarystream request; const u_int16_t sequence = 0x1010; request << sequence << P::LOAD << string("one.dll") << string("one.pdb") << string("1111"); binarystream response; ASSERT_TRUE(server.HandleRequest(request, response)); u_int16_t response_sequence; u_int8_t response_status, response_data; response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); binarystream request2; const u_int16_t sequence2 = 0x2020; request2 << sequence2 << P::LOAD << string("two.dll") << string("two.pdb") << string("2222"); ASSERT_TRUE(server.HandleRequest(request2, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence2, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); binarystream request3; const u_int16_t sequence3 = 0x3030; request3 << sequence3 << P::LOAD << string("three.dll") << string("three.pdb") << string("3333"); ASSERT_TRUE(server.HandleRequest(request3, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence3, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); } TEST_F(TestModuleManagement, TestSymbolLimitTooLow) { // load module with symbol count > limit, // ensure that it doesn't get unloaded even though it's the only module EXPECT_CALL(supplier, GetSymbolFile(_,_,_,_)) .WillOnce(DoAll(SetArgumentPointee<3>(string("1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n")), Return(SymbolSupplier::FOUND))); EXPECT_CALL(resolver, HasModule(_)) .WillOnce(Return(false)); EXPECT_CALL(resolver, LoadModuleUsingMapBuffer(_,_)) .WillOnce(Return(true)); EXPECT_CALL(resolver, UnloadModule(_)) .Times(0); binarystream request; const u_int16_t sequence = 0x1010; request << sequence << P::LOAD << string("one.dll") << string("one.pdb") << string("1111"); binarystream response; ASSERT_TRUE(server.HandleRequest(request, response)); u_int16_t response_sequence; u_int8_t response_status, response_data; response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); } TEST_F(TestModuleManagement, TestModuleLoadLRU) { // load 2 modules, then re-load the first one, // then load a third one, causing the second one to be unloaded EXPECT_CALL(supplier, GetSymbolFile(_,_,_,_)) .Times(3) .WillRepeatedly(DoAll(SetArgumentPointee<3>(string("1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n")), Return(SymbolSupplier::FOUND))); EXPECT_CALL(resolver, HasModule(_)) .WillOnce(Return(false)) // load module 1 .WillOnce(Return(false)) // load module 2 .WillOnce(Return(true)) // module 1 already loaded .WillOnce(Return(false)); // load module 3 EXPECT_CALL(resolver, LoadModuleUsingMapBuffer(_,_)) .Times(3) .WillRepeatedly(Return(true)); EXPECT_CALL(resolver, UnloadModule(Property(&CodeModule::code_file, string("two.dll|two.pdb|2222")))) .Times(1); binarystream request; const u_int16_t sequence = 0x1010; request << sequence << P::LOAD << string("one.dll") << string("one.pdb") << string("1111"); binarystream response; ASSERT_TRUE(server.HandleRequest(request, response)); u_int16_t response_sequence; u_int8_t response_status, 
response_data; response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); binarystream request2; const u_int16_t sequence2 = 0x2020; request2 << sequence2 << P::LOAD << string("two.dll") << string("two.pdb") << string("2222"); ASSERT_TRUE(server.HandleRequest(request2, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence2, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); binarystream request3; const u_int16_t sequence3 = 0x3030; request3 << sequence3 << P::LOAD << string("one.dll") << string("one.pdb") << string("1111"); ASSERT_TRUE(server.HandleRequest(request3, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence3, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); binarystream request4; const u_int16_t sequence4 = 0x4040; request4 << sequence4 << P::LOAD << string("three.dll") << string("three.pdb") << string("3333"); ASSERT_TRUE(server.HandleRequest(request4, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence4, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); } TEST_F(TestModuleManagement, TestModuleGetLRU) { // load 2 modules, then issue a GET for the first one, // then load a third one, causing the second one to be unloaded EXPECT_CALL(supplier, GetSymbolFile(_,_,_,_)) .Times(3) .WillRepeatedly(DoAll(SetArgumentPointee<3>(string("1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n")), Return(SymbolSupplier::FOUND))); EXPECT_CALL(resolver, HasModule(_)) .Times(3) .WillRepeatedly(Return(false)); EXPECT_CALL(resolver, LoadModuleUsingMapBuffer(_,_)) .Times(3) .WillRepeatedly(Return(true)); EXPECT_CALL(resolver, FillSourceLineInfo(_)) .Times(1); EXPECT_CALL(resolver, UnloadModule(Property(&CodeModule::code_file, string("two.dll|two.pdb|2222")))) .Times(1); binarystream request; const u_int16_t sequence = 0x1010; request << sequence << P::LOAD << string("one.dll") << string("one.pdb") << string("1111"); binarystream response; ASSERT_TRUE(server.HandleRequest(request, response)); u_int16_t response_sequence; u_int8_t response_status, response_data; response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); binarystream request2; const u_int16_t sequence2 = 0x2020; request2 << sequence2 << P::LOAD << string("two.dll") << string("two.pdb") << string("2222"); ASSERT_TRUE(server.HandleRequest(request2, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence2, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); binarystream request3; const u_int16_t sequence3 = 0x3030; request3 << sequence3 << P::GET << string("one.dll") << string("one.pdb") << string("1111") << u_int64_t(0x1000) << u_int64_t(0x1234); ASSERT_TRUE(server.HandleRequest(request3, response)); string function, source_file; u_int32_t source_line; u_int64_t function_base, source_line_base; response >> 
response_sequence >> response_status >> function >> function_base >> source_file >> source_line >> source_line_base; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence3, response_sequence); EXPECT_EQ(P::OK, int(response_status)); // Don't care about the rest of the response, really. binarystream request4; const u_int16_t sequence4 = 0x4040; request4 << sequence4 << P::LOAD << string("three.dll") << string("three.pdb") << string("3333"); ASSERT_TRUE(server.HandleRequest(request4, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence4, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); } TEST_F(TestModuleManagement, TestModuleGetStackWinLRU) { // load 2 modules, then issue a GETSTACKWIN for the first one, // then load a third one, causing the second one to be unloaded EXPECT_CALL(supplier, GetSymbolFile(_,_,_,_)) .Times(3) .WillRepeatedly(DoAll(SetArgumentPointee<3>(string("1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n")), Return(SymbolSupplier::FOUND))); EXPECT_CALL(resolver, HasModule(_)) .Times(3) .WillRepeatedly(Return(false)); EXPECT_CALL(resolver, LoadModuleUsingMapBuffer(_,_)) .Times(3) .WillRepeatedly(Return(true)); EXPECT_CALL(resolver, FindWindowsFrameInfo(_)) .WillOnce(Return((WindowsFrameInfo*)NULL)); EXPECT_CALL(resolver, UnloadModule(Property(&CodeModule::code_file, string("two.dll|two.pdb|2222")))) .Times(1); binarystream request; const u_int16_t sequence = 0x1010; request << sequence << P::LOAD << string("one.dll") << string("one.pdb") << string("1111"); binarystream response; ASSERT_TRUE(server.HandleRequest(request, response)); u_int16_t response_sequence; u_int8_t response_status, response_data; response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); binarystream request2; const u_int16_t sequence2 = 0x2020; request2 << sequence2 << P::LOAD << string("two.dll") << string("two.pdb") << string("2222"); ASSERT_TRUE(server.HandleRequest(request2, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence2, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); binarystream request3; const u_int16_t sequence3 = 0x3030; request3 << sequence3 << P::GETSTACKWIN << string("one.dll") << string("one.pdb") << string("1111") << u_int64_t(0x1000) << u_int64_t(0x1234); ASSERT_TRUE(server.HandleRequest(request3, response)); string stack_info; response >> response_sequence >> response_status >> stack_info; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence3, response_sequence); EXPECT_EQ(P::OK, int(response_status)); // Don't care about the rest of the response, really. binarystream request4; const u_int16_t sequence4 = 0x4040; request4 << sequence4 << P::LOAD << string("three.dll") << string("three.pdb") << string("3333"); ASSERT_TRUE(server.HandleRequest(request4, response)); response >> response_sequence >> response_status >> response_data; ASSERT_FALSE(response.eof()); EXPECT_EQ(sequence4, response_sequence); EXPECT_EQ(P::OK, int(response_status)); ASSERT_EQ(int(P::LOAD_OK), int(response_data)); } } // namespace int main(int argc, char *argv[]) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }<|fim▁end|>
EXPECT_CALL(resolver, FindCFIFrameInfo(_)) .WillOnce(Return((CFIFrameInfo*)NULL))
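// The two LRU tests above pin down one cache behavior: a GET or GETSTACKWIN on a
// loaded module must promote it to most-recently-used, so the next over-capacity
// LOAD evicts the untouched module ("two.dll") instead. A minimal sketch of that
// promote-on-access bookkeeping follows; the class and method names are
// hypothetical, not the actual module-management code under test.
#include <list>
#include <string>

class LruModuleList {
 public:
  explicit LruModuleList(size_t capacity) : capacity_(capacity) {}

  // Promote |id| to most-recently-used (the GET / GETSTACKWIN path).
  void Touch(const std::string& id) {
    modules_.remove(id);       // O(n), fine for a handful of modules
    modules_.push_front(id);
  }

  // Register |id| as loaded; returns the evicted id or "" (the LOAD path).
  std::string Load(const std::string& id) {
    Touch(id);
    if (modules_.size() > capacity_) {
      std::string evicted = modules_.back();  // least recently used
      modules_.pop_back();                    // caller would unload it
      return evicted;
    }
    return std::string();
  }

 private:
  size_t capacity_;
  std::list<std::string> modules_;
};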
<|file_name|>patient_encounter_entry.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals import webnotes from webnotes.utils import flt, fmt_money, cstr, cint from selling.doctype.customer.customer import DocType import datetime from webnotes import msgprint, _ from selling.doctype.lead.lead import create_contact from webnotes.model.code import get_obj from webnotes.model.bean import getlist, copy_doclist from selling.doctype.patient_encounter_entry.notification_schedular import get_encounters from webnotes.model.doc import Document, make_autoname class DocType(): def __init__(self, d, dl): self.doc, self.doclist = d, dl def autoname(self): entry = make_autoname(webnotes.conn.get_value('DocType', 'Patient Encounter Entry', 'autoname')) company = webnotes.conn.sql(""" select name from tabCompany where name = (select value from tabSingles where doctype = 'Global Defaults' and field = 'default_company') """)[0][0] self.doc.name = company + ' ' + entry def validate(self):pass # if not webnotes.conn.sql("select patient from `tabPatient Encounter Entry` where name = '%s'"%self.doc.name): # self.send_notification() def on_update(self): patient_id = None from datetime import datetime if self.doc.status == 'Canceled': webnotes.conn.sql("update `tabPatient Encounter Entry` set docstatus = '1' where name = '%s'"%(self.doc.name)) s1=(self.doc.start_time).split(':') s2=(self.doc.end_time).split(':') date_a=cstr(datetime.combine(datetime.strptime(self.doc.encounter_date,'%Y-%m-%d').date(),datetime.strptime(s1[0]+":"+s1[1],'%H:%M').time())) date_b=cstr(datetime.combine(datetime.strptime(self.doc.encounter_date,'%Y-%m-%d').date(),datetime.strptime(s2[0]+":"+s2[1],'%H:%M').time())) #webnotes.errprint(self.doc.entry_in_child) if self.doc.new_user == 1 and not self.doc.new_patient: patient_id = self.make_patient() self.doc.new_patient=patient_id self.create_new_contact() self.create_customer(patient_id) self.create_account_head(patient_id) self.doc.save() if self.doc.entry_in_child == 'False': self.make_child_entry(patient_id) #self.make_event() if not self.doc.eventid: self.create_child() else: webnotes.conn.sql("update `tabSlot Child` set slot='"+self.doc.appointment_slot+"', start_time='"+cstr(datetime.strptime(date_a,'%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M'))+"', end_time='"+cstr(datetime.strptime(date_b,'%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M'))+"' where encounter='"+self.doc.name+"'") # webnotes.errprint(date_a) webnotes.conn.sql("update `tabEvent` set starts_on='"+cstr(datetime.strptime(date_a,'%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M'))+"', ends_on='"+cstr(datetime.strptime(date_b,'%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M'))+"' where name='"+self.doc.eventid+"'") if cint(self.doc.checked_in)==1: pass # check_confirmed=webnotes.conn.sql("select true from `tabSlot Child` where slot='"+self.doc.appointment_slot+"' and modality='"+self.doc.encounter+"' and study='"+self.doc.study+"' and date_format(start_time,'%Y-%m-%d %H:%M')=date_format('"+date_a+"','%Y-%m-%d %H:%M') and date_format(end_time,'%Y-%m-%d %H:%M')=date_format('"+date_b+"','%Y-%m-%d %H:%M') and status='Confirm'",debug=1) # if not check_confirmed: # webnotes.conn.sql("update tabEvent set event_type='Confirm' where name='%s'"%self.doc.eventid) # webnotes.conn.sql("update `tabSlot Child` set status='Confirm' where encounter='%s'"%self.doc.name) # else: # webnotes.msgprint("Selected slot is not available",raise_exception=1) # get_encounters() def send_notification(self): mail_list = [] number = [] msg
= """Hi %(patient)s, Your appointment has been schedule on %(encounter_date)s at time %(start_time)s for study %(study)s on modality %(modality)s"""%{'patient': self.doc.patient, 'encounter_date':self.doc.encounter_date, 'start_time':self.doc.start_time, 'study':self.doc.study, 'modality':self.doc.modality} technologiest_contact = webnotes.conn.sql("select cell_number, personal_email from tabEmployee where name = '%s'"%(self.doc.technologist),as_list=1) patient_contact = webnotes.conn.sql("select mobile, email from `tabPatient Register` where name = '%s'"%(self.doc.patient),as_list=1) # webnotes.errprint([technologiest_contact, patient_contact]) if mail_list: mail_list.append(technologiest_contact[0][1]) mail_list.append(patient_contact[0][1]) if number: number.append(technologiest_contact[0][0]) number.append(patient_contact[0][0]) self.send_mail(msg, mail_list) self.send_sms(msg, number) def send_mail(self, msg, mail_list): from webnotes.utils.email_lib import sendmail for id in mail_list: if id: sendmail(id, subject='Appoiontment Scheduling', msg = msg) def send_sms(self, msg, number): ss = get_obj('SMS Settings', 'SMS Settings', with_children=1) # webnotes.errprint(ss) for num in number:pass # webnotes.errprint(['number',num]) args = {} for d in getlist(ss.doclist, 'static_parameter_details'): args[d.parameter] = d.value sms_url=webnotes.conn.get_value('SMS Settings', None, 'sms_gateway_url') msg_parameter=webnotes.conn.get_value('SMS Settings', None, 'message_parameter') receiver_parameter=webnotes.conn.get_value('SMS Settings', None, 'receiver_parameter') for num in number: if num: url = sms_url +"?user="+ args["user"] +"&senderID="+ args["sender ID"] +"&receipientno="+ num +"\ &dcs="+ args["dcs"]+ "&msgtxt=" + msg +"&state=" +args["state"] # webnotes.errprint(url) import requests r = requests.get(url) def create_new_contact(self): details = {} details['first_name'] = self.doc.first_name details['email_id'] = self.doc.email or '' details['mobile_no'] = self.doc.mobile or '' details['doc']='Customer' details['link']=self.doc.name or '' create_contact(details) def create_customer(self, patient_id): from webnotes.model.doc import Document d = Document('Customer') d.customer_name = patient_id d.full_name = self.doc.first_name d.save() def get_company_abbr(self): return webnotes.conn.get_value('Company', self.doc.company, 'abbr') def get_receivables_group(self): g = webnotes.conn.sql("select receivables_group from tabCompany where name=%s", self.doc.company) g = g and g[0][0] or '' if not g: msgprint("Update Company master, assign a default group for Receivables") raise Exception return g def create_account_head(self,patient_id): if self.doc.company : abbr = self.get_company_abbr() if not webnotes.conn.exists("Account", (self.doc.name + " - " + abbr)): parent_account = self.get_receivables_group() # create ac_bean = webnotes.bean({ "doctype": "Account", 'account_name': patient_id, 'parent_account': parent_account, 'group_or_ledger':'Ledger', 'company':self.doc.company, 'master_type':'Patient Ecounter Entry', 'master_name':patient_id, "freeze_account": "No" }) ac_bean.ignore_permissions = True ac_bean.insert() webnotes.msgprint(_("Account Head") + ": " + ac_bean.doc.name + _(" created")) else : webnotes.msgprint(_("Please Select Company under which you want to create account head")) def create_child(self): from datetime import datetime # date_a=cstr(datetime.combine(datetime.strptime(self.doc.encounter_date,'%Y-%m-%d').date(),datetime.strptime(self.doc.start_time,'%H:%M').time())) # 
date_b=cstr(datetime.combine(datetime.strptime(self.doc.encounter_date,'%Y-%m-%d').date(),datetime.strptime(self.doc.end_time,'%H:%M').time())) if self.doc.appointment_slot: # webnotes.errprint([self.doc.start_time]) check_confirmed=webnotes.conn.sql("select true from `tabSlot Child` where slot='"+self.doc.appointment_slot+"' and modality='"+self.doc.encounter+"' and study='"+self.doc.study+"' and date_format(start_time,'%Y-%m-%d %H:%M')=date_format('"+date_a+"','%Y-%m-%d %H:%M') and date_format(end_time,'%Y-%m-%d %H:%M')=date_format('"+date_b+"','%Y-%m-%d %H:%M') and status='Confirm'") # webnotes.errprint(check_confirmed) if not check_confirmed: check_status=webnotes.conn.sql("select case when count(*)<2 then true else false end from `tabSlot Child` where slot='"+self.doc.appointment_slot+"' and modality='"+self.doc.encounter+"' and study='"+self.doc.study+"' and date_format(start_time,'%Y-%m-%d %H:%M')=date_format('"+date_a+"','%Y-%m-%d %H:%M') and date_format(end_time,'%Y-%m-%d %H:%M')=date_format('"+date_b+"','%Y-%m-%d %H:%M') and status<>'Cancel'",as_list=1) # webnotes.errprint(check_status[0][0]) if check_status[0][0]==1: d=Document("Slot Child") d.slot=self.doc.appointment_slot d.modality=self.doc.encounter d.study=self.doc.study d.status='Waiting' d.encounter=self.doc.name d.start_time=date_a d.end_time=date_b d.save() self.make_event(d.name) self.doc.slot=d.name else: webnotes.msgprint("Selected slot is not available",raise_exception=1) else: webnotes.msgprint("Selected slot is not available",raise_exception=1) def make_patient(self): d = Document('Patient Register') d.customer_name = self.doc.first_name + ' ' + self.doc.last_name d.mobile = self.doc.phone_number d.company=self.doc.company d.save() return d.name def child_entry(self,patient_data): services = webnotes.conn.sql(""" SELECT foo.*, case when exists(select true from `tabPhysician Values` a WHERE a.study_name=foo.study AND a.parent=foo.referrer_name and a.referral_fee <> 0) then (select a.referral_fee from `tabPhysician Values` a WHERE a.study_name=foo.study AND a.parent=foo.referrer_name) else (select ifnull(referral_fee,0) from tabStudy where name=foo.study) end as referral_fee, case when exists(select true from `tabPhysician Values` a WHERE a.study_name=foo.study AND a.parent=foo.referrer_name and a.referral_fee <> 0) then (select a.referral_rule from `tabPhysician Values` a WHERE a.study_name=foo.study AND a.parent=foo.referrer_name) else (select referral_rule from tabStudy where name=foo.study) end as referral_rule FROM ( SELECT s.study_aim AS study,'' as item, '1' as qty, s.study_aim as parent,s.modality, e.encounter,e.referrer_name, e.name, s.discount_type,s.study_detials,s.discounted_value as dis_value FROM `tabEncounter` e, tabStudy s WHERE ifnull(e.is_invoiced,'False')='False' AND e.parent ='%(parent)s' and s.name = e.study) AS foo union select '',item,qty,parent,'','','','','','','','','' from `tabStudy Recipe Details` where parent in( SELECT s.study_aim AS study FROM `tabEncounter` e, tabStudy s WHERE ifnull(e.is_invoiced,'False')='False' AND e.parent ='%(parent)s' AND s.name = e.study <|fim▁hole|> tot_amt = 0.0 if services: for srv in services: # cld = addchild(self.doc, 'entries', 'Sales Invoice Item',self.doclist) # cld.study = srv['study'] # cld.modality = srv['modality'] # cld.encounter_id = srv['name'] # cld.discount_type = srv['discount_type'] export_rate=webnotes.conn.sql("""select study_fees from tabStudy where name = '%s' """%srv['study'],as_list=1) srv['export_rate'] = export_rate[0][0] if 
export_rate else 0 if cint(srv['export_rate'])==0: item_export_rate=webnotes.conn.sql("""select price from tabItem where name = '%s' """%srv['item'],as_list=1) srv['export_rate'] = item_export_rate[0][0] if item_export_rate else 0 # cld.referrer_name=srv['referrer_name'] if srv['referrer_name']: acc_head = webnotes.conn.sql("""select name from `tabAccount` where master_name='%s'"""%(srv['referrer_name'])) if acc_head and acc_head[0][0]: srv['referrer_physician_credit_to'] = acc_head[0][0] # cld.referral_rule= srv['referral_rule'] # cld.referral_fee= srv['referral_fee'] if srv['discount_type']=='Regular discount': # cld.discount=srv['dis_value'] srv['basic_charges']=cstr(flt(srv['export_rate']-flt(flt(srv['export_rate'])*flt(srv['dis_value'])/100))) srv['discount_in_amt']=cstr(flt(flt(srv['export_rate'])*flt(srv['dis_value'])/100)) else: if srv['referral_rule'] == 'Fixed Cost': srv['basic_charges']=cstr(flt(srv['export_rate'])-flt(srv['referral_fee'])) srv['discount_in_amt']=cstr(srv['referral_fee']) else: srv['basic_charges']=cstr(flt(srv['export_rate'])*flt(srv['qty']) - (flt(srv['export_rate'])*(flt(srv['referral_fee'])/100))) # webnotes.errprint(["sdas",srv['basic_charges']]) srv['dis_value'] = cstr(srv['referral_fee']) #cld.discount=cstr(round(flt(cld.referral_fee)/flt(cld.export_rate)*100,2)) # cld.description=srv['study_detials'] # cld.qty=1 tot_amt = flt(srv['basic_charges']) + tot_amt srv['amount'] = tot_amt patient_data_new.append(srv) # webnotes.errprint(patient_data_new) return patient_data_new else: webnotes.msgprint("Bill already made") def make_child_entry(self, patient_id=None): enct = Document('Encounter') # webnotes.errprint([enct, self.doc.patient]) enct.encounter = self.doc.encounter enct.study = self.doc.study enct.encounter_date = self.doc.encounter_date enct.radiologist_name = self.doc.radiologist_name enct.referrer_name = self.doc.referrer_name enct.problem_description = self.doc.problem_description enct.metal_in = self.doc.metal_in enct.pacemaker = self.doc.pacemaker enct.claustrophobia = self.doc.claustrophobia enct.pregnancy = self.doc.pregnancy enct.others = self.doc.others enct.procedure_alert = self.doc.procedure_alert enct.parent = self.doc.patient if self.doc.patient else patient_id enct.id = self.doc.name enct.save(new=1) self.doc.entry_in_child = 'True' self.doc.save() def make_event(self,name_slot): evnt = Document('Event') evnt.slot=name_slot evnt.event_type = 'Waiting' evnt.starts_on = self.doc.encounter_date + ' ' +self.doc.start_time evnt.ends_on = self.doc.encounter_date + ' ' +self.doc.end_time if cint(self.doc.new_user)==1: evnt.patient = self.doc.new_patient evnt.patient_name= self.doc.first_name + ' ' + self.doc.last_name else: evnt.patient = self.doc.patient evnt.patient_name= self.doc.patient_name evnt.service = self.doc.study evnt.subject = self.doc.study evnt.modality=self.doc.encounter evnt.study=self.doc.study evnt.save() self.doc.eventid = evnt.name self.doc.save() @webnotes.whitelist() def get_employee(doctype, txt, searchfield, start, page_len, filters): return webnotes.conn.sql("select name, employee_name from tabEmployee where designation = 'Radiologist'") @webnotes.whitelist() def get_patient_details(doctype, txt, searchfield, start, page_len, filters): return webnotes.conn.sql("""select name, first_name from `tabPatient Register` where docstatus < 2 and (%(key)s like "%(txt)s" or first_name like "%(txt)s") order by case when name like "%(txt)s" then 0 else 1 end, case when first_name like "%(txt)s" then 0 else 1 end, name limit 
%(start)s, %(page_len)s""" % {'key': searchfield, 'txt': "%%%s%%" % txt, 'start': start, 'page_len': page_len}) @webnotes.whitelist() def update_event(checked, dname,encounter): if cint(checked) == 1: webnotes.conn.sql("update tabEvent set event_type='Confirm' where name='%s'"%dname) # webnotes.errprint(encounter) webnotes.conn.sql("update `tabSlot Child` set status='Confirm' where encounter='%s'"%encounter) @webnotes.whitelist() def get_events(start, end, doctype,op,filters=None): # webnotes.errprint(['hello',doctype, op]) cnd ='' if op: cnd = "and encounter = '%(pros)s'"%{"pros":op} from webnotes.widgets.reportview import build_match_conditions #if not webnotes.has_permission("Task"): # webnotes.msgprint(_("No Permission"), raise_exception=1) conditions = '' # conditions = build_match_conditions("Patient Encounter Entry") # conditions and (" and " + conditions) or "" if filters: filters = json.loads(filters) for key in filters: if filters[key]: conditions += " and " + key + ' = "' + filters[key].replace('"', '\"') + '"' data = webnotes.conn.sql("""select name, start_time, end_time, study, status,encounter from `tabPatient Encounter Entry` where ((start_time between '%(start)s' and '%(end)s') \ or (end_time between '%(start)s' and '%(end)s')) %(cnd)s %(conditions)s""" % { "start": start, "end": end, "conditions": conditions, "cnd":cnd }, as_dict=True, update={"allDay": 0}) return data @webnotes.whitelist() def get_modality(): return webnotes.conn.sql("select name from tabModality", as_list=1) @webnotes.whitelist() def get_study(modality): return webnotes.conn.sql("select name from tabStudy where modality = '%s'"%modality, as_list=1) @webnotes.whitelist() def set_slot(modality, study, start_time, end_time): time = get_study_time(study) if cint(time) > 30: start_time = calc_start_time(start_time, modality) end_time = calc_end_time(cstr(start_time),time) start_time, end_time = check_availability(modality, start_time, end_time, time) return start_time, end_time def check_availability(modality, start_time, end_time, time): # webnotes.errprint(start_time) count = webnotes.conn.sql("""select sum(case when status = 'Waiting' then 2 when status = 'Confirmed' then 1 else 0 end) as status from `tabPatient Encounter Entry` where encounter = '%(encounter)s' and start_time = '%(start_time)s' and end_time = '%(end_time)s' """%{'encounter':modality, 'start_time':start_time, 'end_time':end_time},as_list=1) if count[0][0] in (1, 4, 3): # webnotes.errprint("if loop") start_time = end_time end_time = calc_end_time(cstr(start_time),time) return check_availability(modality, start_time, end_time, time) else: # webnotes.errprint(["else loop", start_time, end_time]) return start_time, end_time # def get_modality_time(modality): # return webnotes.conn.get_value('Modality',modality,'time_required') def get_study_time(study): return webnotes.conn.get_value('Study',modality,'study_time') def calc_end_time(start_time,time): import datetime now = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S') end_time = now + datetime.timedelta(minutes=cint(time)) return end_time def calc_start_time(start_time, modality): end_slot = datetime.datetime.strptime(cstr(start_time), '%Y-%m-%d %H:%M:%S') + datetime.timedelta(minutes=30) start_time_list = webnotes.conn.sql("""select end_time from `tabPatient Encounter Entry` where encounter='%(encounter)s' and end_time between '%(start_time)s' and '%(end_slot)s'"""%{'encounter':modality, 'start_time':start_time, 'end_slot':end_slot}) if start_time_list: start_time = 
start_time_list[0][0] return start_time @webnotes.whitelist() def get_patient(patient_id): get_obj('DB SYNC', 'DB SYNCl').sync_db(patient_id) @webnotes.whitelist() def create_patient(first_name,last_name,gender,date_of_birth,mobile_no,email, branch): # webnotes.errprint([first_name,last_name,gender,date_of_birth,mobile_no,email]) d = Document('Patient Register') d.first_name = first_name d.last_name = last_name d.birth_date = date_of_birth d.gender = gender d.mobile = mobile_no d.email = email d.lab_branch = branch d.save() return d.name<|fim▁end|>
) order by parent,qty"""%({"parent":patient_data}),as_dict=1) patient_data_new=[]
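# A note on the completion above: like most queries in this file, the child_entry
# SQL is assembled with % string interpolation. The same file's
# get_receivables_group shows that webnotes.conn.sql also accepts %s placeholders
# with bound values, which sidesteps quoting and injection problems. A hedged
# sketch (not the original code) of the create_child double-booking check in that
# style; the minute-granularity date_format comparison is simplified to direct
# equality, and the function name is hypothetical.
def slot_is_free(slot, modality, study, date_a, date_b):
    # "at most two non-cancelled bookings per slot", as in create_child
    rows = webnotes.conn.sql(
        """select count(*) from `tabSlot Child`
           where slot = %s and modality = %s and study = %s
           and start_time = %s and end_time = %s
           and status != 'Cancel'""",
        (slot, modality, study, date_a, date_b))
    return rows[0][0] < 2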
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>mod button;
mod led;
mod poti;
mod seg7;

pub use self::button::Button;
pub use self::led::Led;
pub use self::poti::Poti;<|fim▁hole|><|fim▁end|>
pub use self::seg7::Seg7;
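// The completion restores the missing re-export, so all four peripheral types
// are reachable from the parent module while the submodules stay private. A
// self-contained sketch of the same pattern from the consumer's side; the
// `devices` module and the Seg7 body here are illustrative, not the crate's
// real code.
mod devices {
    mod seg7 {
        pub struct Seg7;
        impl Seg7 {
            pub fn show(&self, digit: u8) {
                println!("seg7 shows {}", digit);
            }
        }
    }
    pub use self::seg7::Seg7; // the line the completion supplies
}

use devices::Seg7; // consumers never name devices::seg7 directly

fn main() {
    Seg7.show(7);
}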
<|file_name|>SqlServerEnvironment.java<|end_file_name|><|fim▁begin|>package dbfit.environment; import dbfit.annotations.DatabaseEnvironment; import dbfit.api.AbstractDbEnvironment; import dbfit.util.DbParameterAccessor; import dbfit.util.DbParameterAccessorsMapBuilder; import dbfit.util.Direction; import static dbfit.util.Direction.*; import static dbfit.util.LangUtils.enquoteAndJoin; import dbfit.util.TypeNormaliserFactory; import static dbfit.environment.SqlServerTypeNameNormaliser.normaliseTypeName; import java.math.BigDecimal; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Properties; import static org.apache.commons.lang3.ObjectUtils.defaultIfNull; @DatabaseEnvironment(name="SqlServer", driver="com.microsoft.sqlserver.jdbc.SQLServerDriver") public class SqlServerEnvironment extends AbstractDbEnvironment { public SqlServerEnvironment(String driverClassName) { super(driverClassName); defaultParamPatternString = "@([A-Za-z0-9_]+)"; TypeNormaliserFactory.setNormaliser(java.sql.Time.class, new MillisecondTimeNormaliser()); } public boolean supportsOuputOnInsert() { return false; } @Override protected String getConnectionString(String dataSource) { return "jdbc:sqlserver://" + dataSource; } @Override protected String getConnectionString(String dataSource, String database) { return getConnectionString(dataSource) + ";database=" + database; } @Override public void connect(String connectionString, Properties info) throws SQLException { // Add sendTimeAsDatetime=false option to enforce sending Time as // java.sql.Time (otherwise some precision is lost in conversions) super.connect(connectionString + ";sendTimeAsDatetime=false", info); } public Map<String, DbParameterAccessor> getAllColumns(String tableOrViewName) throws SQLException { String qry = " select c.[name], TYPE_NAME(c.system_type_id) as [Type], c.max_length, " + " 0 As is_output, 0 As is_cursor_ref " + " from " + objectDatabasePrefix(tableOrViewName) + "sys.columns c " + " where c.object_id = OBJECT_ID(?) 
" + " order by column_id"; return readIntoParams(tableOrViewName, qry); } private Map<String, DbParameterAccessor> readIntoParams(String objname, String query) throws SQLException { DbParameterAccessorsMapBuilder params = new DbParameterAccessorsMapBuilder(dbfitToJdbcTransformerFactory); objname = objname.replaceAll("[^a-zA-Z0-9_.#$]", ""); String bracketedName = enquoteAndJoin(objname.split("\\."), ".", "[", "]"); try (PreparedStatement dc = currentConnection.prepareStatement(query)) { dc.setString(1, bracketedName); ResultSet rs = dc.executeQuery(); while (rs.next()) { String paramName = defaultIfNull(rs.getString(1), ""); params.add(paramName, getParameterDirection(rs.getInt(4), paramName), getSqlType(rs.getString(2)), getJavaClass(rs.getString(2))); } } return params.toMap(); } // List interface has sequential search, so using list instead of array to // map types private static List<String> stringTypes = Arrays.asList(new String[] { "VARCHAR", "NVARCHAR", "CHAR", "NCHAR", "TEXT", "NTEXT", "UNIQUEIDENTIFIER" }); private static List<String> intTypes = Arrays .asList(new String[] { "INT" }); private static List<String> booleanTypes = Arrays .asList(new String[] { "BIT" }); private static List<String> floatTypes = Arrays .asList(new String[] { "REAL" }); private static List<String> doubleTypes = Arrays .asList(new String[] { "FLOAT" }); private static List<String> longTypes = Arrays .asList(new String[] { "BIGINT" }); private static List<String> shortTypes = Arrays.asList(new String[] { "TINYINT", "SMALLINT" }); private static List<String> numericTypes = Arrays.asList("NUMERIC"); private static List<String> decimalTypes = Arrays.asList(new String[] { "DECIMAL", "MONEY", "SMALLMONEY" }); private static List<String> timestampTypes = Arrays.asList(new String[] { "SMALLDATETIME", "DATETIME", "DATETIME2" }); private static List<String> dateTypes = Arrays.asList("DATE"); private static List<String> timeTypes = Arrays.asList("TIME"); // private static List<String> refCursorTypes = Arrays.asList(new String[] { // }); // private static List<String> doubleTypes=Arrays.asList(new // String[]{"DOUBLE"}); // private static string[] BinaryTypes=new string[] {"BINARY","VARBINARY", "TIMESTAMP"}; // private static string[] GuidTypes = new string[] { "UNIQUEIDENTIFIER" }; // private static string[] VariantTypes = new string[] { "SQL_VARIANT" }; private String objectDatabasePrefix(String dbObjectName) { String objectDatabasePrefix = ""; String[] objnameParts = dbObjectName.split("\\."); if (objnameParts.length == 3) {<|fim▁hole|> objectDatabasePrefix = objnameParts[0] + "."; } return objectDatabasePrefix; } private static Direction getParameterDirection(int isOutput, String name) { if (name.isEmpty()) { return RETURN_VALUE; } return (isOutput == 1) ? 
INPUT_OUTPUT : INPUT; } private static int getSqlType(String dataType) { // todo:strip everything from first blank dataType = normaliseTypeName(dataType); if (stringTypes.contains(dataType)) return java.sql.Types.VARCHAR; if (numericTypes.contains(dataType)) return java.sql.Types.NUMERIC; if (decimalTypes.contains(dataType)) return java.sql.Types.DECIMAL; if (intTypes.contains(dataType)) return java.sql.Types.INTEGER; if (timestampTypes.contains(dataType)) return java.sql.Types.TIMESTAMP; if (dateTypes.contains(dataType)) return java.sql.Types.DATE; if (timeTypes.contains(dataType)) return java.sql.Types.TIME; if (booleanTypes.contains(dataType)) return java.sql.Types.BOOLEAN; if (floatTypes.contains(dataType)) return java.sql.Types.FLOAT; if (doubleTypes.contains(dataType)) return java.sql.Types.DOUBLE; if (longTypes.contains(dataType)) return java.sql.Types.BIGINT; if (shortTypes.contains(dataType)) return java.sql.Types.SMALLINT; throw new UnsupportedOperationException("Type " + dataType + " is not supported"); } public Class<?> getJavaClass(String dataType) { dataType = normaliseTypeName(dataType); if (stringTypes.contains(dataType)) return String.class; if (numericTypes.contains(dataType)) return BigDecimal.class; if (decimalTypes.contains(dataType)) return BigDecimal.class; if (intTypes.contains(dataType)) return Integer.class; if (timestampTypes.contains(dataType)) return java.sql.Timestamp.class; if (dateTypes.contains(dataType)) return java.sql.Date.class; if (timeTypes.contains(dataType)) return java.sql.Time.class; if (booleanTypes.contains(dataType)) return Boolean.class; if (floatTypes.contains(dataType)) return Float.class; if (doubleTypes.contains(dataType)) return Double.class; if (longTypes.contains(dataType)) return Long.class; if (shortTypes.contains(dataType)) return Short.class; throw new UnsupportedOperationException("Type " + dataType + " is not supported"); } public Map<String, DbParameterAccessor> getAllProcedureParameters( String procName) throws SQLException { return readIntoParams( procName, "select [name], [Type], max_length, is_output, is_cursor_ref from " + "(" + " select " + " p.[name], TYPE_NAME(p.system_type_id) as [Type], " + " p.max_length, p.is_output, p.is_cursor_ref, " + " p.parameter_id, 0 as set_id, p.object_id " + " from " + objectDatabasePrefix(procName) + "sys.parameters p " + " union all select " + " '' as [name], 'int' as [Type], " + " 4 as max_length, 1 as is_output, 0 as is_cursor_ref, " + " null as parameter_id, 1 as set_id, object_id " + " from " + objectDatabasePrefix(procName) + "sys.objects where type in (N'P', N'PC') " + ") as u where object_id = OBJECT_ID(?) order by set_id, parameter_id"); } public String buildInsertCommand(String tableName, DbParameterAccessor[] accessors) { StringBuilder sb = new StringBuilder("insert into "); sb.append(tableName).append("("); String comma = ""; StringBuilder values = new StringBuilder(); for (DbParameterAccessor accessor : accessors) { if (accessor.hasDirection(Direction.INPUT)) { sb.append(comma); values.append(comma); //This will allow column names that have spaces or are keywords. sb.append("[" + accessor.getName() + "]"); values.append("?"); comma = ","; } } sb.append(") values ("); sb.append(values); sb.append(")"); return sb.toString(); } }<|fim▁end|>
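// getSqlType and getJavaClass above probe a dozen small lists in sequence. An
// equivalent single-lookup table is a common refactoring; this is a hedged
// sketch, not DbFit's actual code, and only some type families are filled in.
import java.util.HashMap;
import java.util.Map;

final class SqlServerTypeMap {
    // One map resolves a normalised type name to its JDBC constant in a
    // single lookup; unknown names fail the same way as the original.
    private static final Map<String, Integer> SQL_TYPES = new HashMap<>();
    static {
        for (String s : new String[] { "VARCHAR", "NVARCHAR", "CHAR",
                "NCHAR", "TEXT", "NTEXT", "UNIQUEIDENTIFIER" })
            SQL_TYPES.put(s, java.sql.Types.VARCHAR);
        SQL_TYPES.put("INT", java.sql.Types.INTEGER);
        SQL_TYPES.put("BIT", java.sql.Types.BOOLEAN);
        SQL_TYPES.put("BIGINT", java.sql.Types.BIGINT);
        // ... remaining families (decimal, date/time, float) elided ...
    }

    static int getSqlType(String dataType) {
        Integer t = SQL_TYPES.get(dataType);
        if (t == null)
            throw new UnsupportedOperationException(
                    "Type " + dataType + " is not supported");
        return t;
    }
}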
<|file_name|>LClientManager.cpp<|end_file_name|><|fim▁begin|>/* The MIT License (MIT) Copyright (c) <2010-2020> <wenshengming> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "LClientManager.h" #include "LClient.h" #include "LLoginServerMainLogic.h" LClientManager::~LClientManager() { m_pLSMainLogic = NULL; } LClientManager::LClientManager() { } // Initialize the pool of client connections bool LClientManager::Initialize(unsigned int unMaxClientServ) { if (m_pLSMainLogic == NULL) { return false; } if (unMaxClientServ == 0) { unMaxClientServ = 1000; } for (unsigned int ui = 0; ui < unMaxClientServ; ++ui) {<|fim▁hole|> } m_queueClientPool.push(pClient); } return true; } // Network-layer session ID; tsa carries the accepted-connection info bool LClientManager::AddNewUpSession(uint64_t u64SessionID, t_Session_Accepted& tsa) { map<uint64_t, LClient*>::iterator _ito = m_mapClientManagerBySessionID.find(u64SessionID); if (_ito != m_mapClientManagerBySessionID.end()) { return false; } LClient* pClient = GetOneClientFromPool(); if (pClient == NULL) { return false; } pClient->SetClientInfo(u64SessionID, tsa); m_mapClientManagerBySessionID[u64SessionID] = pClient; return true; } void LClientManager::RemoveOneSession(uint64_t u64SessionID) { map<uint64_t, LClient*>::iterator _ito = m_mapClientManagerBySessionID.find(u64SessionID); if (_ito != m_mapClientManagerBySessionID.end()) { FreeOneClientToPool(_ito->second); m_mapClientManagerBySessionID.erase(_ito); } } LClient* LClientManager::GetOneClientFromPool() { if (m_queueClientPool.empty()) { return NULL; } LClient* pClient = m_queueClientPool.front(); m_queueClientPool.pop(); return pClient; } void LClientManager::FreeOneClientToPool(LClient* pClient) { if (pClient == NULL) { return; } m_queueClientPool.push(pClient); } LClient* LClientManager::FindClientBySessionID(uint64_t u64SessionID) { map<uint64_t, LClient*>::iterator _ito = m_mapClientManagerBySessionID.find(u64SessionID); if (_ito == m_mapClientManagerBySessionID.end()) { return NULL; } return _ito->second; } void LClientManager::SetLSMainLogic(LLoginServerMainLogic* plsml) { m_pLSMainLogic = plsml; } LLoginServerMainLogic* LClientManager::GetLSMainLogic() { return m_pLSMainLogic; } unsigned int LClientManager::GetClientCount() { return m_mapClientManagerBySessionID.size(); }<|fim▁end|>
LClient *pClient = new LClient; if (pClient == NULL) { return false;
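// One caveat about the completion above: in standard C++, plain new reports
// failure by throwing std::bad_alloc rather than returning NULL, so the
// if (pClient == NULL) guard is dead code unless exceptions are disabled.
// The nothrow form makes the guard meaningful. A minimal sketch follows;
// FillPool is a hypothetical stand-in for the Initialize loop, and LClient
// comes from the file above.
#include <new>    // std::nothrow
#include <queue>

bool FillPool(std::queue<LClient*>& pool, unsigned int count) {
    for (unsigned int ui = 0; ui < count; ++ui) {
        LClient* pClient = new (std::nothrow) LClient;
        if (pClient == NULL) {
            return false;  // reachable only with the nothrow form
        }
        pool.push(pClient);
    }
    return true;
}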
<|file_name|>main.py<|end_file_name|><|fim▁begin|>''' Created on Apr 19, 2015 @author: bcopy ''' import os import cherrypy import sys import subprocess import random import time import threading import Queue import tempfile class ScriptMonitor(object): ''' Monitors the script execution and updates result statuses ''' def __init__(self): self.m_processInitialized = False def monitor(self, process): assert isinstance(process, subprocess.Popen) self.m_processInitialized = True self.m_process = process if(self.m_process.pid != None and self.m_process.poll() == None): print "Starting raspbuggy script process output polling..." self.m_stdoutQueue = Queue.Queue() self.m_stderrQueue = Queue.Queue() self.m_stdoutReader = AsynchronousFileReader(self.m_process.stdout, self.m_stdoutQueue) self.m_stdoutReader.start() else: print "Raspbuggy script process startup failed." def abort(self): print "Starting raspbuggy script process output polling..." if(self.m_processInitialized and self.m_process.poll() == None): self.m_process.terminate() self.m_processInitialized = False def isRunning(self): return (self.m_processInitialized and self.m_process.poll() == None) def getStdoutQueue(self): return self.m_stdoutQueue def getStderrQueue(self): return self.m_stderrQueue class AsynchronousFileReader(threading.Thread): ''' Helper class to implement asynchronous reading of a file in a separate thread. Pushes read lines on a queue to be consumed in another thread. ''' def __init__(self, fd, queue): assert isinstance(queue, Queue.Queue) assert callable(fd.readline) threading.Thread.__init__(self) self._fd = fd self._queue = queue def run(self): '''The body of the thread: read lines and put them on the queue.'''<|fim▁hole|> def eof(self): '''Check whether there is no more content to expect.''' return not self.is_alive() and self._queue.empty() class RaspbuggyService(object): def __init__(self): self.m_scriptMonitor = None @cherrypy.expose @cherrypy.tools.json_out() def ping(self): return {"msg": "pong"} @cherrypy.expose @cherrypy.tools.json_out() def status(self): if(self.m_scriptMonitor != None): running = self.m_scriptMonitor.isRunning() retCode = self.m_scriptMonitor.m_process.poll() if(retCode == None): retCode = -1 return {"running":running,"exitCode":retCode} else: return {"running":False,"exitCode":-1} @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def execute(self): scriptData = cherrypy.request.json if(self.m_scriptMonitor == None): self.m_scriptMonitor = ScriptMonitor() if(scriptData["scriptText"] == None): return {"success":False, "message":"Script contents undefined"} elif(self.m_scriptMonitor.isRunning()): return {"success":False, "message":"Script already running !"} else: # Write the script to a temporary file #scriptFile = tempfile.NamedTemporaryFile(prefix='raspbuggy-script-') scriptFile = open("/tmp/raspbuggy-script.py", "w") scriptFile.write(scriptData["scriptText"]+"\n") scriptFile.close() print "Executing script "+scriptFile.name+" ..." 
scriptProcess = subprocess.Popen(["python", scriptFile.name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=128) if(scriptProcess.pid != None): self.m_scriptMonitor.monitor(scriptProcess) return {"success":True, "message": "Running script (pid "+str(self.m_scriptMonitor.m_process.pid)+")"} else: return {"success":False, "message": "Could not start up script"} @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def abort(self): return {"result":1} @cherrypy.expose @cherrypy.tools.json_out() def tailStdOut(self): return {"tail": "New line\nNew line"} if __name__ == '__main__': WEBAPP_ROOT = os.getenv('RASPBUGGY_WEBAPP_ROOT',os.getcwd()+"/src/main/webapp") BLOCKLY_ROOT = os.getenv('BLOCKLY_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/blockly/b35c0fbfa2") BOOTSTRAP_ROOT = os.getenv('BOOTSTRAP_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/bootstrap/3.3.4") JQUERY_ROOT = os.getenv('JQUERY_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/jquery/1.9.1") #print os.path.abspath(WEBAPP_ROOT) #print os.path.abspath(BLOCKLY_ROOT) cherrypy.quickstart(RaspbuggyService(), "/", { '/': { 'tools.staticdir.on': True, 'tools.staticdir.dir': os.path.abspath(WEBAPP_ROOT) }, '/blockly': { 'tools.staticdir.on': True, 'tools.staticdir.dir': os.path.abspath(BLOCKLY_ROOT) }, '/bootstrap': { 'tools.staticdir.on': True, 'tools.staticdir.dir': os.path.abspath(BOOTSTRAP_ROOT) }, '/jquery': { 'tools.staticdir.on': True, 'tools.staticdir.dir': os.path.abspath(JQUERY_ROOT) } })<|fim▁end|>
for line in iter(self._fd.readline, ''): self._queue.put(line)
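# A hedged usage sketch for the reader class the completion finishes: the owning
# side (ScriptMonitor, or a status handler) can drain whatever output has
# arrived so far with get_nowait, so a web request never blocks on a
# still-running script. Names mirror the classes above; drain itself is
# hypothetical, not part of the original file.
import Queue  # Python 2 stdlib, as used in the file above

def drain(queue):
    """Return every line currently buffered, without blocking."""
    lines = []
    while True:
        try:
            lines.append(queue.get_nowait())
        except Queue.Empty:
            return lines

# e.g. inside tailStdOut:
#   return {"tail": "".join(drain(self.m_scriptMonitor.getStdoutQueue()))}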
<|file_name|>rooms.py<|end_file_name|><|fim▁begin|>""" Room Typeclasses for the TutorialWorld. This defines special types of Rooms available in the tutorial. To keep everything in one place we define them together with the custom commands needed to control them. Those commands could also have been in a separate module (e.g. if they could have been re-used elsewhere.) """ from __future__ import print_function import random from evennia import TICKER_HANDLER from evennia import CmdSet, Command, DefaultRoom from evennia import utils, create_object, search_object from evennia import syscmdkeys, default_cmds from evennia.contrib.tutorial_world.objects import LightSource # the system error-handling module is defined in the settings. We load the # given setting here using utils.object_from_module. This way we can use # it regardless of if we change settings later. from django.conf import settings _SEARCH_AT_RESULT = utils.object_from_module(settings.SEARCH_AT_RESULT) # ------------------------------------------------------------- # # Tutorial room - parent room class # # This room is the parent of all rooms in the tutorial. # It defines a tutorial command on itself (available to # all those who are in a tutorial room). # # ------------------------------------------------------------- # # Special command available in all tutorial rooms class CmdTutorial(Command): """ Get help during the tutorial Usage: tutorial [obj] This command allows you to get behind-the-scenes info about an object or the current location. """ key = "tutorial" aliases = ["tut"] locks = "cmd:all()" help_category = "TutorialWorld" def func(self): """ All we do is to scan the current location for an Attribute called `tutorial_info` and display that. """ caller = self.caller if not self.args: target = self.obj # this is the room the command is defined on else: target = caller.search(self.args.strip()) if not target: return helptext = target.db.tutorial_info if helptext: caller.msg("|G%s|n" % helptext) else: caller.msg("|RSorry, there is no tutorial help available here.|n") # for the @detail command we inherit from MuxCommand, since # we want to make use of MuxCommand's pre-parsing of '=' in the # argument. class CmdTutorialSetDetail(default_cmds.MuxCommand): """ sets a detail on a room Usage: @detail <key> = <description> @detail <key>;<alias>;... = description Example: @detail walls = The walls are covered in ... @detail castle;ruin;tower = The distant ruin ... This sets a "detail" on the object this command is defined on (TutorialRoom for this tutorial). This detail can be accessed with the TutorialRoomLook command sitting on TutorialRoom objects (details are set as a simple dictionary on the room). This is a Builder command. We custom parse the key for the ;-separator in order to create multiple aliases to the detail all at once. """ key = "@detail" locks = "cmd:perm(Builder)" help_category = "TutorialWorld" def func(self): """ All this does is to check if the object has the set_detail method and uses it. """ if not self.args or not self.rhs: self.caller.msg("Usage: @detail key = description") return if not hasattr(self.obj, "set_detail"): self.caller.msg("Details cannot be set on %s." 
% self.obj) return for key in self.lhs.split(";"): # loop over all aliases, if any (if not, this will just be # the one key to loop over) self.obj.set_detail(key, self.rhs) self.caller.msg("Detail set: '%s': '%s'" % (self.lhs, self.rhs)) class CmdTutorialLook(default_cmds.CmdLook): """ looks at the room and on details Usage: look <obj> look <room detail> look *<account> Observes your location, details at your location or objects in your vicinity. Tutorial: This is a child of the default Look command, that also allows us to look at "details" in the room. These details are things to examine and offers some extra description without actually having to be actual database objects. It uses the return_detail() hook on TutorialRooms for this. """ # we don't need to specify key/locks etc, this is already # set by the parent. help_category = "TutorialWorld" def func(self): """ Handle the looking. This is a copy of the default look code except for adding in the details. """ caller = self.caller args = self.args if args: # we use quiet=True to turn off automatic error reporting. # This tells search that we want to handle error messages # ourself. This also means the search function will always # return a list (with 0, 1 or more elements) rather than # result/None. looking_at_obj = caller.search(args, # note: excludes room/room aliases candidates=caller.location.contents + caller.contents, use_nicks=True, quiet=True) if len(looking_at_obj) != 1: # no target found or more than one target found (multimatch) # look for a detail that may match detail = self.obj.return_detail(args) if detail: self.caller.msg(detail) return else: # no detail found, delegate our result to the normal # error message handler. _SEARCH_AT_RESULT(None, caller, args, looking_at_obj) return else: # we found a match, extract it from the list and carry on # normally with the look handling. looking_at_obj = looking_at_obj[0] else: looking_at_obj = caller.location if not looking_at_obj: caller.msg("You have no location to look at!") return if not hasattr(looking_at_obj, 'return_appearance'): # this is likely due to us having an account instead looking_at_obj = looking_at_obj.character if not looking_at_obj.access(caller, "view"): caller.msg("Could not find '%s'." % args) return # get object's appearance caller.msg(looking_at_obj.return_appearance(caller)) # the object's at_desc() method. looking_at_obj.at_desc(looker=caller) return class TutorialRoomCmdSet(CmdSet): """ Implements the simple tutorial cmdset. This will overload the look command in the default CharacterCmdSet since it has a higher priority (ChracterCmdSet has prio 0) """ key = "tutorial_cmdset" priority = 1 def at_cmdset_creation(self): """add the tutorial-room commands""" self.add(CmdTutorial()) self.add(CmdTutorialSetDetail()) self.add(CmdTutorialLook()) class TutorialRoom(DefaultRoom): """ This is the base room type for all rooms in the tutorial world. It defines a cmdset on itself for reading tutorial info about the location. """ def at_object_creation(self): """Called when room is first created""" self.db.tutorial_info = "This is a tutorial room. It allows you to use the 'tutorial' command." self.cmdset.add_default(TutorialRoomCmdSet) def at_object_receive(self, new_arrival, source_location): """ When an object enter a tutorial room we tell other objects in the room about it by trying to call a hook on them. The Mob object uses this to cheaply get notified of enemies without having to constantly scan for them. 
Args: new_arrival (Object): the object that just entered this room. source_location (Object): the previous location of new_arrival. """ if new_arrival.has_account and not new_arrival.is_superuser: # this is a character for obj in self.contents_get(exclude=new_arrival): if hasattr(obj, "at_new_arrival"): obj.at_new_arrival(new_arrival) def return_detail(self, detailkey): """ This looks for an Attribute "obj_details" and possibly returns the value of it. Args: detailkey (str): The detail being looked at. This is case-insensitive. """ details = self.db.details if details: return details.get(detailkey.lower(), None) def set_detail(self, detailkey, description): """ This sets a new detail, using an Attribute "details". Args: detailkey (str): The detail identifier to add (for aliases you need to add multiple keys to the same description). Case-insensitive. description (str): The text to return when looking at the given detailkey. """ if self.db.details: self.db.details[detailkey.lower()] = description else: self.db.details = {detailkey.lower(): description} # ------------------------------------------------------------- # # Weather room - room with a ticker # # ------------------------------------------------------------- # These are rainy weather strings WEATHER_STRINGS = ( "The rain coming down from the iron-grey sky intensifies.", "A gust of wind throws the rain right in your face. Despite your cloak you shiver.", "The rainfall eases a bit and the sky momentarily brightens.", "For a moment it looks like the rain is slowing, then it begins anew with renewed force.", "The rain pummels you with large, heavy drops. You hear the rumble of thunder in the distance.", "The wind is picking up, howling around you, throwing water droplets in your face. It's cold.", "Bright fingers of lightning flash over the sky, moments later followed by a deafening rumble.", "It rains so hard you can hardly see your hand in front of you. You'll soon be drenched to the bone.", "Lightning strikes in several thundering bolts, striking the trees in the forest to your west.", "You hear the distant howl of what sounds like some sort of dog or wolf.", "Large clouds rush across the sky, throwing their load of rain over the world.") class WeatherRoom(TutorialRoom): """ This should probably better be called a rainy room... This sets up an outdoor room typeclass. At irregular intervals, the effects of weather will show in the room. Outdoor rooms should inherit from this. """ def at_object_creation(self): """ Called when object is first created. We set up a ticker to update this room regularly. Note that we could in principle also use a Script to manage the ticking of the room; the TickerHandler works fine for simple things like this though. """ super(WeatherRoom, self).at_object_creation() # subscribe ourselves to a ticker to repeatedly call the hook # "update_weather" on this object. The interval is randomized # so as to not have all weather rooms update at the same time. self.db.interval = random.randint(50, 70) TICKER_HANDLER.add(interval=self.db.interval, callback=self.update_weather, idstring="tutorial") # this is parsed by the 'tutorial' command on TutorialRooms. self.db.tutorial_info = \ "This room has a Script running that has it echo a weather-related message at irregular intervals." def update_weather(self, *args, **kwargs): """ Called by the tickerhandler at regular intervals. Even so, we only update 20% of the time, picking a random weather message when we do. 
The tickerhandler requires that this hook accepts any arguments and keyword arguments (hence the *args, **kwargs even though we don't actually use them in this example) """ if random.random() < 0.2: # only update 20 % of the time self.msg_contents("|w%s|n" % random.choice(WEATHER_STRINGS)) SUPERUSER_WARNING = "\nWARNING: You are playing as a superuser ({name}). Use the {quell} command to\n" \ "play without superuser privileges (many functions and puzzles ignore the \n" \ "presence of a superuser, making this mode useful for exploring things behind \n" \ "the scenes later).\n" \ # ------------------------------------------------------------ # # Intro Room - unique room # # This room marks the start of the tutorial. It sets up properties on # the player char that is needed for the tutorial. # # ------------------------------------------------------------- class IntroRoom(TutorialRoom): """ Intro room properties to customize: char_health - integer > 0 (default 20) """ def at_object_creation(self): """ Called when the room is first created. """ super(IntroRoom, self).at_object_creation() self.db.tutorial_info = "The first room of the tutorial. " \ "This assigns the health Attribute to "\ "the account." def at_object_receive(self, character, source_location): """ Assign properties on characters """ # setup character for the tutorial health = self.db.char_health or 20 if character.has_account: character.db.health = health character.db.health_max = health if character.is_superuser: string = "-" * 78 + SUPERUSER_WARNING + "-" * 78 character.msg("|r%s|n" % string.format(name=character.key, quell="|w@quell|r")) # ------------------------------------------------------------- # # Bridge - unique room # # Defines a special west-eastward "bridge"-room, a large room that takes # several steps to cross. It is complete with custom commands and a # chance of falling off the bridge. This room has no regular exits, # instead the exitings are handled by custom commands set on the account # upon first entering the room. # # Since one can enter the bridge room from both ends, it is # divided into five steps: # westroom <- 0 1 2 3 4 -> eastroom # # ------------------------------------------------------------- class CmdEast(Command): """ Go eastwards across the bridge. Tutorial info: This command relies on the caller having two Attributes (assigned by the room when entering): - east_exit: a unique name or dbref to the room to go to when exiting east. - west_exit: a unique name or dbref to the room to go to when exiting west. The room must also have the following Attributes - tutorial_bridge_posistion: the current position on on the bridge, 0 - 4. """ key = "east" aliases = ["e"] locks = "cmd:all()" help_category = "TutorialWorld" def func(self): """move one step eastwards""" caller = self.caller bridge_step = min(5, caller.db.tutorial_bridge_position + 1) if bridge_step > 4: # we have reached the far east end of the bridge. # Move to the east room. eexit = search_object(self.obj.db.east_exit) if eexit: caller.move_to(eexit[0]) else: caller.msg("No east exit was found for this room. Contact an admin.") return caller.db.tutorial_bridge_position = bridge_step # since we are really in one room, we have to notify others # in the room when we move. caller.location.msg_contents("%s steps eastwards across the bridge." % caller.name, exclude=caller) caller.execute_cmd("look") # go back across the bridge class CmdWest(Command): """ Go westwards across the bridge. 
Tutorial info: This command relies on the caller having two Attributes (assigned by the room when entering): - east_exit: a unique name or dbref to the room to go to when exiting east. - west_exit: a unique name or dbref to the room to go to when exiting west. The room must also have the following property: - tutorial_bridge_position: the current position on the bridge, 0 - 4. """ key = "west" aliases = ["w"] locks = "cmd:all()" help_category = "TutorialWorld" def func(self): """move one step westwards""" caller = self.caller bridge_step = max(-1, caller.db.tutorial_bridge_position - 1) if bridge_step < 0: # we have reached the far west end of the bridge. # Move to the west room. wexit = search_object(self.obj.db.west_exit) if wexit: caller.move_to(wexit[0]) else: caller.msg("No west exit was found for this room. Contact an admin.") return caller.db.tutorial_bridge_position = bridge_step # since we are really in one room, we have to notify others # in the room when we move. caller.location.msg_contents("%s steps westwards across the bridge." % caller.name, exclude=caller) caller.execute_cmd("look") BRIDGE_POS_MESSAGES = ("You are standing |wvery close to the bridge's western foundation|n." " If you go west you will be back on solid ground ...", "The bridge slopes precariously where it extends eastwards" " towards the lowest point - the center point of the hanging bridge.", "You are |whalfway|n out on the unstable bridge.", "The bridge slopes precariously where it extends westwards" " towards the lowest point - the center point of the hanging bridge.", "You are standing |wvery close to the bridge's eastern foundation|n." " If you go east you will be back on solid ground ...") BRIDGE_MOODS = ("The bridge sways in the wind.", "The hanging bridge creaks dangerously.", "You clasp the ropes firmly as the bridge sways and creaks under you.", "From the castle you hear a distant howling sound, like that of a large dog or other beast.", "The bridge creaks under your feet. Those planks do not seem very sturdy.", "Far below you the ocean roars and throws its waves against the cliff," " as if trying its best to reach you.", "Parts of the bridge come loose behind you, falling into the chasm far below!", "A gust of wind causes the bridge to sway precariously.", "Under your feet a plank comes loose, tumbling down. For a moment you dangle over the abyss ...", "The section of rope you hold onto crumbles in your hands," " parts of it breaking apart. You sway trying to regain balance.") FALL_MESSAGE = "Suddenly the plank you stand on gives way under your feet! You fall!" \ "\nYou try to grab hold of an adjoining plank, but all you manage to do is to " \ "divert your fall westwards, towards the cliff face. This is going to hurt ... " \ "\n ... The world goes dark ...\n\n" class CmdLookBridge(Command): """ looks around at the bridge. Tutorial info: This command assumes that the room has an Attribute "fall_exit", a unique name or dbref to the place they end up if they fall off the bridge.
""" key = 'look' aliases = ["l"] locks = "cmd:all()" help_category = "TutorialWorld" def func(self): """Looking around, including a chance to fall.""" caller = self.caller bridge_position = self.caller.db.tutorial_bridge_position # this command is defined on the room, so we get it through self.obj location = self.obj # randomize the look-echo message = "|c%s|n\n%s\n%s" % (location.key, BRIDGE_POS_MESSAGES[bridge_position], random.choice(BRIDGE_MOODS)) chars = [obj for obj in self.obj.contents_get(exclude=caller) if obj.has_account] if chars: # we create the You see: message manually here message += "\n You see: %s" % ", ".join("|c%s|n" % char.key for char in chars) self.caller.msg(message) # there is a chance that we fall if we are on the western or central # part of the bridge. if bridge_position < 3 and random.random() < 0.05 and not self.caller.is_superuser: # we fall 5% of time. fall_exit = search_object(self.obj.db.fall_exit) if fall_exit: self.caller.msg("|r%s|n" % FALL_MESSAGE) self.caller.move_to(fall_exit[0], quiet=True) # inform others on the bridge self.obj.msg_contents("A plank gives way under %s's feet and " "they fall from the bridge!" % self.caller.key) # custom help command class CmdBridgeHelp(Command): """ Overwritten help command while on the bridge. """ key = "help" aliases = ["h", "?"] locks = "cmd:all()" help_category = "Tutorial world" def func(self): """Implements the command.""" string = "You are trying hard not to fall off the bridge ..." \ "\n\nWhat you can do is trying to cross the bridge |weast|n" \ " or try to get back to the mainland |wwest|n)." self.caller.msg(string) class BridgeCmdSet(CmdSet): """This groups the bridge commands. We will store it on the room.""" key = "Bridge commands" priority = 1 # this gives it precedence over the normal look/help commands. def at_cmdset_creation(self): """Called at first cmdset creation""" self.add(CmdTutorial()) self.add(CmdEast()) self.add(CmdWest()) self.add(CmdLookBridge()) self.add(CmdBridgeHelp()) BRIDGE_WEATHER = ( "The rain intensifies, making the planks of the bridge even more slippery.", "A gust of wind throws the rain right in your face.", "The rainfall eases a bit and the sky momentarily brightens.", "The bridge shakes under the thunder of a closeby thunder strike.", "The rain pummels you with large, heavy drops. You hear the distinct howl of a large hound in the distance.", "The wind is picking up, howling around you and causing the bridge to sway from side to side.", "Some sort of large bird sweeps by overhead, giving off an eery screech. Soon it has disappeared in the gloom.", "The bridge sways from side to side in the wind.", "Below you a particularly large wave crashes into the rocks.", "From the ruin you hear a distant, otherwordly howl. Or maybe it was just the wind.") class BridgeRoom(WeatherRoom): """ The bridge room implements an unsafe bridge. It also enters the player into a state where they get new commands so as to try to cross the bridge. We want this to result in the account getting a special set of commands related to crossing the bridge. The result is that it will take several steps to cross it, despite it being represented by only a single room. We divide the bridge into steps: self.db.west_exit - - | - - self.db.east_exit 0 1 2 3 4 The position is handled by a variable stored on the character when entering and giving special move commands will increase/decrease the counter until the bridge is crossed. 
We also have self.db.fall_exit, which points to a gathering location to end up in if we happen to fall off the bridge (used by the CmdLookBridge command). """ def at_object_creation(self): """Sets up the room""" # this will start the weather room's ticker and tell # it to call update_weather regularly. super(BridgeRoom, self).at_object_creation() # this identifies the exits from the room (should be the command # needed to leave through that exit). These are defaults, but you # could of course also change them after the room has been created. self.db.west_exit = "cliff" self.db.east_exit = "gate" self.db.fall_exit = "cliffledge" # add the cmdset on the room. self.cmdset.add_default(BridgeCmdSet) # since the default Character's at_look() will access the room's # return_description (this skips the cmdset) when # first entering it, we need to explicitly turn off the room # as a normal view target - once inside, our own look will # handle all return messages. self.locks.add("view:false()") def update_weather(self, *args, **kwargs): """ This is called at irregular intervals and makes the passage over the bridge a little more interesting. """ if random.random() < 0.8: # send a message most of the time (0.8, not 80: random.random() is always < 80) self.msg_contents("|w%s|n" % random.choice(BRIDGE_WEATHER)) def at_object_receive(self, character, source_location): """ This hook is called by the engine whenever the player is moved into this room. """ if character.has_account: # we only run this if the entered object is indeed a player object. # check that our east/west exits are correctly defined. wexit = search_object(self.db.west_exit) eexit = search_object(self.db.east_exit) fexit = search_object(self.db.fall_exit) if not (wexit and eexit and fexit): character.msg("The bridge's exits are not properly configured. " "Contact an admin. Forcing west-end placement.") character.db.tutorial_bridge_position = 0 return if source_location == eexit[0]: # we assume we enter from the same room we will exit to character.db.tutorial_bridge_position = 4 else: # if not from the east, then from the west! character.db.tutorial_bridge_position = 0 character.execute_cmd("look") def at_object_leave(self, character, target_location): """ This is triggered when the player leaves the bridge room. """ if character.has_account: # clean up the position attribute del character.db.tutorial_bridge_position # ------------------------------------------------------------------------------- # # Dark Room - a room with states # # This room limits the movements of its denizens unless they carry an active # LightSource object (LightSource is defined in # tutorialworld.objects.LightSource) # # ------------------------------------------------------------------------------- DARK_MESSAGES = ("It is pitch black. You are likely to be eaten by a grue.", "It's pitch black. You fumble around but cannot find anything.", "You don't see a thing. You feel around, managing to bump your fingers hard against something. Ouch!", "You don't see a thing! Blindly grasping the air around you, you find nothing.", "It's totally dark here. You almost stumble over some unevenness in the ground.", "You are completely blind. For a moment you think you hear someone breathing nearby ... " "\n ... surely you must be mistaken.", "Blind, you think you find some sort of object on the ground, but it turns out to be just a stone.", "Blind, you bump into a wall. The wall seems to be covered with some sort of vegetation," " but it's too damp to burn.", "You can't see anything, but the air is damp.
It feels like you are far underground.") ALREADY_LIGHTSOURCE = "You don't want to stumble around in blindness anymore. You already " \ "found what you need. Let's get light already!" FOUND_LIGHTSOURCE = "Your fingers bump against a splinter of wood in a corner." \ " It smells of resin and seems dry enough to burn! " \ "You pick it up, holding it firmly. Now you just need to" \ " |wlight|n it using the flint and steel you carry with you." class CmdLookDark(Command): """ Look around in darkness Usage: look Look around in the darkness, trying to find something. """ key = "look" aliases = ["l", 'feel', 'search', 'feel around', 'fiddle'] locks = "cmd:all()" help_category = "TutorialWorld" def func(self): """ Implement the command. This works both as a look and a search command; there is a random chance of eventually finding a light source. """ caller = self.caller if random.random() < 0.8: # we don't find anything caller.msg(random.choice(DARK_MESSAGES)) else: # we could have found something! if any(obj for obj in caller.contents if utils.inherits_from(obj, LightSource)): # we already carry a LightSource object. caller.msg(ALREADY_LIGHTSOURCE) else: # don't have a light source, create a new one. create_object(LightSource, key="splinter", location=caller) caller.msg(FOUND_LIGHTSOURCE) class CmdDarkHelp(Command): """ Help command for the dark state.<|fim▁hole|> """ key = "help" locks = "cmd:all()" help_category = "TutorialWorld" def func(self): """ Replace the the help command with a not-so-useful help """ string = "Can't help you until you find some light! Try looking/feeling around for something to burn. " \ "You shouldn't give up even if you don't find anything right away." self.caller.msg(string) class CmdDarkNoMatch(Command): """ This is a system command. Commands with special keys are used to override special sitations in the game. The CMD_NOMATCH is used when the given command is not found in the current command set (it replaces Evennia's default behavior or offering command suggestions) """ key = syscmdkeys.CMD_NOMATCH locks = "cmd:all()" def func(self): """Implements the command.""" self.caller.msg("Until you find some light, there's not much you can do. Try feeling around.") class DarkCmdSet(CmdSet): """ Groups the commands of the dark room together. We also import the default say command here so that players can still talk in the darkness. We give the cmdset the mergetype "Replace" to make sure it completely replaces whichever command set it is merged onto (usually the default cmdset) """ key = "darkroom_cmdset" mergetype = "Replace" priority = 2 def at_cmdset_creation(self): """populate the cmdset.""" self.add(CmdTutorial()) self.add(CmdLookDark()) self.add(CmdDarkHelp()) self.add(CmdDarkNoMatch()) self.add(default_cmds.CmdSay) class DarkRoom(TutorialRoom): """ A dark room. This tries to start the DarkState script on all objects entering. The script is responsible for making sure it is valid (that is, that there is no light source shining in the room). The is_lit Attribute is used to define if the room is currently lit or not, so as to properly echo state changes. Since this room (in the tutorial) is meant as a sort of catch-all, we also make sure to heal characters ending up here, since they may have been beaten up by the ghostly apparition at this point. """ def at_object_creation(self): """ Called when object is first created. """ super(DarkRoom, self).at_object_creation() self.db.tutorial_info = "This is a room with custom command sets on itself." # the room starts dark. 
self.db.is_lit = False self.cmdset.add(DarkCmdSet, permanent=True) def at_init(self): """ Called when room is first recached (such as after a reload) """ self.check_light_state() def _carries_light(self, obj): """ Checks if the given object carries anything that gives light. Note that we do NOT look for a specific LightSource typeclass, but for the Attribute is_giving_light - this makes it easy to later add other types of light-giving items. We also accept if there is a light-giving object in the room overall (like if a splinter was dropped in the room) """ return obj.is_superuser or obj.db.is_giving_light or any(o for o in obj.contents if o.db.is_giving_light) def _heal(self, character): """ Heal a character. """ health = character.db.health_max or 20 character.db.health = health def check_light_state(self, exclude=None): """ This method checks if there are any light sources in the room. If there aren't any, it makes sure to add the dark cmdset to all characters in the room. It is called whenever characters enter the room and also by the Light sources when they turn on. Args: exclude (Object): An object to not include in the light check. """ if any(self._carries_light(obj) for obj in self.contents if obj != exclude): self.locks.add("view:all()") self.cmdset.remove(DarkCmdSet) self.db.is_lit = True for char in (obj for obj in self.contents if obj.has_account): # this won't do anything if it is already removed char.msg("The room is lit up.") else: # no one is carrying light - darken the room self.db.is_lit = False self.locks.add("view:false()") self.cmdset.add(DarkCmdSet, permanent=True) for char in (obj for obj in self.contents if obj.has_account): if char.is_superuser: char.msg("You are Superuser, so you are not affected by the dark state.") else: # put players in darkness char.msg("The room is completely dark.") def at_object_receive(self, obj, source_location): """ Called when an object enters the room. """ if obj.has_account: # a puppeted object, that is, a Character self._heal(obj) # in case the new guy carries light with them self.check_light_state() def at_object_leave(self, obj, target_location): """ In case people leave with the light, we make sure to clear the DarkCmdSet if necessary. This also works if they are teleported away. """ # since this hook is called while the object is still in the room, # we exclude it from the light check, to ignore any light sources # it may be carrying. self.check_light_state(exclude=obj) # ------------------------------------------------------------- # # Teleport room - puzzle solution # # This is a sort of puzzle room that requires a certain # attribute on the entering character to be the same as # an attribute of the room. If not, the character will # be teleported away to a target location. This is used # by the Obelisk - grave chamber puzzle, where one must # have looked at the obelisk to get an attribute set on # oneself, and then pick the grave chamber with the # matching imagery for this attribute. # # ------------------------------------------------------------- class TeleportRoom(TutorialRoom): """ Teleporter - puzzle room. 
Important attributes (set at creation): puzzle_clue - which attr to look for on character puzzle_value - what char.db.puzzle_clue must be set to success_teleport_to - where to teleport in case of success success_teleport_msg - message to echo while teleporting to success failure_teleport_to - where to teleport to in case of failure failure_teleport_msg - message to echo while teleporting to failure """ def at_object_creation(self): """Called at first creation""" super(TeleportRoom, self).at_object_creation() # what character.db.puzzle_clue must be set to, to avoid teleportation. self.db.puzzle_value = 1 # target of successful teleportation. Can be a dbref or a # unique room name. self.db.success_teleport_msg = "You are successful!" self.db.success_teleport_to = "treasure room" # the target of the failure teleportation. self.db.failure_teleport_msg = "You fail!" self.db.failure_teleport_to = "dark cell" def at_object_receive(self, character, source_location): """ This hook is called by the engine whenever the player is moved into this room. """ if not character.has_account: # only act on player characters. return # determine if the puzzle is a success or not is_success = str(character.db.puzzle_clue) == str(self.db.puzzle_value) teleport_to = self.db.success_teleport_to if is_success else self.db.failure_teleport_to # note that this returns a list results = search_object(teleport_to) if not results or len(results) > 1: # we cannot move anywhere since no valid target was found. character.msg("no valid teleport target for %s was found." % teleport_to) return if character.is_superuser: # superusers don't get teleported character.msg("Superuser block: You would have been teleported to %s." % results[0]) return # perform the teleport if is_success: character.msg(self.db.success_teleport_msg) else: character.msg(self.db.failure_teleport_msg) # teleport quietly to the new place character.move_to(results[0], quiet=True, move_hooks=False) # we have to call this manually since we turn off move_hooks # - this is necessary to make the target dark room aware of an # already carried light. results[0].at_object_receive(character, self) # ------------------------------------------------------------- # # Outro room - unique exit room # # Cleans up the character from all tutorial-related properties. # # ------------------------------------------------------------- class OutroRoom(TutorialRoom): """ Outro room. Called when exiting the tutorial, cleans the character of tutorial-related attributes. """ def at_object_creation(self): """ Called when the room is first created. """ super(OutroRoom, self).at_object_creation() self.db.tutorial_info = "The last room of the tutorial. " \ "This cleans up all temporary Attributes " \ "the tutorial may have assigned to the "\ "character." def at_object_receive(self, character, source_location): """ Do cleanup. """ if character.has_account: del character.db.health_max del character.db.health del character.db.last_climbed del character.db.puzzle_clue del character.db.combat_parry_mode del character.db.tutorial_bridge_position for obj in character.contents: if obj.typeclass_path.startswith("evennia.contrib.tutorial_world"): obj.delete() character.tags.clear(category="tutorial_world")<|fim▁end|>
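The BridgeRoom above tracks crossing progress as an integer position 0-4 stored on the character, moved by the special east/west commands. A minimal standalone sketch of that counter mechanic (Python; the class and method names here are illustrative assumptions, not the Evennia API):

# Sketch of the bridge-position counter described in BridgeRoom's docstring.
class BridgeCrossing(object):
    WEST_END, EAST_END = 0, 4

    def __init__(self, entered_from_east=False):
        # entering from the east exit starts at 4, from the west at 0
        self.position = self.EAST_END if entered_from_east else self.WEST_END

    def step_east(self):
        """Take one step east; returns True once we step off the east end."""
        self.position += 1
        return self.position > self.EAST_END

    def step_west(self):
        """Take one step west; returns True once we step off the west end."""
        self.position -= 1
        return self.position < self.WEST_END

crossing = BridgeCrossing()
steps = 0
while not crossing.step_east():
    steps += 1
assert steps == 4  # four steps stay on the bridge; the fifth one leaves it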
<|file_name|>tstTwoDDistributionHelpers.cpp<|end_file_name|><|fim▁begin|>//---------------------------------------------------------------------------// //! //! \file tstTwoDDistributionHelpers.cpp //! \author Luke Kersting //! \brief Two-dimensional distribution helpers unit tests //! //---------------------------------------------------------------------------// // Std Lib Includes #include <iostream> // Trilinos Includes #include <Teuchos_UnitTestHarness.hpp> #include <Teuchos_RCP.hpp> #include <Teuchos_VerboseObject.hpp> #include <Teuchos_Array.hpp> // FRENSIE Includes #include "MonteCarlo_UnitTestHarnessExtensions.hpp" #include "MonteCarlo_TwoDDistributionHelpers.hpp" #include "Utility_HistogramDistribution.hpp" #include "Utility_RandomNumberGenerator.hpp" //---------------------------------------------------------------------------// // Testing Variables //---------------------------------------------------------------------------// MonteCarlo::TwoDDistribution twod_distribution; double interpolation_fraction, independent_value, energy; //---------------------------------------------------------------------------// // Tests. //---------------------------------------------------------------------------// // Check that the lower bin is sampled for independent variable < lower boundary TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, sampleTwoDDistributionCorrelated_below ) { double sampled_variable; // Set up the random number stream std::vector<double> fake_stream( 1 ); fake_stream[0] = 3.0/18.0; // sample the first distribution Utility::RandomNumberGenerator::setFakeStream( fake_stream ); sampled_variable = MonteCarlo::sampleTwoDDistributionCorrelated( 0.0001, twod_distribution ); TEST_FLOATING_EQUALITY( sampled_variable, -1.5, 1e-15 ); } //---------------------------------------------------------------------------// // Check that the upper bin is sampled for independent variable > upper boundary TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, sampleTwoDDistributionCorrelated_above ) { double sampled_variable; // Set up the random number stream std::vector<double> fake_stream( 1 ); fake_stream[0] = 0.5; // sample the last distribution Utility::RandomNumberGenerator::setFakeStream( fake_stream ); sampled_variable = MonteCarlo::sampleTwoDDistributionCorrelated( 1.0, twod_distribution ); TEST_FLOATING_EQUALITY( sampled_variable, 2.0, 1e-15 ); } //---------------------------------------------------------------------------// // Check the sample for an independent variable in between bins TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, sampleTwoDDistributionCorrelated_inbetween ) { double sampled_variable; // Set up the random number stream std::vector<double> fake_stream( 1 ); fake_stream[0] = 0.5; // sample between the middle and last distribution Utility::RandomNumberGenerator::setFakeStream( fake_stream ); sampled_variable = MonteCarlo::sampleTwoDDistributionCorrelated( 0.05, twod_distribution ); Utility::RandomNumberGenerator::unsetFakeStream(); TEST_FLOATING_EQUALITY( sampled_variable, 13.0/9.0, 1e-15 ); } //---------------------------------------------------------------------------// // Check that the distribution can be correlated-sampled with a random number TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, sampleTwoDDistributionCorrelatedWithRandomNumber ) { double sampled_variable; double random_number = 3.0/18.0; // sample the first distribution sampled_variable = MonteCarlo::sampleTwoDDistributionCorrelatedWithRandomNumber( 0.0001, twod_distribution, random_number ); TEST_FLOATING_EQUALITY( 
sampled_variable, -1.5, 1e-15 ); random_number = 0.5; sampled_variable = MonteCarlo::sampleTwoDDistributionCorrelatedWithRandomNumber( 1.0, twod_distribution, random_number ); TEST_FLOATING_EQUALITY( sampled_variable, 2.0, 1e-15 ); sampled_variable = MonteCarlo::sampleTwoDDistributionCorrelatedWithRandomNumber( 0.05, twod_distribution, random_number ); TEST_FLOATING_EQUALITY( sampled_variable, 13.0/9.0, 1e-15 ); } //---------------------------------------------------------------------------// // Check that the distribution can be sampled with a random number using independent sampling TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, sampleTwoDDistributionIndependent ) { double sampled_variable; // Set up the random number stream std::vector<double> fake_stream( 2 ); fake_stream[0] = 0.5; // sample between the middle and last distribution fake_stream[1] = 0.5; // sample from middle distribution Utility::RandomNumberGenerator::setFakeStream( fake_stream ); sampled_variable = MonteCarlo::sampleTwoDDistributionIndependent( 0.05, twod_distribution ); Utility::RandomNumberGenerator::unsetFakeStream(); TEST_FLOATING_EQUALITY( sampled_variable, 1.0, 1e-15 ); } //---------------------------------------------------------------------------// // Check the correlated cdf value can be evaluated TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, evaluateTwoDDistributionCorrelatedCDF ) { double sampled_variable; sampled_variable = MonteCarlo::evaluateTwoDDistributionCorrelatedCDF( energy, independent_value, twod_distribution ); TEST_FLOATING_EQUALITY( sampled_variable, 0.4259259259259260, 1e-15 ); } //---------------------------------------------------------------------------// // Check the correlated pdf value can be evaluated TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, evaluateTwoDDistributionCorrelatedPDF ) { double sampled_variable; sampled_variable = MonteCarlo::evaluateTwoDDistributionCorrelatedPDF( energy, independent_value, twod_distribution ); TEST_FLOATING_EQUALITY( sampled_variable, 1.5/9.0, 1e-15 ); } //---------------------------------------------------------------------------// // Check the correlated value can be evaluated TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, evaluateTwoDDistributionCorrelated )<|fim▁hole|> double sampled_variable; sampled_variable = MonteCarlo::evaluateTwoDDistributionCorrelated( energy, independent_value, twod_distribution ); TEST_FLOATING_EQUALITY( sampled_variable, 1.0, 1e-15 ); } //---------------------------------------------------------------------------// // Check the correlation sample for two bins with random number TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, correlatedSampleWithRandomNumber ) { double sampled_variable; double random_number = 0.5; // sample between the middle and last distribution sampled_variable = MonteCarlo::correlatedSampleWithRandomNumber( twod_distribution[2].second, twod_distribution[1].second, interpolation_fraction, random_number ); TEST_FLOATING_EQUALITY( sampled_variable, 13.0/9.0, 1e-15 ); } //---------------------------------------------------------------------------// // Check the correlation sample for two bins TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, correlatedSample ) { double sampled_variable; // Set up the random number stream std::vector<double> fake_stream( 2 ); fake_stream[0] = 0.5; // sample between the middle and last distribution Utility::RandomNumberGenerator::setFakeStream( fake_stream ); sampled_variable = MonteCarlo::correlatedSample( twod_distribution[2].second, twod_distribution[1].second, interpolation_fraction ); 
Utility::RandomNumberGenerator::unsetFakeStream(); TEST_FLOATING_EQUALITY( sampled_variable, 13.0/9.0, 1e-15 ); } //---------------------------------------------------------------------------// // Check the correlation sample for two bins in a subrange TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, correlatedSampleInSubrange ) { double sampled_variable; // Set up the random number stream std::vector<double> fake_stream( 2 ); fake_stream[0] = 0.5; // sample between the middle and last distribution Utility::RandomNumberGenerator::setFakeStream( fake_stream ); sampled_variable = MonteCarlo::correlatedSampleInSubrange( twod_distribution[2].second, twod_distribution[1].second, interpolation_fraction, 3.0 ); Utility::RandomNumberGenerator::unsetFakeStream(); TEST_FLOATING_EQUALITY( sampled_variable, 1.0, 1e-15 ); } //---------------------------------------------------------------------------// // Check the correlated cdf value can be evaluated TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, evaluateCorrelatedCDF ) { double sampled_variable; sampled_variable = MonteCarlo::evaluateCorrelatedCDF( twod_distribution[2].second, twod_distribution[1].second, interpolation_fraction, independent_value ); TEST_FLOATING_EQUALITY( sampled_variable, 0.4259259259259260, 1e-15 ); } //---------------------------------------------------------------------------// // Check the correlated pdf value can be evaluated TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, evaluateCorrelatedPDF ) { double sampled_variable; sampled_variable = MonteCarlo::evaluateCorrelatedPDF( twod_distribution[2].second, twod_distribution[1].second, interpolation_fraction, independent_value ); TEST_FLOATING_EQUALITY( sampled_variable, 1.5/9.0, 1e-12 ); } //---------------------------------------------------------------------------// // Check the correlated value can be evaluated TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, evaluateCorrelated ) { double sampled_variable; sampled_variable = MonteCarlo::evaluateCorrelated( twod_distribution[2].second, twod_distribution[1].second, interpolation_fraction, independent_value ); TEST_FLOATING_EQUALITY( sampled_variable, 1.0, 1e-12 ); } //---------------------------------------------------------------------------// // Check that the distributions can be sampled independently TEUCHOS_UNIT_TEST( TwoDDistributionHelpers, independentSample ) { double sampled_variable; // Set up the random number stream std::vector<double> fake_stream( 2 ); fake_stream[0] = 0.5; // sample from the middle distribution fake_stream[1] = 0.5; // sample cdf = 0.5 Utility::RandomNumberGenerator::setFakeStream( fake_stream ); sampled_variable = MonteCarlo::independentSample( twod_distribution[2].second, twod_distribution[1].second, interpolation_fraction ); Utility::RandomNumberGenerator::unsetFakeStream(); TEST_FLOATING_EQUALITY( sampled_variable, 1.0, 1e-12 ); } //---------------------------------------------------------------------------// // Custom main function //---------------------------------------------------------------------------// int main( int argc, char** argv ) { Teuchos::CommandLineProcessor& clp = Teuchos::UnitTestRepository::getCLP(); const Teuchos::RCP<Teuchos::FancyOStream> out = Teuchos::VerboseObjectBase::getDefaultOStream(); Teuchos::CommandLineProcessor::EParseCommandLineReturn parse_return = clp.parse(argc,argv); if ( parse_return != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL ) { *out << "\nEnd Result: TEST FAILED" << std::endl; return parse_return; } // Create the two dimensional distribution 
twod_distribution.resize(3); twod_distribution[0].first = 0.001; // Create a first fake histogram distribution Teuchos::Array<double> dist_1_bin_boundaries( 4 ); dist_1_bin_boundaries[0] = -2.0; dist_1_bin_boundaries[1] = -1.0; dist_1_bin_boundaries[2] = 1.0; dist_1_bin_boundaries[3] = 2.0; Teuchos::Array<double> bin_values( 3 ); bin_values[0] = 2.0; bin_values[1] = 1.0; bin_values[2] = 2.0; twod_distribution[0].second.reset( new Utility::HistogramDistribution( dist_1_bin_boundaries, bin_values) ); twod_distribution[1].first = 0.01; // Create a second fake histogram distribution Teuchos::Array<double> dist_2_bin_boundaries( 4 ); dist_2_bin_boundaries[0] = -1.0; dist_2_bin_boundaries[1] = 0.0; dist_2_bin_boundaries[2] = 2.0; dist_2_bin_boundaries[3] = 3.0; twod_distribution[1].second.reset( new Utility::HistogramDistribution( dist_2_bin_boundaries, bin_values) ); twod_distribution[2].first = 0.1; // Create a third fake histogram distribution Teuchos::Array<double> dist_3_bin_boundaries( 4 ); dist_3_bin_boundaries[0] = 0.0; dist_3_bin_boundaries[1] = 1.0; dist_3_bin_boundaries[2] = 3.0; dist_3_bin_boundaries[3] = 4.0; twod_distribution[2].second.reset( new Utility::HistogramDistribution( dist_3_bin_boundaries, bin_values) ); independent_value = 1.0; energy = 0.05; interpolation_fraction = ( energy - twod_distribution[1].first )/ ( twod_distribution[2].first - twod_distribution[1].first ); // Initialize the random number generator Utility::RandomNumberGenerator::createStreams(); // Run the unit tests Teuchos::GlobalMPISession mpiSession( &argc, &argv ); const bool success = Teuchos::UnitTestRepository::runUnitTests( *out ); if (success) *out << "\nEnd Result: TEST PASSED" << std::endl; else *out << "\nEnd Result: TEST FAILED" << std::endl; clp.printFinalTimerSummary(out.ptr()); return (success ? 0 : 1); } //---------------------------------------------------------------------------// // end tstTwoDDistributionHelpers.cpp //---------------------------------------------------------------------------//<|fim▁end|>
{
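The fixture in main() above places the three histograms at energies 0.001, 0.01 and 0.1 and queries at 0.05, so the tests interpolate between the last two grid points. The expected value 13.0/9.0 follows from linear interpolation between the two cdf = 0.5 samples; a worked check in Python, with values copied from the test setup (the linear form is an assumption implied by the expected results, not taken from the library source):

# interpolation_fraction as computed at the end of main():
E1, E2, energy = 0.01, 0.1, 0.05
interpolation_fraction = (energy - E1) / (E2 - E1)   # = 0.04 / 0.09 = 4/9
# cdf = 0.5 sampled from the middle histogram gives 1.0, from the last 2.0;
# correlated sampling interpolates linearly between the two samples:
correlated = 1.0 + interpolation_fraction * (2.0 - 1.0)
assert abs(interpolation_fraction - 4.0 / 9.0) < 1e-15
assert abs(correlated - 13.0 / 9.0) < 1e-15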
<|file_name|>union_template.rs<|end_file_name|><|fim▁begin|>#![allow( dead_code, non_snake_case, non_camel_case_types, non_upper_case_globals )] #[repr(C)] pub struct NastyStruct {<|fim▁hole|> pub mIsSome: bool, pub mStorage: NastyStruct__bindgen_ty_1, pub __bindgen_anon_1: NastyStruct__bindgen_ty_2, } #[repr(C)] pub union NastyStruct__bindgen_ty_1 { pub mFoo: *mut ::std::os::raw::c_void, pub mDummy: ::std::os::raw::c_ulong, } impl Default for NastyStruct__bindgen_ty_1 { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[repr(C)] pub union NastyStruct__bindgen_ty_2 { pub wat: ::std::os::raw::c_short, pub wut: *mut ::std::os::raw::c_int, } impl Default for NastyStruct__bindgen_ty_2 { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } impl Default for NastyStruct { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[repr(C)] pub union Whatever { pub mTPtr: *mut ::std::os::raw::c_void, pub mInt: ::std::os::raw::c_int, } impl Default for Whatever { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } }<|fim▁end|>
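The bindgen output above wraps C unions whose variants alias the same storage. For comparison, the same layout can be sketched with Python's ctypes (a rough analogue written for illustration, not generated from the same header):

import ctypes

# Rough ctypes analogue of NastyStruct__bindgen_ty_1 above: both fields
# share one pointer-sized chunk of storage.
class StorageUnion(ctypes.Union):
    _fields_ = [("mFoo", ctypes.c_void_p),
                ("mDummy", ctypes.c_ulong)]

u = StorageUnion()
u.mDummy = 0           # zeroing one variant zeroes the shared storage,
                       # like the zero-filled Default impls above
assert u.mFoo is None  # a NULL c_void_p reads back as None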
<|file_name|>networkConfig.py<|end_file_name|><|fim▁begin|># # Copyright (c) 2010 Red Hat, Inc. # # This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # import gettext import logging import os import threading #from gi.repository import GObject import socket import rhsm.config import rhsm.connection as connection import rhsm.utils from rhsm.utils import remove_scheme from rhsm.utils import parse_url from subscription_manager.ga import GObject as ga_GObject from subscription_manager.gui.utils import show_error_window import subscription_manager.injection as inj from subscription_manager.gui import progress from subscription_manager.gui import widgets _ = gettext.gettext DIR = os.path.dirname(__file__) log = logging.getLogger('rhsm-app.' + __name__) class NetworkConfigDialog(widgets.SubmanBaseWidget): """This is the dialog that allows setting http proxy settings. It uses the instant apply paradigm or whatever you wanna call it that the gnome HIG recommends. Whenever a toggle button is flipped or a text entry changed, the new setting will be saved. """ widget_names = ["networkConfigDialog", "enableProxyButton", "enableProxyAuthButton", "proxyEntry", "proxyUserEntry", "proxyPasswordEntry", "cancelButton", "saveButton", "testConnectionButton", "connectionStatusLabel"] gui_file = "networkConfig" def __init__(self): # Get widgets we'll need to access super(NetworkConfigDialog, self).__init__() self.org_timeout = socket.getdefaulttimeout() self.progress_bar = None self.cfg = rhsm.config.initConfig() self.cp_provider = inj.require(inj.CP_PROVIDER) # Need to load values before connecting signals because when the dialog # starts up it seems to trigger the signals which overwrites the config # with the blank values. 
self.set_initial_values() self.enableProxyButton.connect("toggled", self.enable_action) self.enableProxyAuthButton.connect("toggled", self.enable_action) self.enableProxyButton.connect("toggled", self.clear_connection_label) self.enableProxyAuthButton.connect("toggled", self.clear_connection_label) self.enableProxyButton.connect("toggled", self.enable_test_button) self.proxyEntry.connect("changed", self.clear_connection_label) self.proxyUserEntry.connect("changed", self.clear_connection_label) self.proxyPasswordEntry.connect("changed", self.clear_connection_label) self.proxyEntry.connect("focus-out-event", self.clean_proxy_entry) self.cancelButton.connect("clicked", self.on_cancel_clicked) self.saveButton.connect("clicked", self.on_save_clicked) self.testConnectionButton.connect("clicked", self.on_test_connection_clicked) self.networkConfigDialog.connect("delete-event", self.deleted) def set_initial_values(self): proxy_url = self.cfg.get("server", "proxy_hostname") or "" # append the configured port, or the default of 3128 if none is set if proxy_url: proxy_url = proxy_url + ':' + (self.cfg.get("server", "proxy_port") or rhsm.config.DEFAULT_PROXY_PORT) self.proxyEntry.set_text("%s" % proxy_url) # show proxy/proxy auth sections as being enabled if we have values set # rhn actually has a separate config flag for enabling, which seems overkill if self.cfg.get("server", "proxy_hostname"): self.enableProxyButton.set_active(True) if self.cfg.get("server", "proxy_hostname") and self.cfg.get("server", "proxy_user"): self.enableProxyAuthButton.set_active(True) self.enable_action(self.enableProxyAuthButton) self.enable_action(self.enableProxyButton) # the extra or "" are to make sure we don't str None self.proxyUserEntry.set_text(str(self.cfg.get("server", "proxy_user") or "")) self.proxyPasswordEntry.set_text(str(self.cfg.get("server", "proxy_password") or "")) self.connectionStatusLabel.set_label("") # If there is no proxy information, disable the proxy test # button. if not self.enableProxyButton.get_active(): self.testConnectionButton.set_sensitive(False) self.enableProxyAuthButton.set_sensitive(False) def write_values(self, widget=None, dummy=None): proxy = self.proxyEntry.get_text() or "" # don't save these values if they are disabled in the gui if proxy and self.enableProxyButton.get_active(): # Remove any URI scheme provided proxy = remove_scheme(proxy) # Update the proxy entry field to show we removed any scheme self.proxyEntry.set_text(proxy) try: proxy_hostname, proxy_port = proxy.split(':') self.cfg.set("server", "proxy_hostname", proxy_hostname) self.cfg.set("server", "proxy_port", proxy_port) except ValueError: # no port? 
just write out the hostname and assume default self.cfg.set("server", "proxy_hostname", proxy) self.cfg.set("server", "proxy_port", rhsm.config.DEFAULT_PROXY_PORT) else: # delete config options if we disable it in the ui self.cfg.set("server", "proxy_hostname", "") self.cfg.set("server", "proxy_port", "") if self.enableProxyAuthButton.get_active(): if self.proxyUserEntry.get_text() is not None: self.cfg.set("server", "proxy_user", str(self.proxyUserEntry.get_text())) if self.proxyPasswordEntry.get_text() is not None: self.cfg.set("server", "proxy_password", str(self.proxyPasswordEntry.get_text())) else: self.cfg.set("server", "proxy_user", "") self.cfg.set("server", "proxy_password", "") try: self.cfg.save() self.cp_provider.set_connection_info() except Exception: show_error_window(_("There was an error saving your configuration.") + _("Make sure that you own %s.") % self.cfg.fileName, parent=self.networkConfigDialog) def show(self): self.set_initial_values() self.networkConfigDialog.present() def on_save_clicked(self, button): self.write_values() self.networkConfigDialog.hide() def on_cancel_clicked(self, button): self.networkConfigDialog.hide() def enable_test_button(self, button): self.testConnectionButton.set_sensitive(button.get_active()) def clear_connection_label(self, entry): self.connectionStatusLabel.set_label("") # only used as callback from test_connection thread def on_test_connection_finish(self, result): if result: self.connectionStatusLabel.set_label(_("Proxy connection succeeded")) else: self.connectionStatusLabel.set_label(_("Proxy connection failed")) self._clear_progress_bar() def _reset_socket_timeout(self): socket.setdefaulttimeout(self.org_timeout) def test_connection_wrapper(self, proxy_host, proxy_port, proxy_user, proxy_password):<|fim▁hole|>
 cp = connection.UEPConnection( proxy_hostname=proxy_host, proxy_port=proxy_port, proxy_user=proxy_user, proxy_password=proxy_password) try: socket.setdefaulttimeout(10) cp.getStatus() # Either connection.RemoteServerException or connection.RestlibException are considered # acceptable exceptions because they are only thrown as a response from the server. Meaning the # connection through the proxy was successful. 
except (connection.RemoteServerException, connection.RestlibException) as e: log.warn("Reporting proxy connection as good despite %s" % e) return True except connection.NetworkException, e: log.warn("%s when attempting to connect through %s:%s" % (e.code, proxy_host, proxy_port)) return False except Exception, e: log.exception("'%s' when attempting to connect through %s:%s" % (e, proxy_host, proxy_port)) return False else: return True finally: self._reset_socket_timeout() # Pass through of the return values of parse_proxy_entry # This was done to simplify on_test_connection_clicked def clean_proxy_entry(self, widget=None, dummy=None): proxy_url = self.proxyEntry.get_text() proxy_host, proxy_port = self.parse_proxy_entry(proxy_url) cleaned_proxy_url = "%s:%s" % (proxy_host, proxy_port) self.proxyEntry.set_text(cleaned_proxy_url) return (proxy_host, proxy_port) def parse_proxy_entry(self, proxy_url): proxy_url = remove_scheme(proxy_url) proxy_host = None proxy_port = None try: proxy_info = parse_url(proxy_url, default_port=rhsm.config.DEFAULT_PROXY_PORT) proxy_host = proxy_info[2] proxy_port = proxy_info[3] except rhsm.utils.ServerUrlParseErrorPort, e: proxy_host = proxy_url.split(':')[0] proxy_port = rhsm.config.DEFAULT_PROXY_PORT except rhsm.utils.ServerUrlParseError, e: log.error(e) return (proxy_host, proxy_port) def on_test_connection_clicked(self, button): proxy_host, proxy_port = self.clean_proxy_entry() # ensure that we only use those values for testing if required # this catches the case where there was previously a user and pass in the config # and the user unchecks the box, leaving behind the values for the time being. # Alternatively we could clear those boxes when the box is unchecked if self.enableProxyAuthButton.get_active(): proxy_user = self.proxyUserEntry.get_text() proxy_password = self.proxyPasswordEntry.get_text() else: proxy_user = None proxy_password = None self._display_progress_bar() threading.Thread(target=self.test_connection_wrapper, args=(proxy_host, proxy_port, proxy_user, proxy_password), name='TestNetworkConnectionThread').start() def deleted(self, event, data): self.write_values() self.networkConfigDialog.hide() self._clear_progress_bar() return True def _display_progress_bar(self): if self.progress_bar: self.progress_bar.set_title(_("Testing Connection")) self.progress_bar.set_label(_("Please wait")) else: self.progress_bar = progress.Progress(_("Testing Connection"), _("Please wait")) self.timer = ga_GObject.timeout_add(100, self.progress_bar.pulse) self.progress_bar.set_transient_for(self.networkConfigDialog) def _clear_progress_bar(self): if not self.progress_bar: # progress bar could be none iff self.test_connection is called directly return self.progress_bar.hide() ga_GObject.source_remove(self.timer) self.timer = 0 self.progress_bar = None def enable_action(self, button): if button.get_name() == "enableProxyButton": self.proxyEntry.set_sensitive(button.get_active()) self.proxyEntry.grab_focus() self.enableProxyAuthButton.set_sensitive(button.get_active()) # Proxy authentication should only be active if proxy is also enabled self.proxyUserEntry.set_sensitive(button.get_active() and self.enableProxyAuthButton.get_active()) self.proxyPasswordEntry.set_sensitive(button.get_active() and self.enableProxyAuthButton.get_active()) elif button.get_name() == "enableProxyAuthButton": self.proxyUserEntry.set_sensitive(button.get_active()) self.proxyPasswordEntry.set_sensitive(button.get_active()) self.get_object("usernameLabel").set_sensitive(button.get_active()) 
self.get_object("passwordLabel").set_sensitive(button.get_active()) def set_parent_window(self, window): self.networkConfigDialog.set_transient_for(window)<|fim▁end|>
connection_status = self.test_connection(proxy_host, proxy_port, proxy_user, proxy_password) ga_GObject.idle_add(self.on_test_connection_finish, connection_status) def test_connection(self, proxy_host, proxy_port, proxy_user, proxy_password):
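The completed test_connection_wrapper above runs the blocking probe on a worker thread and hands the result back to the GTK main loop via idle_add, so the dialog never freezes. The pattern in isolation (a sketch; the lambda at the bottom stands in for ga_GObject.idle_add):

import threading

def run_in_background(blocking_probe, on_done, idle_add):
    """Worker-thread pattern used by the test-connection flow above: only
    the idle_add callback, run on the main loop, may touch the GUI."""
    def worker():
        result = blocking_probe()    # may block on the network
        idle_add(on_done, result)    # queue the GUI update on the main loop
    threading.Thread(target=worker, name='TestNetworkConnectionThread').start()

# stand-in for a real main loop: invoke the queued callback immediately
run_in_background(lambda: True, lambda ok: None, lambda cb, *args: cb(*args))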
<|file_name|>sample_test.py<|end_file_name|><|fim▁begin|># Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS,<|fim▁hole|> import unittest from perfkitbenchmarker import sample class SampleTestCase(unittest.TestCase): def testMetadataOptional(self): instance = sample.Sample(metric='Test', value=1.0, unit='Mbps') self.assertDictEqual({}, instance.metadata) def testProvidedMetadataSet(self): metadata = {'origin': 'unit test'} instance = sample.Sample(metric='Test', value=1.0, unit='Mbps', metadata=metadata.copy()) self.assertDictEqual(metadata, instance.metadata)<|fim▁end|>
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
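testProvidedMetadataSet above passes metadata.copy() so the assertion cannot succeed merely because the Sample aliases the caller's dict. The aliasing pitfall it guards against, in isolation (NaiveSample is an illustrative stand-in, not perfkitbenchmarker's Sample):

class NaiveSample(object):
    def __init__(self, metadata=None):
        # keeping a reference to the caller's dict, instead of copying it
        self.metadata = metadata if metadata is not None else {}

shared = {'origin': 'unit test'}
instance = NaiveSample(metadata=shared)
shared['origin'] = 'mutated'
assert instance.metadata['origin'] == 'mutated'  # aliasing: the sample changed too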
<|file_name|>index.js<|end_file_name|><|fim▁begin|>/**<|fim▁hole|> * @copyright 2020 Photon Storm Ltd. * @license {@link https://opensource.org/licenses/MIT|MIT License} */ /** * @namespace Phaser.Structs */ module.exports = { List: require('./List'), Map: require('./Map'), ProcessQueue: require('./ProcessQueue'), RTree: require('./RTree'), Set: require('./Set'), Size: require('./Size') };<|fim▁end|>
* @author Richard Davey <[email protected]>
<|file_name|>include.js<|end_file_name|><|fim▁begin|>(function () { var oLoginViewModel, oSettingsViewModel ; AfterLogicApi.addPluginHook('view-model-defined', function (sViewModelName, oViewModel) { if (oViewModel && ('CLoginViewModel' === sViewModelName)) { oLoginViewModel = oViewModel; } if (oViewModel && ('CSettingsViewModel' === sViewModelName)) { oSettingsViewModel = oViewModel; if (!AfterLogicApi.getAppDataItem('User').CanLoginWithPassword) { oSettingsViewModel.getViewModel('two_factor_authentication').visible(false); } } }); AfterLogicApi.addPluginHook('api-mail-on-password-specified-success', function () { oSettingsViewModel.getViewModel('two_factor_authentication').visible(true); }); AfterLogicApi.addPluginHook('ajax-default-request', function (sAction, oParameters) { if (('SystemLogin' === sAction)) { this.oParams = oParameters; } }); AfterLogicApi.addPluginHook('ajax-default-response', function (sAction, oData) { if (('SystemLogin' === sAction && oData.Result != false && oData.ContinueAuth != true)) { oData['StopExecuteResponse'] = true; AfterLogicApi.showPopup(VerifyTokenPopup, [this.oParams.Email, oLoginViewModel]); } }); <|fim▁hole|><|fim▁end|>
}());
<|file_name|>backend.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf8 -*- # National Library of Norway, 2014-2015 # load the packages from pysqlite2 import dbapi2 as sqlite3 from collections import Counter from operator import itemgetter from itertools import chain from flask import Flask, Response, request, session, g, redirect, url_for, \ abort, render_template, flash, jsonify from contextlib import closing import re import json import sys import operator import itertools ## CONFIGURATION # path to databases try: path = str(sys.argv[1]) except: path = '' # specify port (default: 5000) try: port = int(sys.argv[2]) except: port = 5000 # specify host (default: 127.0.0.1) try: host = str(sys.argv[3]) except: host = '127.0.0.1' # paths for the databases UNIGRAM = path + 'unigram-one-row.db' BIGRAM = path + 'bigram-one-row.db' TRIGRAM = path + 'trigram-one-row.db' AVIS_UNIGRAM = path + 'avis-unigram-one-row.db' AVIS_BIGRAM = path + 'avis-bigram-one-row.db' AVIS_TRIGRAM = path + 'avis-trigram-one-row.db' # database structure db_names = {'bok': 'bok_', 'avis': 'avis_'} table_names = ['unigram', 'bigram', 'trigram'] index_names = {'bok': ['_lff_','_lfsf_','_lfstf_'], 'avis': ['_ff_','_fsf_','_fstf_']} field_names = ['first', 'second', 'third'] # Allowed parameters languages = 'all|nob|nno' corpora = 'bok|avis' # Default parameters default_params = {'terms': '', 'lang': 'all', 'case_sens': '0', 'freq': 'rel', 'corpus': 'bok'}; # Maximum values maxTerms = 10 maxNgram = 3 # currently, only unigram, bigram, trigram is supported maxChar = 200 # cut-off-point at 200 characters for query string maxWildcards = 5 maxAdd = 10 maxTrunct = 5 # loads a JSON object holding the max. frequencies per year for calculation of relative frequency in python (optional: you might want to store these in the database itself) with open ('totals.json', 'r') as f: freqs_per_year = json.load(f) # initiating Flask (with settings from environment variable - for use in development and production environments) app = Flask(__name__, static_url_path='/ngram/static') app.config.from_object(__name__) app.config.from_envvar('FLASK_NGRAM_SETTINGS') # connection to DB def connect_db(db_path): rv = sqlite3.connect(db_path) #rv.row_factory = sqlite3.Row return rv @app.before_request def before_request(): """ establish connection upon request """ g.db = connect_db(UNIGRAM) # Attach databases g.db.execute("ATTACH DATABASE '" + UNIGRAM + "' as bok_unigram;") g.db.execute("ATTACH DATABASE '" + BIGRAM + "' as bok_bigram;") g.db.execute("ATTACH DATABASE '" + TRIGRAM + "' as bok_trigram;") g.db.execute("ATTACH DATABASE '" + AVIS_UNIGRAM + "' as avis_unigram;") g.db.execute("ATTACH DATABASE '" + AVIS_BIGRAM + "' as avis_bigram") g.db.execute("ATTACH DATABASE '" + AVIS_TRIGRAM + "' as avis_trigram") @app.after_request def after_request(response): """ Close connection after request """ g.db.close() return response def query_db_dict(query, args=(), one=False): """ Return results as dictionary """ cur = g.db.execute(query, args) rv = [dict((cur.description[idx][0], value) for idx, value in enumerate(row)) for row in cur.fetchall()] return (rv[0] if rv else None) if one else rv def query_db_row(query, args=(), one=False): """ Return results as rows """ cur = g.db.execute(query, args) rv = [list((value) for idx, value in enumerate(row)) for row in cur.fetchall()] return (rv[0] if rv else None) if one else rv def return_terms(terms): """Gets a string of terms and returns them as a list, with some clean-up""" # index for wildcards (not 
allowed to exceed maxWildcards, these ones are powerful) wildcardIdx = 0 # we only allow a certain number of characters in the terms string terms = terms[:maxChar] # removes unnecessary whitespace or empty query terms terms = re.sub(r',\s{0,},',',', terms) # splits on comma (with following whitespace): commas may be masked by quotation marks terms = re.findall('[^\,\"]+|\"[^"]*\"', terms) # gets number of terms nTerms = len(terms) # checks if number exceeds maxTerms, remaining ones are removed if nTerms >= maxTerms: terms = terms[:maxTerms] nTerms = maxTerms # loops through each term for i in range(nTerms): # substitutes '*' with '%' for SQL queries, removes illegal wildcards (according to maxWildcards) if "*" in terms[i] and wildcardIdx < maxWildcards: wildcardIdx += 1 terms[i] = terms[i].replace("*", "%") else: terms[i] = terms[i].replace("*", "") # removes whitespace at the beginning or the end of the string terms[i] = re.sub(r'^\s+', '', terms[i]) terms[i] = re.sub(r'\s+$', '', terms[i]) # removes mask for comma if terms[i] == '","': terms[i] = re.sub(r'","',',', terms[i]) # removes whitespace between '+' and terms if "+" in terms[i]: terms[i] = re.sub(r'\s+\+', '+', terms[i]) terms[i] = re.sub(r'\+\s+', '+', terms[i]) return terms def query_factory(ngrams, lang, case_sens, corpus): """ Creates a sql query for each item in the object """ sql_array = [] args_array = [] label_array = [] lang_array = [] corpus_array = [] for ngram in ngrams: sql, args, query_lang, query_corpus = build_query_single(ngram, lang, case_sens, corpus) sql_array.append(sql) args_array.append(args) label_array.append(' '.join(ngram)) lang_array.append(query_lang) corpus_array.append(query_corpus) return sql_array, args_array, label_array, lang_array, corpus_array def extract_info(term): """ Extracts information after colon, returns only ngram and dictionary of arguments""" ngram = [] argumentDict = {} lastElement = len(term) - 1 splitted = term[lastElement].split(':') if len(splitted) >= 2: ngram.extend(term[:lastElement]) ngram.extend([splitted[0]]) extension = splitted[1:] for element in extension: if re.match(r'nob|nno|all', element): argumentDict['lang'] = element if re.match(r'bok|avis', element): argumentDict["db"] = element if re.match (r'avis', element): argumentDict["lang"] = 'all' if re.match (r'bok', element) and re.match(r'nob|nno|all', element) != -1: argumentDict["lang"] = 'all' if re.match(r'[0-9]{4}', element): argumentDict["anno"] = element return ngram, argumentDict def wildcard_search(ngrams, lang, case_sens, corpus): """ Returns the ten most common ngrams matching query """ whereClause = [] whereParams = [] args = [] ngramSize = len(ngrams) argumentDict = {"ngram": [], "lang": lang, "db": corpus} if extract_info(ngrams) != None: ngrams, argumentsExtracted = extract_info(ngrams) argumentDict = dict_merge(argumentDict, argumentsExtracted) # values based on input params = 'in (?,?)' if case_sens == '0' else 'in (?)' langClause = 'and lang = ?' if argumentDict["lang"] != "all" else '' getFieldNames = ', '.join(field_names[:ngramSize]) getTableNames = db_names[argumentDict["db"]] + table_names[ngramSize-1] + "." 
+ table_names[ngramSize-1]<|fim▁hole|> for ngram in ngrams: if "%" in ngram: argumentDict["ngram"].append(ngram) whereParams.append("LIKE ?") else: whereParams.append(params) if case_sens == '0': argumentDict["ngram"].extend(swapcase([ngram])) else: argumentDict["ngram"].append(ngram) idxName = query_planner(whereParams,argumentDict["ngram"]) whereClause = " and ".join( list(('(%s %s)' % (field_names[idx],whereParams[idx])) for idx, val in enumerate(ngrams)) ) + (langClause if argumentDict["db"] == 'bok' else '') sql = "SELECT DISTINCT %s FROM (SELECT %s, freq FROM %s INDEXED BY %s WHERE %s ORDER BY freq DESC LIMIT 10) T;" % (getFieldNames, getFieldNames, getTableNames, idxName, whereClause) # builds argument array for SQL query args.extend(argumentDict["ngram"]) args.append(argumentDict["anno"]) if "anno" in argumentDict else None if argumentDict["lang"] != 'all' and argumentDict["db"] == 'bok': args.append(argumentDict["lang"]) cur = g.db.execute(sql, args) return ([list((value) for idx, value in enumerate(row)) for row in cur.fetchall()], argumentDict["lang"], argumentDict["db"]) def query_planner(where,args): """ NB N-gram query planner """ letters = ['f','s','t'] letterCombination = '' for idx,val in enumerate(where): if '=' in where[idx]: letterCombination += letters[idx] elif 'LIKE' in where[idx] and len(args[idx]) > 1: letterCombination = ''.join(letters[:len(where)]) return '_' + letterCombination + 'f_' return '_' + letterCombination + 'f_' def extract_values(dictionary): values = [] for key, value in sorted(dictionary.items()): values.extend(value) return values def combination_gen(ngrams): """ Returns combinations for truncated expressions """ args = [] if len(ngrams) > 1: for item1 in ngrams[0]: for item2 in ngrams[1]: if len(ngrams) == 2: args.append([item1, item2]) if len(ngrams) == 3: for item3 in ngrams[2]: args.append([item1, item2, item3]) else: for item in ngrams[0]: args.append([item]) return args def dict_merge(a, b): c = a.copy() c.update(b) return c def build_query_single(ngram, lang, case_sens, corpus): args = [] argumentDict = {"ngram": [], "lang": lang, "db": corpus} ngramSize = len(ngram) # get values after colon, parse them if extract_info(ngram) != None: ngram, argumentsExtracted = extract_info(ngram) argumentDict = dict_merge(argumentDict, argumentsExtracted) # values based on input params = 'in (?,?)' if case_sens == '0' else 'in (?)' langClause = ' and lang = ?' if argumentDict["lang"] != 'all' else " and lang in (?,?)" whereClause = " and ".join( list(('(%s %s)' % (field_names[idx], params)) for idx, val in enumerate(ngram)) ) + (langClause if argumentDict["db"] == 'bok' else '') getTableName = db_names[argumentDict["db"]] + table_names[ngramSize-1] + "." 
+ table_names[ngramSize-1] # "Case-insensitive": because of limits of our current sqlite3 implementation, we only allow for a quasi case-insensitive search (only the first letter of a word is considered) if case_sens == '0': argumentDict["ngram"] = swapcase(ngram) else: argumentDict["ngram"] = ngram idxName = index_names[argumentDict["db"]][ngramSize-1] # Builds query string sql = "SELECT json FROM %s INDEXED BY %s WHERE %s" % (getTableName, idxName, whereClause) # Builds argument array args.extend(argumentDict["ngram"]) args.append(argumentDict["anno"]) if "anno" in argumentDict else None if argumentDict["lang"] != 'all' and argumentDict["db"] == 'bok': args.append(argumentDict["lang"]) elif argumentDict["lang"] == 'all' and argumentDict["db"] == 'bok': args.append('nob') args.append('nno') return (sql, args, argumentDict["lang"], argumentDict["db"]) def swapcase(args): """ Swaps the case of the first letter of the argument """ lowerUpperArgs = [] try: for arg in args: lowerUpperArgs += arg, arg[0].swapcase() + arg[1:] except: return None return lowerUpperArgs def tokenize(term): """ Very simple tokenizer: based on whitespace but not including parentheses """ return re.findall('[^\s\(]+|\([^)]*\)', term) def termParser(i, lParams): ngrams = [] term = lParams['terms'][i] if "+" in term: qType = 'agg' # splits on +, up to value of maxAdd aggNgrams = re.split('\+', term, maxAdd)[:maxAdd] for item in aggNgrams: aggNgram = tokenize(item) if len(aggNgram) > maxNgram: ngrams += [aggNgram[:maxNgram]] else: ngrams += [aggNgram] else: # invokes the tokenizer ngrams = tokenize(term) # only unigram to trigram search is allowed for if len(ngrams) > maxNgram: ngrams = ngrams[:maxNgram] if any("%" in ngram for ngram in ngrams): qType = 'wildcard' # returns ngrams for wildcard ngrams, lParams['lang'], lParams['corpus'] = wildcard_search(ngrams, lParams['lang'], lParams['case_sens'], lParams['corpus']) # hack: for now, case_sens must be 1 when doing wildcard_search lParams['case_sens'] = '1' else: # checks if the term contains brackets; if so, return the combinations # regular expression for finding brackets parentes = re.compile('\([^)]*\)') if any(parentes.match(ngram) for ngram in ngrams): qType = 'truncated' for i in range(len(ngrams)): ngrams_or = ngrams[i].strip('()') ngrams[i] = re.split("\s", ngrams_or, maxTrunct)[:maxTrunct] ngrams = combination_gen(ngrams) else: qType = 'single' ngrams = [ngrams] return (ngrams, qType, lParams) def merge_result(rows): """ Returns a merged object (similar to UNION SELECT) """ total = Counter() jsonObject = {} # loops through each result row for entry in rows: jsonObject = json.loads(entry[0]) entryCounter = Counter(jsonObject) total += entryCounter return total def get_relfreq(total,total_freq): """Calculates the relative frequency for each item, returns complete dictionary """ relfreq_dict = [] for attribute, value in total.iteritems(): if int(attribute) >= 1810: rel_freq = float(value) / total_freq[attribute] * 100 relfreq_dict.append({"x": int(attribute), "y": rel_freq, "f": int(value)}) return relfreq_dict def return_agg_results(sql,args,lang,label,corpus): """ Returns results for multiple items to be summed """ entries = [] result = [] corplang_set = set() corpus_totalfreq = [] total_freq = Counter() # Gets the result for each sub-query for idx, val in enumerate(sql): result += query_db_row(sql[idx], args[idx]) # merges the result total = merge_result(result) ## finds out which corpora/languages were used in the query prior to calculating relative 
frequency corplang_pairs = [[a, b] for a, b in zip(corpus, lang)] corplang_set = set(map(tuple, corplang_pairs)) for item in corplang_set: corpus_totalfreq.append([freqs_per_year[item[0]][item[1]]]) ## calculates the grand total frequency for item in corpus_totalfreq: entry_counter = Counter(item[0]) total_freq += entry_counter ## returns a sorted dictionary with relative frequencies relfreq_dict = get_relfreq(total,total_freq) relfreq_dict = sorted(relfreq_dict, key=itemgetter('x')) if relfreq_dict != []: entries += [{"key": label, "values": relfreq_dict}] return entries def return_single_results(sql,args,lang,label,corpus): """ Returns the results for single items """ entries = [] total_freq = Counter() # Gets the result for each sub-query for idx, val in enumerate(sql): result = query_db_row(sql[idx], args[idx]) total = merge_result(result) total_freq = freqs_per_year[corpus[idx]][lang[idx]] ## returns a sorted dictionary with relative frequencies relfreq_dict = get_relfreq(total,total_freq) relfreq_dict = sorted(relfreq_dict, key=itemgetter('x')) if relfreq_dict != []: entries += [{"key": label[idx], "values": relfreq_dict}] return entries def get_query_params(request): """ Returns a dictionary of query parameters """ qParams = {} # gets the query parameters, does some basic validation and builds a dictionary of parameters terms = request.args.get('terms') if terms: qParams['terms'] = terms lang = request.args.get('lang') if lang: if re.match(languages, lang): qParams['lang'] = lang case_sens = request.args.get('case_sens') if case_sens: if re.match('0|1',case_sens): qParams['case_sens'] = case_sens freq = request.args.get('freq') if freq: if re.match('rel|abs',freq): qParams['freq'] = freq corpus = request.args.get('corpus') if corpus: if re.match(corpora,corpus): qParams['corpus'] = corpus return qParams @app.route('/') def index(): return render_template('header-footer.html') @app.route('/ngram/query') def query(): entries = [] # get query parameters qParams = get_query_params(request) # fills in default_params for those not set sParams = dict_merge(default_params, qParams) # does some clean-up and returns terms as list sParams['terms'] = return_terms(sParams['terms']) # gets total number of statements nTerms = len(sParams['terms']) # loops through each term, interpreting it and generating query for i in range(nTerms): # invokes term parser ngrams, qType, lParams = termParser(i, sParams) # starts the query factory for the interpreted term sql, args, label, lang, corpus = query_factory(ngrams, lParams['lang'], lParams['case_sens'], lParams['corpus']) # run query depending on amount of results from query_factory if len(sql) == 1: entries += return_single_results(sql,args,lang,label,corpus) elif len(sql) > 1: if qType == 'agg': entries += return_agg_results(sql, args, lang, label, corpus) elif qType == 'wildcard' or qType == 'truncated': entries += return_single_results(sql,args,lang,label,corpus) else: pass else: pass jsonOutput = export_to_json(entries) return Response(jsonOutput, mimetype='application/json') def export_to_json(entries): """ Exports results as a JSON object """ return json.dumps(entries, indent=4, separators=(', ', ': ')) def export_to_json_file(entries): """ Exports result as JSON file """ with open('static/dump.json', 'wb') as outfile: json.dump(entries, outfile, indent=4, separators=(', ', ': ')) if __name__ == '__main__': app.run(port=port,host=host)<|fim▁end|>
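merge_result and get_relfreq above reduce the per-row JSON year counts with collections.Counter and normalise them against the per-year totals loaded from totals.json. The same reduction, condensed with made-up numbers (a Python 3 sketch of the logic above):

import json
from collections import Counter

rows = [json.dumps({"1900": 10, "1901": 4}), json.dumps({"1901": 6})]
total = Counter()
for row in rows:
    total += Counter(json.loads(row))              # merge_result: sum counts per year
totals_per_year = {"1900": 1000.0, "1901": 500.0}  # stands in for freqs_per_year
relfreq = [{"x": int(year), "y": count / totals_per_year[year] * 100, "f": count}
           for year, count in total.items()]
# e.g. for 1901: (4 + 6) / 500 * 100 = 2.0 (per cent relative frequency)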
<|file_name|>views.py<|end_file_name|><|fim▁begin|>import logging from flask import abort<|fim▁hole|>from flask import session from flask import url_for from testrail_reporting.auth.models import AuthUser from testrail_reporting.auth.oauth import get_google log = logging.getLogger(__name__) auth = Blueprint('auth', __name__) @auth.route('/login') def login(): callback = url_for('auth.authorized', _external=True) return get_google().authorize(callback=callback) @auth.route('/authorized') def authorized(): resp = get_google().authorized_response() if resp is None: abort(401) google_token = resp['access_token'] session['google_token'] = (google_token, '') user_info = get_google().get('userinfo').data domain = user_info.get('hd', None) if domain != current_app.config['GOOGLE_APP_DOMAIN']: flash('Domain is not allowed') return redirect(url_for('pages.index')) user_info.update({'google_token': google_token}) AuthUser.objects(email=user_info["email"]).update_one(upsert=True, **user_info) return redirect(url_for('pages.index')) @auth.route('/logout') def logout(): session.pop('google_token', None) return redirect(url_for('pages.login'))<|fim▁end|>
from flask import Blueprint from flask import current_app from flask import flash from flask import redirect
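authorized() above persists the Google profile with a single MongoEngine upsert: update the document matching the email, or insert one if absent. The upsert semantics sketched at dict level (illustrative only; MongoEngine performs this atomically in the database):

def upsert(collection, key_field, doc):
    """Dict-level sketch of Model.objects(...).update_one(upsert=True, **doc)."""
    existing = collection.get(doc[key_field])
    if existing is None:
        collection[doc[key_field]] = dict(doc)  # insert a new document
    else:
        existing.update(doc)                    # update the matching one

users = {}
upsert(users, 'email', {'email': '[email protected]', 'google_token': 'abc'})
upsert(users, 'email', {'email': '[email protected]', 'google_token': 'def'})
assert users['[email protected]']['google_token'] == 'def'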
<|file_name|>location.rs<|end_file_name|><|fim▁begin|>pub struct GpsCoordinates { pub lat: f64, pub long: f64,<|fim▁hole|><|fim▁end|>
}
<|file_name|>admission_test.go<|end_file_name|><|fim▁begin|>/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package imagepolicy

import (
    "crypto/tls"
    "crypto/x509"
    "encoding/json"
    "math/rand"
    "net/http"
    "net/http/httptest"
    "reflect"
    "strconv"
    "testing"
    "time"

    "k8s.io/api/imagepolicy/v1alpha1"
    "k8s.io/apiserver/pkg/admission"
    "k8s.io/apiserver/pkg/authentication/user"
    "k8s.io/client-go/tools/clientcmd/api/v1"
    api "k8s.io/kubernetes/pkg/apis/core"

    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "text/template"

    _ "k8s.io/kubernetes/pkg/apis/imagepolicy/install"
)

const defaultConfigTmplJSON = `
{
"imagePolicy": {
   "kubeConfigFile": "{{ .KubeConfig }}",
   "allowTTL": {{ .AllowTTL }},
   "denyTTL": {{ .DenyTTL }},
   "retryBackoff": {{ .RetryBackoff }},
   "defaultAllow": {{ .DefaultAllow }}
}
}
`

const defaultConfigTmplYAML = `
imagePolicy:
  kubeConfigFile: "{{ .KubeConfig }}"
  allowTTL: {{ .AllowTTL }}
  denyTTL: {{ .DenyTTL }}
  retryBackoff: {{ .RetryBackoff }}
  defaultAllow: {{ .DefaultAllow }}
`

func TestNewFromConfig(t *testing.T) {
    dir, err := ioutil.TempDir("", "")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(dir)

    data := struct {
        CA   string
        Cert string
        Key  string
    }{
        CA:   filepath.Join(dir, "ca.pem"),
        Cert: filepath.Join(dir, "clientcert.pem"),
        Key:  filepath.Join(dir, "clientkey.pem"),
    }

    files := []struct {
        name string
        data []byte
    }{
        {data.CA, caCert},
        {data.Cert, clientCert},
        {data.Key, clientKey},
    }
    for _, file := range files {
        if err := ioutil.WriteFile(file.name, file.data, 0400); err != nil {
            t.Fatal(err)
        }
    }

    tests := []struct {
        msg            string
        kubeConfigTmpl string
        wantErr        bool
    }{
        {
            msg: "a single cluster and single user",
            kubeConfigTmpl: `
clusters:
- cluster:
    certificate-authority: {{ .CA }}
    server: https://admission.example.com
  name: foobar
users:
- name: a cluster
  user:
    client-certificate: {{ .Cert }}
    client-key: {{ .Key }}
`,
            wantErr: true,
        },
        {
            msg: "multiple clusters with no context",
            kubeConfigTmpl: `
clusters:
- cluster:
    certificate-authority: {{ .CA }}
    server: https://admission.example.com
  name: foobar
- cluster:
    certificate-authority: a bad certificate path
    server: https://admission.example.com
  name: barfoo
users:
- name: a name
  user:
    client-certificate: {{ .Cert }}
    client-key: {{ .Key }}
`,
            wantErr: true,
        },
        {
            msg: "multiple clusters with a context",
            kubeConfigTmpl: `
clusters:
- cluster:
    certificate-authority: a bad certificate path
    server: https://admission.example.com
  name: foobar
- cluster:
    certificate-authority: {{ .CA }}
    server: https://admission.example.com
  name: barfoo
users:
- name: a name
  user:
    client-certificate: {{ .Cert }}
    client-key: {{ .Key }}
contexts:
- name: default
  context:
    cluster: barfoo
    user: a name
current-context: default
`,
            wantErr: false,
        },
        {
            msg: "cluster with bad certificate path specified",
            kubeConfigTmpl: `
clusters:
- cluster:
    certificate-authority: a bad certificate path
    server: https://admission.example.com
  name: foobar
- cluster:
    certificate-authority: {{ .CA }}
    server: https://admission.example.com
  name: barfoo
users:
- name: a name
  user:
    client-certificate: {{ .Cert }}
    client-key: {{ .Key }}
contexts:
- name: default
  context:
    cluster: foobar
    user: a name
current-context: default
`,
            wantErr: true,
        },
    }
    for _, tt := range tests {
        // Use a closure so defer statements trigger between loop iterations.
        t.Run(tt.msg, func(t *testing.T) {
            err := func() error {
                tempfile, err := ioutil.TempFile("", "")
                if err != nil {
                    return err
                }
                p := tempfile.Name()
                defer os.Remove(p)
                tmpl, err := template.New("test").Parse(tt.kubeConfigTmpl)
                if err != nil {
                    return fmt.Errorf("failed to parse test template: %v", err)
                }
                if err := tmpl.Execute(tempfile, data); err != nil {
                    return fmt.Errorf("failed to execute test template: %v", err)
                }

                tempconfigfile, err := ioutil.TempFile("", "")
                if err != nil {
                    return err
                }
                pc := tempconfigfile.Name()
                defer os.Remove(pc)
                configTmpl, err := template.New("testconfig").Parse(defaultConfigTmplJSON)
                if err != nil {
                    return fmt.Errorf("failed to parse test template: %v", err)
                }
                dataConfig := struct {
                    KubeConfig   string
                    AllowTTL     int
                    DenyTTL      int
                    RetryBackoff int
                    DefaultAllow bool
                }{
                    KubeConfig:   p,
                    AllowTTL:     500,
                    DenyTTL:      500,
                    RetryBackoff: 500,
                    DefaultAllow: true,
                }
                if err := configTmpl.Execute(tempconfigfile, dataConfig); err != nil {
                    return fmt.Errorf("failed to execute test template: %v", err)
                }

                // Create a new admission controller
                configFile, err := os.Open(pc)
                if err != nil {
                    return fmt.Errorf("failed to read test config: %v", err)
                }
                defer configFile.Close()

                _, err = NewImagePolicyWebhook(configFile)
                return err
            }()
            if err != nil && !tt.wantErr {
                t.Errorf("failed to load plugin from config %q: %v", tt.msg, err)
            }
            if err == nil && tt.wantErr {
                t.Errorf("wanted an error when loading config, did not get one: %q", tt.msg)
            }
        })
    }
}

// Service mocks a remote service.
type Service interface {
    Review(*v1alpha1.ImageReview)
    HTTPStatusCode() int
}

// NewTestServer wraps a Service as an httptest.Server.
func NewTestServer(s Service, cert, key, caCert []byte) (*httptest.Server, error) {
    var tlsConfig *tls.Config
    if cert != nil {
        cert, err := tls.X509KeyPair(cert, key)
        if err != nil {
            return nil, err
        }
        tlsConfig = &tls.Config{Certificates: []tls.Certificate{cert}}
    }

    if caCert != nil {
        rootCAs := x509.NewCertPool()
        rootCAs.AppendCertsFromPEM(caCert)
        if tlsConfig == nil {
            tlsConfig = &tls.Config{}<|fim▁hole|>
        }
        tlsConfig.ClientCAs = rootCAs
        tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
    }

    serveHTTP := func(w http.ResponseWriter, r *http.Request) {
        var review v1alpha1.ImageReview
        if err := json.NewDecoder(r.Body).Decode(&review); err != nil {
            http.Error(w, fmt.Sprintf("failed to decode body: %v", err), http.StatusBadRequest)
            return
        }
        if s.HTTPStatusCode() < 200 || s.HTTPStatusCode() >= 300 {
            http.Error(w, "HTTP Error", s.HTTPStatusCode())
            return
        }
        s.Review(&review)
        type status struct {
            Allowed          bool              `json:"allowed"`
            Reason           string            `json:"reason"`
            AuditAnnotations map[string]string `json:"auditAnnotations"`
        }
        resp := struct {
            APIVersion string `json:"apiVersion"`
            Kind       string `json:"kind"`
            Status     status `json:"status"`
        }{
            APIVersion: v1alpha1.SchemeGroupVersion.String(),
            Kind:       "ImageReview",
            Status: status{
                review.Status.Allowed,
                review.Status.Reason,
                review.Status.AuditAnnotations,
            },
        }

        w.Header().Set("Content-Type", "application/json")
        json.NewEncoder(w).Encode(resp)
    }

    server := httptest.NewUnstartedServer(http.HandlerFunc(serveHTTP))
    server.TLS = tlsConfig
    server.StartTLS()
    return server, nil
}

// A service that can be set to allow all or deny all authorization requests.
type mockService struct {
    allow          bool
    statusCode     int
    outAnnotations map[string]string
}

func (m *mockService) Review(r *v1alpha1.ImageReview) {
    r.Status.Allowed = m.allow

    // hardcoded overrides
    if r.Spec.Containers[0].Image == "good" {
        r.Status.Allowed = true
    }

    for _, c := range r.Spec.Containers {
        if c.Image == "bad" {
            r.Status.Allowed = false
        }
    }

    if !r.Status.Allowed {
        r.Status.Reason = "not allowed"
    }
    r.Status.AuditAnnotations = m.outAnnotations
}
func (m *mockService) Allow()              { m.allow = true }
func (m *mockService) Deny()               { m.allow = false }
func (m *mockService) HTTPStatusCode() int { return m.statusCode }

// newImagePolicyWebhook creates a temporary kubeconfig file from the provided arguments and attempts to load
// a new newImagePolicyWebhook from it.
func newImagePolicyWebhook(callbackURL string, clientCert, clientKey, ca []byte, cacheTime time.Duration, defaultAllow bool) (*Plugin, error) {
    tempfile, err := ioutil.TempFile("", "")
    if err != nil {
        return nil, err
    }
    p := tempfile.Name()
    defer os.Remove(p)
    config := v1.Config{
        Clusters: []v1.NamedCluster{
            {
                Cluster: v1.Cluster{Server: callbackURL, CertificateAuthorityData: ca},
            },
        },
        AuthInfos: []v1.NamedAuthInfo{
            {
                AuthInfo: v1.AuthInfo{ClientCertificateData: clientCert, ClientKeyData: clientKey},
            },
        },
    }
    if err := json.NewEncoder(tempfile).Encode(config); err != nil {
        return nil, err
    }

    tempconfigfile, err := ioutil.TempFile("", "")
    if err != nil {
        return nil, err
    }
    pc := tempconfigfile.Name()
    defer os.Remove(pc)
    configTmpl, err := template.New("testconfig").Parse(defaultConfigTmplYAML)
    if err != nil {
        return nil, fmt.Errorf("failed to parse test template: %v", err)
    }
    dataConfig := struct {
        KubeConfig   string
        AllowTTL     int64
        DenyTTL      int64
        RetryBackoff int64
        DefaultAllow bool
    }{
        KubeConfig:   p,
        AllowTTL:     cacheTime.Nanoseconds(),
        DenyTTL:      cacheTime.Nanoseconds(),
        RetryBackoff: 0,
        DefaultAllow: defaultAllow,
    }
    if err := configTmpl.Execute(tempconfigfile, dataConfig); err != nil {
        return nil, fmt.Errorf("failed to execute test template: %v", err)
    }

    // Create a new admission controller
    configFile, err := os.Open(pc)
    if err != nil {
        return nil, fmt.Errorf("failed to read test config: %v", err)
    }
    defer configFile.Close()

    wh, err := NewImagePolicyWebhook(configFile)
    if err != nil {
        return nil, err
    }
    return wh, err
}

func TestTLSConfig(t *testing.T) {
    tests := []struct {
        test                            string
        clientCert, clientKey, clientCA []byte
        serverCert, serverKey, serverCA []byte
        wantAllowed, wantErr            bool
    }{
        {
            test:        "TLS setup between client and server",
            clientCert:  clientCert,
            clientKey:   clientKey,
            clientCA:    caCert,
            serverCert:  serverCert,
            serverKey:   serverKey,
            serverCA:    caCert,
            wantAllowed: true,
        },
        {
            test:        "Server does not require client auth",
            clientCA:    caCert,
            serverCert:  serverCert,
            serverKey:   serverKey,
            wantAllowed: true,
        },
        {
            test:        "Server does not require client auth, client provides it",
            clientCert:  clientCert,
            clientKey:   clientKey,
            clientCA:    caCert,
            serverCert:  serverCert,
            serverKey:   serverKey,
            wantAllowed: true,
        },
        {
            test:       "Client does not trust server",
            clientCert: clientCert,
            clientKey:  clientKey,
            serverCert: serverCert,
            serverKey:  serverKey,
            wantErr:    true,
        },
        {
            test:       "Server does not trust client",
            clientCert: clientCert,
            clientKey:  clientKey,
            clientCA:   caCert,
            serverCert: serverCert,
            serverKey:  serverKey,
            serverCA:   badCACert,
            wantErr:    true,
        },
        {
            // Plugin does not support insecure configurations.
            test:    "Server is using insecure connection",
            wantErr: true,
        },
    }
    for _, tt := range tests {
        // Use a closure so defer statements trigger between loop iterations.
        t.Run(tt.test, func(t *testing.T) {
            service := new(mockService)
            service.statusCode = 200

            server, err := NewTestServer(service, tt.serverCert, tt.serverKey, tt.serverCA)
            if err != nil {
                t.Errorf("%s: failed to create server: %v", tt.test, err)
                return
            }
            defer server.Close()

            wh, err := newImagePolicyWebhook(server.URL, tt.clientCert, tt.clientKey, tt.clientCA, -1, false)
            if err != nil {
                t.Errorf("%s: failed to create client: %v", tt.test, err)
                return
            }

            pod := goodPod(strconv.Itoa(rand.Intn(1000)))
            attr := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, false, &user.DefaultInfo{})

            // Allow all and see if we get an error.
            service.Allow()

            err = wh.Validate(attr, nil)
            if tt.wantAllowed {
                if err != nil {
                    t.Errorf("expected successful admission")
                }
            } else {
                if err == nil {
                    t.Errorf("expected failed admission")
                }
            }

            if tt.wantErr {
                if err == nil {
                    t.Errorf("expected error making admission request: %v", err)
                }
                return
            }

            if err != nil {
                t.Errorf("%s: failed to admit with AllowAll policy: %v", tt.test, err)
                return
            }

            service.Deny()
            if err := wh.Validate(attr, nil); err == nil {
                t.Errorf("%s: incorrectly admitted with DenyAll policy", tt.test)
            }
        })
    }
}

type webhookCacheTestCase struct {
    statusCode         int
    expectedErr        bool
    expectedAuthorized bool
    expectedCached     bool
}

func testWebhookCacheCases(t *testing.T, serv *mockService, wh *Plugin, attr admission.Attributes, tests []webhookCacheTestCase) {
    for _, test := range tests {
        serv.statusCode = test.statusCode

        err := wh.Validate(attr, nil)
        authorized := err == nil

        if test.expectedErr && err == nil {
            t.Errorf("Expected error")
        } else if !test.expectedErr && err != nil {
            t.Fatal(err)
        }

        if test.expectedAuthorized && !authorized {
            if test.expectedCached {
                t.Errorf("Webhook should have successful response cached, but authorizer reported unauthorized.")
            } else {
                t.Errorf("Webhook returned HTTP %d, but authorizer reported unauthorized.", test.statusCode)
            }
        } else if !test.expectedAuthorized && authorized {
            t.Errorf("Webhook returned HTTP %d, but authorizer reported success.", test.statusCode)
        }
    }
}

// TestWebhookCache verifies that error responses from the server are not
// cached, but successful responses are.
func TestWebhookCache(t *testing.T) {
    serv := new(mockService)
    s, err := NewTestServer(serv, serverCert, serverKey, caCert)
    if err != nil {
        t.Fatal(err)
    }
    defer s.Close()

    // Create an admission controller that caches successful responses.
    wh, err := newImagePolicyWebhook(s.URL, clientCert, clientKey, caCert, 200, false)
    if err != nil {
        t.Fatal(err)
    }

    tests := []webhookCacheTestCase{
        {statusCode: 500, expectedErr: true, expectedAuthorized: false, expectedCached: false},
        {statusCode: 404, expectedErr: true, expectedAuthorized: false, expectedCached: false},
        {statusCode: 403, expectedErr: true, expectedAuthorized: false, expectedCached: false},
        {statusCode: 401, expectedErr: true, expectedAuthorized: false, expectedCached: false},
        {statusCode: 200, expectedErr: false, expectedAuthorized: true, expectedCached: false},
        {statusCode: 500, expectedErr: false, expectedAuthorized: true, expectedCached: true},
    }

    attr := admission.NewAttributesRecord(goodPod("test"), nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, false, &user.DefaultInfo{})

    serv.allow = true

    testWebhookCacheCases(t, serv, wh, attr, tests)

    // For a different request, webhook should be called again.
    tests = []webhookCacheTestCase{
        {statusCode: 500, expectedErr: true, expectedAuthorized: false, expectedCached: false},
        {statusCode: 200, expectedErr: false, expectedAuthorized: true, expectedCached: false},
        {statusCode: 500, expectedErr: false, expectedAuthorized: true, expectedCached: true},
    }
    attr = admission.NewAttributesRecord(goodPod("test2"), nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, false, &user.DefaultInfo{})

    testWebhookCacheCases(t, serv, wh, attr, tests)
}

func TestContainerCombinations(t *testing.T) {
    tests := []struct {
        test                 string
        pod                  *api.Pod
        wantAllowed, wantErr bool
    }{
        {
            test:        "Single container allowed",
            pod:         goodPod("good"),
            wantAllowed: true,
        },
        {
            test:        "Single container denied",
            pod:         goodPod("bad"),
            wantAllowed: false,
            wantErr:     true,
        },
        {
            test: "One good container, one bad",
            pod: &api.Pod{
                Spec: api.PodSpec{
                    ServiceAccountName: "default",
                    SecurityContext:    &api.PodSecurityContext{},
                    Containers: []api.Container{
                        {
                            Image:           "bad",
                            SecurityContext: &api.SecurityContext{},
                        },
                        {
                            Image:           "good",
                            SecurityContext: &api.SecurityContext{},
                        },
                    },
                },
            },
            wantAllowed: false,
            wantErr:     true,
        },
        {
            test: "Multiple good containers",
            pod: &api.Pod{
                Spec: api.PodSpec{
                    ServiceAccountName: "default",
                    SecurityContext:    &api.PodSecurityContext{},
                    Containers: []api.Container{
                        {
                            Image:           "good",
                            SecurityContext: &api.SecurityContext{},
                        },
                        {
                            Image:           "good",
                            SecurityContext: &api.SecurityContext{},
                        },
                    },
                },
            },
            wantAllowed: true,
            wantErr:     false,
        },
        {
            test: "Multiple bad containers",
            pod: &api.Pod{
                Spec: api.PodSpec{
                    ServiceAccountName: "default",
                    SecurityContext:    &api.PodSecurityContext{},
                    Containers: []api.Container{
                        {
                            Image:           "bad",
                            SecurityContext: &api.SecurityContext{},
                        },
                        {
                            Image:           "bad",
                            SecurityContext: &api.SecurityContext{},
                        },
                    },
                },
            },
            wantAllowed: false,
            wantErr:     true,
        },
        {
            test: "Good container, bad init container",
            pod: &api.Pod{
                Spec: api.PodSpec{
                    ServiceAccountName: "default",
                    SecurityContext:    &api.PodSecurityContext{},
                    Containers: []api.Container{
                        {
                            Image:           "good",
                            SecurityContext: &api.SecurityContext{},
                        },
                    },
                    InitContainers: []api.Container{
                        {
                            Image:           "bad",
                            SecurityContext: &api.SecurityContext{},
                        },
                    },
                },
            },
            wantAllowed: false,
            wantErr:     true,
        },
        {
            test: "Bad container, good init container",
            pod: &api.Pod{
                Spec: api.PodSpec{
                    ServiceAccountName: "default",
                    SecurityContext:    &api.PodSecurityContext{},
                    Containers: []api.Container{
                        {
                            Image:           "bad",
                            SecurityContext: &api.SecurityContext{},
                        },
                    },
                    InitContainers: []api.Container{
                        {
                            Image:           "good",
                            SecurityContext: &api.SecurityContext{},
                        },
                    },
                },
            },
            wantAllowed: false,
            wantErr:     true,
        },
        {
            test: "Good container, good init container",
            pod: &api.Pod{
                Spec: api.PodSpec{
                    ServiceAccountName: "default",
                    SecurityContext:    &api.PodSecurityContext{},
                    Containers: []api.Container{
                        {
                            Image:           "good",
                            SecurityContext: &api.SecurityContext{},
                        },
                    },
                    InitContainers: []api.Container{
                        {
                            Image:           "good",
                            SecurityContext: &api.SecurityContext{},
                        },
                    },
                },
            },
            wantAllowed: true,
            wantErr:     false,
        },
    }
    for _, tt := range tests {
        // Use a closure so defer statements trigger between loop iterations.
        t.Run(tt.test, func(t *testing.T) {
            service := new(mockService)
            service.statusCode = 200

            server, err := NewTestServer(service, serverCert, serverKey, caCert)
            if err != nil {
                t.Errorf("%s: failed to create server: %v", tt.test, err)
                return
            }
            defer server.Close()

            wh, err := newImagePolicyWebhook(server.URL, clientCert, clientKey, caCert, 0, false)
            if err != nil {
                t.Errorf("%s: failed to create client: %v", tt.test, err)
                return
            }

            attr := admission.NewAttributesRecord(tt.pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, false, &user.DefaultInfo{})

            err = wh.Validate(attr, nil)
            if tt.wantAllowed {
                if err != nil {
                    t.Errorf("expected successful admission: %s", tt.test)
                }
            } else {
                if err == nil {
                    t.Errorf("expected failed admission: %s", tt.test)
                }
            }

            if tt.wantErr {
                if err == nil {
                    t.Errorf("expected error making admission request: %v", err)
                }
                return
            }

            if err != nil {
                t.Errorf("%s: failed to admit: %v", tt.test, err)
                return
            }
        })
    }
}

// fakeAttributes decorate kadmission.Attributes. It's used to trace the added annotations.
type fakeAttributes struct {
    admission.Attributes
    annotations map[string]string
}

func (f fakeAttributes) AddAnnotation(k, v string) error {
    f.annotations[k] = v
    return f.Attributes.AddAnnotation(k, v)
}

func TestDefaultAllow(t *testing.T) {
    tests := []struct {
        test                               string
        pod                                *api.Pod
        defaultAllow                       bool
        wantAllowed, wantErr, wantFailOpen bool
    }{
        {
            test:         "DefaultAllow = true, backend unreachable, bad image",
            pod:          goodPod("bad"),
            defaultAllow: true,
            wantAllowed:  true,
            wantFailOpen: true,
        },
        {
            test:         "DefaultAllow = true, backend unreachable, good image",
            pod:          goodPod("good"),
            defaultAllow: true,
            wantAllowed:  true,
            wantFailOpen: true,
        },
        {
            test:         "DefaultAllow = false, backend unreachable, good image",
            pod:          goodPod("good"),
            defaultAllow: false,
            wantAllowed:  false,
            wantErr:      true,
            wantFailOpen: false,
        },
        {
            test:         "DefaultAllow = false, backend unreachable, bad image",
            pod:          goodPod("bad"),
            defaultAllow: false,
            wantAllowed:  false,
            wantErr:      true,
            wantFailOpen: false,
        },
    }
    for _, tt := range tests {
        // Use a closure so defer statements trigger between loop iterations.
        t.Run(tt.test, func(t *testing.T) {
            service := new(mockService)
            service.statusCode = 500

            server, err := NewTestServer(service, serverCert, serverKey, caCert)
            if err != nil {
                t.Errorf("%s: failed to create server: %v", tt.test, err)
                return
            }
            defer server.Close()

            wh, err := newImagePolicyWebhook(server.URL, clientCert, clientKey, caCert, 0, tt.defaultAllow)
            if err != nil {
                t.Errorf("%s: failed to create client: %v", tt.test, err)
                return
            }

            attr := admission.NewAttributesRecord(tt.pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, false, &user.DefaultInfo{})
            annotations := make(map[string]string)
            attr = &fakeAttributes{attr, annotations}

            err = wh.Validate(attr, nil)
            if tt.wantAllowed {
                if err != nil {
                    t.Errorf("expected successful admission")
                }
            } else {
                if err == nil {
                    t.Errorf("expected failed admission")
                }
            }

            if tt.wantErr {
                if err == nil {
                    t.Errorf("expected error making admission request: %v", err)
                }
                return
            }

            if err != nil {
                t.Errorf("%s: failed to admit: %v", tt.test, err)
                return
            }

            podAnnotations := tt.pod.GetAnnotations()
            if tt.wantFailOpen {
                if podAnnotations == nil || podAnnotations[api.ImagePolicyFailedOpenKey] != "true" {
                    t.Errorf("missing expected fail open pod annotation")
                }
                if annotations[AuditKeyPrefix+ImagePolicyFailedOpenKeySuffix] != "true" {
                    t.Errorf("missing expected fail open attributes annotation")
                }
            } else {
                if podAnnotations != nil && podAnnotations[api.ImagePolicyFailedOpenKey] == "true" {
                    t.Errorf("found unexpected fail open pod annotation")
                }
                if annotations[AuditKeyPrefix+ImagePolicyFailedOpenKeySuffix] == "true" {
                    t.Errorf("found unexpected fail open attributes annotation")
                }
            }
        })
    }
}

// A service that can record annotations sent to it
type annotationService struct {
    annotations map[string]string
}

func (a *annotationService) Review(r *v1alpha1.ImageReview) {
    a.annotations = make(map[string]string)
    for k, v := range r.Spec.Annotations {
        a.annotations[k] = v
    }
    r.Status.Allowed = true
}
func (a *annotationService) HTTPStatusCode() int                { return 200 }
func (a *annotationService) Annotations() map[string]string     { return a.annotations }

func TestAnnotationFiltering(t *testing.T) {
    tests := []struct {
        test           string
        annotations    map[string]string
        outAnnotations map[string]string
    }{
        {
            test: "all annotations filtered out",
            annotations: map[string]string{
                "test":    "test",
                "another": "annotation",
                "":        "",
            },
            outAnnotations: map[string]string{},
        },
        {
            test: "image-policy annotations allowed",
            annotations: map[string]string{
                "my.image-policy.k8s.io/test":     "test",
                "other.image-policy.k8s.io/test2": "annotation",
                "test":                            "test",
                "another":                         "another",
                "":                                "",
            },
            outAnnotations: map[string]string{
                "my.image-policy.k8s.io/test":     "test",
                "other.image-policy.k8s.io/test2": "annotation",
            },
        },
    }
    for _, tt := range tests {
        // Use a closure so defer statements trigger between loop iterations.
        t.Run(tt.test, func(t *testing.T) {
            service := new(annotationService)

            server, err := NewTestServer(service, serverCert, serverKey, caCert)
            if err != nil {
                t.Errorf("%s: failed to create server: %v", tt.test, err)
                return
            }
            defer server.Close()

            wh, err := newImagePolicyWebhook(server.URL, clientCert, clientKey, caCert, 0, true)
            if err != nil {
                t.Errorf("%s: failed to create client: %v", tt.test, err)
                return
            }

            pod := goodPod("test")
            pod.Annotations = tt.annotations

            attr := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, false, &user.DefaultInfo{})

            err = wh.Validate(attr, nil)
            if err != nil {
                t.Errorf("expected successful admission")
            }

            if !reflect.DeepEqual(tt.outAnnotations, service.Annotations()) {
                t.Errorf("expected annotations sent to webhook: %v to match expected: %v", service.Annotations(), tt.outAnnotations)
            }
        })
    }
}

func TestReturnedAnnotationAdd(t *testing.T) {
    tests := []struct {
        test                string
        pod                 *api.Pod
        verifierAnnotations map[string]string
        expectedAnnotations map[string]string
    }{
        {
            test: "Add valid response annotations",
            pod:  goodPod("good"),
            verifierAnnotations: map[string]string{
                "foo-test": "true",
                "bar-test": "false",
            },
            expectedAnnotations: map[string]string{
                "imagepolicywebhook.image-policy.k8s.io/foo-test": "true",
                "imagepolicywebhook.image-policy.k8s.io/bar-test": "false",
            },
        },
        {
            test:                "No returned annotations are ignored",
            pod:                 goodPod("good"),
            verifierAnnotations: map[string]string{},
            expectedAnnotations: map[string]string{},
        },
        {
            test:                "Handles nil annotations",
            pod:                 goodPod("good"),
            verifierAnnotations: nil,
            expectedAnnotations: map[string]string{},
        },
        {
            test: "Adds annotations for bad request",
            pod: &api.Pod{
                Spec: api.PodSpec{
                    ServiceAccountName: "default",
                    SecurityContext:    &api.PodSecurityContext{},
                    Containers: []api.Container{
                        {
                            Image:           "bad",
                            SecurityContext: &api.SecurityContext{},
                        },
                    },
                },
            },
            verifierAnnotations: map[string]string{
                "foo-test": "false",
            },
            expectedAnnotations: map[string]string{
                "imagepolicywebhook.image-policy.k8s.io/foo-test": "false",
            },
        },
    }
    for _, tt := range tests {
        // Use a closure so defer statements trigger between loop iterations.
        t.Run(tt.test, func(t *testing.T) {
            service := new(mockService)
            service.statusCode = 200
            service.outAnnotations = tt.verifierAnnotations

            server, err := NewTestServer(service, serverCert, serverKey, caCert)
            if err != nil {
                t.Errorf("%s: failed to create server: %v", tt.test, err)
                return
            }
            defer server.Close()

            wh, err := newImagePolicyWebhook(server.URL, clientCert, clientKey, caCert, 0, true)
            if err != nil {
                t.Errorf("%s: failed to create client: %v", tt.test, err)
                return
            }

            pod := tt.pod

            attr := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, false, &user.DefaultInfo{})
            annotations := make(map[string]string)
            attr = &fakeAttributes{attr, annotations}

            err = wh.Validate(attr, nil)
            if !reflect.DeepEqual(annotations, tt.expectedAnnotations) {
                t.Errorf("got audit annotations: %v; want: %v", annotations, tt.expectedAnnotations)
            }
        })
    }
}

func goodPod(containerID string) *api.Pod {
    return &api.Pod{
        Spec: api.PodSpec{
            ServiceAccountName: "default",
            SecurityContext:    &api.PodSecurityContext{},
            Containers: []api.Container{
                {
                    Image:           containerID,
                    SecurityContext: &api.SecurityContext{},
                },
            },
        },
    }
}<|fim▁end|>
<|file_name|>tipoDocumentoFormulario.js<|end_file_name|><|fim▁begin|>var baseURL;

$.validator.addMethod("alfanumerico", function(value, element) {<|fim▁hole|>}, "Este campo es alfanumerico.");

$("#frmGuardaTipoDocumento").validate({
    rules : {
        descripcion : "required",
        codigo : {required:true,alfanumerico:true},
    },
    messages : {
        descripcion : "Ingrese este campo.",
        codigo : {required:"Ingrese este campo.",alfanumerico:"Este campo es alfanumerico."},
    },
    submitHandler : function(form) {
        $.ajax(form.action, {
            async : false,
            type : "POST",
            data : $(form).serialize(),
            success : function(contenido) {
                //alert("contenido :"+ contenido);
                if(contenido=="error"){
                    var mensaje="Este tipo de documento ya ha sido registrado";
                    alert(mensaje);
                }
                else{
                    baseURL = $("#baseURL").val();
                    $.get(baseURL + "mantenimientoInterno/listarTiposDocumentos?info="+contenido, function(respuesta) {
                        $("#contenidoPrincipal").html(respuesta);
                        $("#title-page").html("Mantenimiento Tipo Entidad Documento - Listado");
                    });
                }
            }
        });
    }
});

function cancelarTipoDocumento(){
    var baseURL;
    baseURL = $("#baseURL").val();
    $("#contenidoPrincipal").html("Cargando . . .");
    $.get(baseURL + "mantenimientoInterno/listarTiposDocumentos", function(respuesta) {
        $("#contenidoPrincipal").html(respuesta);
        $("#title-page").html("Mantenimiento Tipo Entidad Documento - Listado");
    });
}<|fim▁end|>
return this.optional(element) || /^[-._a-z0-9\- ]+$/i.test(value);
<|file_name|>test_randbytes.py<|end_file_name|><|fim▁begin|># Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Test cases for L{twisted.python.randbytes}.
"""

import os

from twisted.trial import unittest
from twisted.python import randbytes


class SecureRandomTestCaseBase(object):
    """
    Base class for secureRandom test cases.
    """

    def _check(self, source):
        """
        The given random bytes source should return the number of bytes
        requested each time it is called and should probably not return the
        same bytes on two consecutive calls (although this is a perfectly
        legitimate occurrence and rejecting it may generate a spurious failure
        -- maybe we'll get lucky and the heat death will come first).
        """
        for nbytes in range(17, 25):
            s = source(nbytes)
            self.assertEqual(len(s), nbytes)
            s2 = source(nbytes)
            self.assertEqual(len(s2), nbytes)
            # This is crude but hey
            self.assertNotEquals(s2, s)


class SecureRandomTestCase(SecureRandomTestCaseBase, unittest.TestCase):
    """
    Test secureRandom under normal conditions.
    """

    def test_normal(self):
        """
        L{randbytes.secureRandom} should return a string of the requested
        length and make some effort to make its result otherwise
        unpredictable.
        """
        self._check(randbytes.secureRandom)


class ConditionalSecureRandomTestCase(SecureRandomTestCaseBase,
                                      unittest.TestCase):
    """
    Test random sources one by one, then remove each one to check the
    fallback behavior.
    """

    def setUp(self):
        """
        Create a L{randbytes.RandomFactory} to use in the tests.
        """
        self.factory = randbytes.RandomFactory()

    def errorFactory(self, nbytes):
        """
        A factory raising an error when a source is not available.
        """
        raise randbytes.SourceNotAvailable()

    def test_osUrandom(self):
        """
        L{RandomFactory._osUrandom} should work as a random source whenever
        L{os.urandom} is available.
        """
        self._check(self.factory._osUrandom)

    def test_withoutAnything(self):
        """
        Remove all secure sources and assert it raises a failure. Then try
        the fallback parameter.
        """
        self.factory._osUrandom = self.errorFactory
        self.assertRaises(randbytes.SecureRandomNotAvailable,
                          self.factory.secureRandom, 18)

        def wrapper():
            return self.factory.secureRandom(18, fallback=True)
        s = self.assertWarns(
            RuntimeWarning,
            "urandom unavailable - "
            "proceeding with non-cryptographically secure random source",
            __file__,
            wrapper)
        self.assertEqual(len(s), 18)


class RandomTestCaseBase(SecureRandomTestCaseBase, unittest.TestCase):
    """
    'Normal' random test cases.
    """

    def test_normal(self):
        """
        Test basic case.<|fim▁hole|>

    def test_withoutGetrandbits(self):
        """
        Test C{insecureRandom} without C{random.getrandbits}.
        """
        factory = randbytes.RandomFactory()
        factory.getrandbits = None
        self._check(factory.insecureRandom)<|fim▁end|>
        """
        self._check(randbytes.insecureRandom)
<|file_name|>test_iamgen.py<|end_file_name|><|fim▁begin|># Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .common import BaseTest, load_data
from c7n.config import Config, Bag
from c7n import manager

import fnmatch


class TestIamGen(BaseTest):
<|fim▁hole|>
    def check_permissions(self, perm_db, perm_set, path):
        invalid = []
        for p in perm_set:
            if ':' not in p:
                invalid.append(p)
                continue
            s, a = p.split(':', 1)
            if s not in perm_db:
                invalid.append(p)
                continue
            if '*' in a:
                if not fnmatch.filter(perm_db[s], a):
                    invalid.append(p)
                    continue
            elif a not in perm_db[s]:
                invalid.append(p)
        if not invalid:
            return []
        return [(path, invalid)]

    def test_iam_permissions_validity(self):
        cfg = Config.empty()
        missing = set()
        all_invalid = []

        perms = load_data('iam-actions.json')

        for k, v in manager.resources.items():
            p = Bag({'name': 'permcheck', 'resource': k, 'provider_name': 'aws'})
            ctx = self.get_context(config=cfg, policy=p)
            mgr = v(ctx, p)
            invalid = []
            # if getattr(mgr, 'permissions', None):
            #     print(mgr)

            found = False
            for s in (mgr.resource_type.service,
                      getattr(mgr.resource_type, 'permission_prefix', None)):
                if s in perms:
                    found = True
            if not found:
                missing.add("%s->%s" % (k, mgr.resource_type.service))
                continue
            invalid.extend(self.check_permissions(perms, mgr.get_permissions(), k))

            for n, a in v.action_registry.items():
                p['actions'] = [n]
                invalid.extend(
                    self.check_permissions(
                        perms, a({}, mgr).get_permissions(),
                        "{k}.actions.{n}".format(k=k, n=n)))

            for n, f in v.filter_registry.items():
                if n in ('or', 'and', 'not', 'missing'):
                    continue
                p['filters'] = [n]
                invalid.extend(
                    self.check_permissions(
                        perms, f({}, mgr).get_permissions(),
                        "{k}.filters.{n}".format(k=k, n=n)))

            if invalid:
                for k, perm_set in invalid:
                    perm_set = [i for i in perm_set
                                if not i.startswith('elasticloadbalancing')]
                    if perm_set:
                        all_invalid.append((k, perm_set))

        if missing:
            raise ValueError(
                "resources missing service %s" % ('\n'.join(sorted(missing))))
        if all_invalid:
            raise ValueError(
                "invalid permissions \n %s" % ('\n'.join(sorted(map(str, all_invalid)))))<|fim▁end|>
<|file_name|>app-ctrl.ts<|end_file_name|><|fim▁begin|>/// <reference path="../../../typings/angularjs/angular.d.ts" /><|fim▁hole|>
export class AppCtrl {
    static $inject = [
        '$http'
    ];

    loaded: boolean;
    scripts: Array<{}>;

    constructor($http: ng.IHttpService) {
        this.loaded = false;
        this.scripts = [];
        var url = 'https://hspd-api.herokuapp.com/hubot_scripts';
        $http.get<Array<{}>>(url).then((res) => {
            this.loaded = true;
            this.scripts = res.data;
        });
    }
}<|fim▁end|>
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity.  If not, see <http://www.gnu.org/licenses/>.

#![warn(missing_docs)]
#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]

//! Signer module
//!
//! This module manages your private keys and accounts/identities
//! that can be used within Dapps.
//!
//! It exposes API (over `WebSockets`) accessed by Signer UIs.
//! Each transaction sent by Dapp is broadcasted to Signer UIs
//! and their responsibility is to confirm (or confirm and sign)
//! the transaction for you.
//!
//! ```
//! extern crate jsonrpc_core;
//! extern crate ethcore_signer;
//! extern crate ethcore_rpc;
//!
//! use std::sync::Arc;
//! use jsonrpc_core::IoHandler;
//! use jsonrpc_core::reactor::RpcEventLoop;
//! use ethcore_signer::ServerBuilder;
//! use ethcore_rpc::ConfirmationsQueue;
//!
//! fn main() {
//!     let queue = Arc::new(ConfirmationsQueue::default());
//!     let io = Arc::new(IoHandler::new().into());
//!     let event_loop = RpcEventLoop::spawn();
//!     let _server = ServerBuilder::new(queue, "/tmp/authcodes".into())
//!         .start("127.0.0.1:8084".parse().unwrap(), event_loop.handler(io));
//! }
//! ```

#[macro_use]
extern crate log;
extern crate env_logger;
extern crate rand;

extern crate ethcore_util as util;
extern crate ethcore_rpc as rpc;
extern crate ethcore_io as io;
extern crate jsonrpc_core;
extern crate ws;

extern crate ethcore_devtools as devtools;

mod authcode_store;<|fim▁hole|>
pub use authcode_store::*;
pub use ws_server::*;<|fim▁end|>
mod ws_server;

/// Exported tests for use in signer RPC client testing
pub mod tests;
<|file_name|>_virtual_machine_images_operations.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer

from .. import models as _models
from .._vendor import _convert_request, _format_url_section

T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False


def build_get_request(
    location: str,
    publisher_name: str,
    offer: str,
    skus: str,
    version: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    api_version = "2020-12-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}')
    path_format_arguments = {
        "location": _SERIALIZER.url("location", location, 'str'),
        "publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
        "offer": _SERIALIZER.url("offer", offer, 'str'),
        "skus": _SERIALIZER.url("skus", skus, 'str'),
        "version": _SERIALIZER.url("version", version, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')<|fim▁hole|>
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_list_request(
    location: str,
    publisher_name: str,
    offer: str,
    skus: str,
    subscription_id: str,
    *,
    expand: Optional[str] = None,
    top: Optional[int] = None,
    orderby: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    api_version = "2020-12-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions')
    path_format_arguments = {
        "location": _SERIALIZER.url("location", location, 'str'),
        "publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
        "offer": _SERIALIZER.url("offer", offer, 'str'),
        "skus": _SERIALIZER.url("skus", skus, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if expand is not None:
        query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
    if top is not None:
        query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
    if orderby is not None:
        query_parameters['$orderby'] = _SERIALIZER.query("orderby", orderby, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_list_offers_request(
    location: str,
    publisher_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    api_version = "2020-12-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers')
    path_format_arguments = {
        "location": _SERIALIZER.url("location", location, 'str'),
        "publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_list_publishers_request(
    location: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    api_version = "2020-12-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers')
    path_format_arguments = {
        "location": _SERIALIZER.url("location", location, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_list_skus_request(
    location: str,
    publisher_name: str,
    offer: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    api_version = "2020-12-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus')
    path_format_arguments = {
        "location": _SERIALIZER.url("location", location, 'str'),
        "publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
        "offer": _SERIALIZER.url("offer", offer, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


class VirtualMachineImagesOperations(object):
    """VirtualMachineImagesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2020_12_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def get(
        self,
        location: str,
        publisher_name: str,
        offer: str,
        skus: str,
        version: str,
        **kwargs: Any
    ) -> "_models.VirtualMachineImage":
        """Gets a virtual machine image.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param offer: A valid image publisher offer.
        :type offer: str
        :param skus: A valid image SKU.
        :type skus: str
        :param version: A valid image SKU version.
        :type version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualMachineImage, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2020_12_01.models.VirtualMachineImage
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualMachineImage"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            location=location,
            publisher_name=publisher_name,
            offer=offer,
            skus=skus,
            version=version,
            subscription_id=self._config.subscription_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('VirtualMachineImage', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}'}  # type: ignore


    @distributed_trace
    def list(
        self,
        location: str,
        publisher_name: str,
        offer: str,
        skus: str,
        expand: Optional[str] = None,
        top: Optional[int] = None,
        orderby: Optional[str] = None,
        **kwargs: Any
    ) -> List["_models.VirtualMachineImageResource"]:
        """Gets a list of all virtual machine image versions for the specified location,
        publisher, offer, and SKU.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param offer: A valid image publisher offer.
        :type offer: str
        :param skus: A valid image SKU.
        :type skus: str
        :param expand: The expand expression to apply on the operation.
        :type expand: str
        :param top:
        :type top: int
        :param orderby:
        :type orderby: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of VirtualMachineImageResource, or the result of cls(response)
        :rtype: list[~azure.mgmt.compute.v2020_12_01.models.VirtualMachineImageResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.VirtualMachineImageResource"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_list_request(
            location=location,
            publisher_name=publisher_name,
            offer=offer,
            skus=skus,
            subscription_id=self._config.subscription_id,
            expand=expand,
            top=top,
            orderby=orderby,
            template_url=self.list.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions'}  # type: ignore


    @distributed_trace
    def list_offers(
        self,
        location: str,
        publisher_name: str,
        **kwargs: Any
    ) -> List["_models.VirtualMachineImageResource"]:
        """Gets a list of virtual machine image offers for the specified location and publisher.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of VirtualMachineImageResource, or the result of cls(response)
        :rtype: list[~azure.mgmt.compute.v2020_12_01.models.VirtualMachineImageResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.VirtualMachineImageResource"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_list_offers_request(
            location=location,
            publisher_name=publisher_name,
            subscription_id=self._config.subscription_id,
            template_url=self.list_offers.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    list_offers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers'}  # type: ignore


    @distributed_trace
    def list_publishers(
        self,
        location: str,
        **kwargs: Any
    ) -> List["_models.VirtualMachineImageResource"]:
        """Gets a list of virtual machine image publishers for the specified Azure location.

        :param location: The name of a supported Azure region.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of VirtualMachineImageResource, or the result of cls(response)
        :rtype: list[~azure.mgmt.compute.v2020_12_01.models.VirtualMachineImageResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.VirtualMachineImageResource"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_list_publishers_request(
            location=location,
            subscription_id=self._config.subscription_id,
            template_url=self.list_publishers.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    list_publishers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers'}  # type: ignore


    @distributed_trace
    def list_skus(
        self,
        location: str,
        publisher_name: str,
        offer: str,
        **kwargs: Any
    ) -> List["_models.VirtualMachineImageResource"]:
        """Gets a list of virtual machine image SKUs for the specified location, publisher, and offer.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param offer: A valid image publisher offer.
        :type offer: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of VirtualMachineImageResource, or the result of cls(response)
        :rtype: list[~azure.mgmt.compute.v2020_12_01.models.VirtualMachineImageResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.VirtualMachineImageResource"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_list_skus_request(
            location=location,
            publisher_name=publisher_name,
            offer=offer,
            subscription_id=self._config.subscription_id,
            template_url=self.list_skus.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    list_skus.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus'}  # type: ignore<|fim▁end|>
<|file_name|>RegCache.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2012- PPSSPP Project. // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, version 2.0 or later versions. // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License 2.0 for more details. // A copy of the GPL 2.0 should have been included with the program. // If not, see http://www.gnu.org/licenses/ // Official git repository and contact information can be found at // https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/. #include "Core/MIPS/MIPS.h" #include "Core/MIPS/MIPSTables.h" #include "Core/MIPS/MIPSAnalyst.h" #include "Core/MIPS/x86/Jit.h" #include "Core/MIPS/x86/Asm.h" #include "Core/MIPS/x86/RegCache.h" using namespace Gen; static const int allocationOrder[] = { // R12, when used as base register, for example in a LEA, can generate bad code! Need to look into this. #ifdef _M_X64 #ifdef _WIN32 RSI, RDI, R13, R14, R8, R9, R10, R11, R12, //, RCX #else RBP, R13, R14, R8, R9, R10, R11, R12, //, RCX #endif #elif _M_IX86 ESI, EDI, EBP, EDX, ECX, // Let's try to free up EBX as well. #endif }; GPRRegCache::GPRRegCache() : emit(0), mips(0) { memset(regs, 0, sizeof(regs)); memset(xregs, 0, sizeof(xregs)); } void GPRRegCache::Start(MIPSState *mips, MIPSAnalyst::AnalysisResults &stats) { this->mips = mips; for (int i = 0; i < NUM_X_REGS; i++) { xregs[i].free = true; xregs[i].dirty = false; xregs[i].allocLocked = false; } for (int i = 0; i < NUM_MIPS_GPRS; i++) { regs[i].location = GetDefaultLocation(i); regs[i].away = false; regs[i].locked = false; } // todo: sort to find the most popular regs /* int maxPreload = 2; for (int i = 0; i < NUM_MIPS_GPRS; i++) { if (stats.numReads[i] > 2 || stats.numWrites[i] >= 2) { LoadToX64(i, true, false); //stats.firstRead[i] <= stats.firstWrite[i], false); maxPreload--; if (!maxPreload) break; } }*/ //Find top regs - preload them (load bursts ain't bad) //But only preload IF written OR reads >= 3 } // these are MIPS reg indices void GPRRegCache::Lock(int p1, int p2, int p3, int p4) { regs[p1].locked = true; if (p2 != 0xFF) regs[p2].locked = true; if (p3 != 0xFF) regs[p3].locked = true; if (p4 != 0xFF) regs[p4].locked = true; } // these are x64 reg indices void GPRRegCache::LockX(int x1, int x2, int x3, int x4) { if (xregs[x1].allocLocked) { PanicAlert("RegCache: x %i already locked!", x1); } xregs[x1].allocLocked = true; if (x2 != 0xFF) xregs[x2].allocLocked = true; if (x3 != 0xFF) xregs[x3].allocLocked = true; if (x4 != 0xFF) xregs[x4].allocLocked = true; } void GPRRegCache::UnlockAll() { for (int i = 0; i < NUM_MIPS_GPRS; i++) regs[i].locked = false; } void GPRRegCache::UnlockAllX() { for (int i = 0; i < NUM_X_REGS; i++) xregs[i].allocLocked = false; } X64Reg GPRRegCache::GetFreeXReg() { int aCount; const int *aOrder = GetAllocationOrder(aCount); for (int i = 0; i < aCount; i++) { X64Reg xr = (X64Reg)aOrder[i]; if (!xregs[xr].allocLocked && xregs[xr].free) { return (X64Reg)xr; } } //Okay, not found :( Force grab one //TODO - add a pass to grab xregs whose mipsreg is not used in the next 3 instructions for (int i = 0; i < aCount; i++) { X64Reg xr = (X64Reg)aOrder[i]; if (xregs[xr].allocLocked) continue; int preg = xregs[xr].mipsReg; if (!regs[preg].locked) { StoreFromRegister(preg); return xr; 
} } //Still no dice? Die! _assert_msg_(DYNA_REC, 0, "Regcache ran out of regs"); return (X64Reg) -1; } void GPRRegCache::FlushR(X64Reg reg) { if (reg >= NUM_X_REGS) PanicAlert("Flushing non existent reg"); if (!xregs[reg].free) StoreFromRegister(xregs[reg].mipsReg); } int GPRRegCache::SanityCheck() const { for (int i = 0; i < NUM_MIPS_GPRS; i++) { if (regs[i].away) { if (regs[i].location.IsSimpleReg()) { Gen::X64Reg simple = regs[i].location.GetSimpleReg(); if (xregs[simple].allocLocked) return 1; if (xregs[simple].mipsReg != i) return 2; }<|fim▁hole|> return 0; } void GPRRegCache::DiscardRegContentsIfCached(int preg) { if (regs[preg].away && regs[preg].location.IsSimpleReg()) { X64Reg xr = regs[preg].location.GetSimpleReg(); xregs[xr].free = true; xregs[xr].dirty = false; xregs[xr].mipsReg = -1; regs[preg].away = false; regs[preg].location = GetDefaultLocation(preg); } } void GPRRegCache::SetImmediate32(int preg, u32 immValue) { // ZERO is always zero. Let's just make sure. if (preg == 0) immValue = 0; DiscardRegContentsIfCached(preg); regs[preg].away = true; regs[preg].location = Imm32(immValue); } bool GPRRegCache::IsImmediate(int preg) const { // Always say yes for ZERO, even if it's in a temp reg. if (preg == 0) return true; return regs[preg].location.IsImm(); } u32 GPRRegCache::GetImmediate32(int preg) const { _dbg_assert_msg_(JIT, IsImmediate(preg), "Reg %d must be an immediate.", preg); // Always 0 for ZERO. if (preg == 0) return 0; return regs[preg].location.GetImmValue(); } const int *GPRRegCache::GetAllocationOrder(int &count) { count = sizeof(allocationOrder) / sizeof(const int); return allocationOrder; } OpArg GPRRegCache::GetDefaultLocation(int reg) const { return M(&mips->r[reg]); } void GPRRegCache::KillImmediate(int preg, bool doLoad, bool makeDirty) { if (regs[preg].away) { if (regs[preg].location.IsImm()) BindToRegister(preg, doLoad, makeDirty); else if (regs[preg].location.IsSimpleReg()) xregs[RX(preg)].dirty |= makeDirty; } } void GPRRegCache::BindToRegister(int i, bool doLoad, bool makeDirty) { if (!regs[i].away && regs[i].location.IsImm()) PanicAlert("Bad immediate"); if (!regs[i].away || (regs[i].away && regs[i].location.IsImm())) { X64Reg xr = GetFreeXReg(); if (xregs[xr].dirty) PanicAlert("Xreg already dirty"); if (xregs[xr].allocLocked) PanicAlert("GetFreeXReg returned locked register"); xregs[xr].free = false; xregs[xr].mipsReg = i; xregs[xr].dirty = makeDirty || regs[i].location.IsImm(); OpArg newloc = ::Gen::R(xr); if (doLoad) { // Force ZERO to be 0. if (i == 0) emit->MOV(32, newloc, Imm32(0)); else emit->MOV(32, newloc, regs[i].location); } for (int j = 0; j < 32; j++) { if (i != j && regs[j].location.IsSimpleReg() && regs[j].location.GetSimpleReg() == xr) { ERROR_LOG(JIT, "BindToRegister: Strange condition"); Crash(); } } regs[i].away = true; regs[i].location = newloc; } else { // reg location must be simplereg; memory locations // and immediates are taken care of above. xregs[RX(i)].dirty |= makeDirty; } if (xregs[RX(i)].allocLocked) { PanicAlert("Seriously WTF, this reg should have been flushed"); } } void GPRRegCache::StoreFromRegister(int i) { if (regs[i].away) { bool doStore; if (regs[i].location.IsSimpleReg()) { X64Reg xr = RX(i); xregs[xr].free = true; xregs[xr].mipsReg = -1; doStore = xregs[xr].dirty; xregs[xr].dirty = false; } else { //must be immediate - do nothing doStore = true; } OpArg newLoc = GetDefaultLocation(i); // But never store to ZERO. 
if (doStore && i != 0) emit->MOV(32, newLoc, regs[i].location); regs[i].location = newLoc; regs[i].away = false; } } void GPRRegCache::Flush() { for (int i = 0; i < NUM_X_REGS; i++) { if (xregs[i].allocLocked) PanicAlert("Someone forgot to unlock X64 reg %i.", i); } for (int i = 0; i < NUM_MIPS_GPRS; i++) { if (regs[i].locked) { PanicAlert("Somebody forgot to unlock MIPS reg %i.", i); } if (regs[i].away) { if (regs[i].location.IsSimpleReg()) { X64Reg xr = RX(i); StoreFromRegister(i); xregs[xr].dirty = false; } else if (regs[i].location.IsImm()) { StoreFromRegister(i); } else { _assert_msg_(DYNA_REC,0,"Jit64 - Flush unhandled case, reg %i PC: %08x", i, mips->pc); } } } }<|fim▁end|>
else if (regs[i].location.IsImm()) return 3; } }
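Each record in this dump follows the same fill-in-the-middle (FIM) layout: a <|file_name|>...<|end_file_name|> header, the file body opened by <|fim▁begin|>, a gap marked <|fim▁hole|>, the rest of the file up to <|fim▁end|>, and then the held-out middle as the completion. A minimal sketch of splitting one record back into its parts; the marker strings are copied verbatim from the records here, but the helper itself is illustrative and not part of any record, and it assumes exactly one hole per record:

# Illustrative only: recover (filename, prefix, suffix, middle) from one record.
def split_fim_record(prompt, completion):
    # marker strings copied verbatim from the records in this dump
    filename = prompt.split("<|file_name|>")[1].split("<|end_file_name|>")[0]
    body = prompt.split("<|fim▁begin|>", 1)[1]
    prefix, rest = body.split("<|fim▁hole|>", 1)   # code before the gap
    suffix = rest.split("<|fim▁end|>", 1)[0]       # code after the gap
    return filename, prefix, suffix, completion    # completion is the gap itself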
<|file_name|>hooking.py<|end_file_name|><|fim▁begin|># # Copyright 2011-2017 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Refer to the README and COPYING files for full details of the license # """ hooking - various stuff useful when writing vdsm hooks A vm hook expects domain xml in a file named by an environment variable called _hook_domxml. The hook may change the xml, but the "china store rule" applies - if you break something, you own it. before_migration_destination hook receives the xml of the domain from the source host. The xml of the domain at the destination will differ in various details. Return codes: 0 - the hook ended successfully. 1 - the hook failed, other hooks should be processed. 2 - the hook failed, no further hooks should be processed. >2 - reserved """ from vdsm import hooks import json import os import sys from xml.dom import minidom from vdsm.commands import execCmd from vdsm.common.conv import tobool # make pyflakes happy execCmd tobool def read_domxml(): with open(os.environ['_hook_domxml']) as f: return minidom.parseString(f.read()) def write_domxml(domxml): with open(os.environ['_hook_domxml'], 'w') as f:<|fim▁hole|> def read_json(): with open(os.environ['_hook_json']) as f: return json.loads(f.read()) def write_json(data): with open(os.environ['_hook_json'], 'w') as f: f.write(json.dumps(data)) def log(message): sys.stderr.write(message + '\n') def exit_hook(message, return_code=2): """ Exit the hook with a given message, which will be printed to the standard error stream. A newline will be printed at the end. The default return code is 2 for signaling that an error occurred. """ sys.stderr.write(message + "\n") sys.exit(return_code) def load_vm_launch_flags_from_file(vm_id): return hooks.load_vm_launch_flags_from_file(vm_id) def dump_vm_launch_flags_to_file(vm_id, flags): hooks.dump_vm_launch_flags_to_file(vm_id, flags)<|fim▁end|>
f.write(domxml.toxml(encoding='utf-8'))
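The hooking module above documents the vdsm hook contract: the domain XML arrives through the file named by the _hook_domxml environment variable, and exit codes 0/1/2 signal success or failure. A minimal sketch of a hook script built on that contract; the <custom/> element it injects is a made-up placeholder for illustration, not anything vdsm expects:

# Hypothetical hook script using the hooking module above; the <custom/>
# element is only an illustration of editing the domain XML in place.
import hooking

def main():
    domxml = hooking.read_domxml()
    devices = domxml.getElementsByTagName('devices')
    if not devices:
        hooking.exit_hook("no <devices> element found")  # exits with code 2
    marker = domxml.createElement('custom')
    devices[0].appendChild(marker)
    hooking.write_domxml(domxml)

if __name__ == '__main__':
    main()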
<|file_name|>views.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from django.shortcuts import render from .models import * from .forms import * from comunicacion.lugar.models import * from mapeo.models import * from django.http import HttpResponse from django.db.models import Sum, Count, Avg import collections import numpy as np # Create your views here. def _queryset_filtrado(request): params = {} if request.session['year']: params['annio'] = request.session['year'] if request.session['municipio']: params['productor__comunidad__municipio__in'] = request.session['municipio'] else: if request.session['comunidad']: params['productor__comunidad__in'] = request.session['comunidad'] if request.session['ciclo']: params['ciclo_productivo'] = request.session['ciclo'] if request.session['rubro']: params['cultivo'] = request.session['rubro'] if request.session['organizacion']: params['productor__productor__organizacion'] = request.session['organizacion'] unvalid_keys = [] for key in params: if not params[key]: unvalid_keys.append(key) for key in unvalid_keys: del params[key] return Monitoreo.objects.filter(**params) def consulta(request,template="granos_basicos/consulta.html"): if request.method == 'POST': mensaje = None form = Consulta(request.POST) if form.is_valid(): request.session['year'] = form.cleaned_data['year'] request.session['municipio'] = form.cleaned_data['municipio'] request.session['comunidad'] = form.cleaned_data['comunidad'] request.session['ciclo'] = form.cleaned_data['ciclo'] request.session['rubro'] = form.cleaned_data['rubro'] request.session['organizacion'] = form.cleaned_data['organizacion'] mensaje = "Todas las variables estan correctamente :)" request.session['activo'] = True centinela = 1 else: centinela = 0 else: form = Consulta() mensaje = "Existen alguno errores" centinela = 0 try: del request.session['year'] del request.session['municipio'] del request.session['comunidad'] del request.session['ciclo'] del request.session['rubro'] del request.session['organizacion'] except: pass return render(request, template, locals()) def genero_produccion(request,template="granos_basicos/productores/genero_produccion.html"): filtro = _queryset_filtrado(request) productores = filtro.distinct('productor').count() CHOICE_SEXO = ((1,'Hombre'),(2,'Mujer')) choice = ((1,'Hombre'),(2,'Mujer'),(3,'Compartida')) sexo_productor = {} for obj in choice: conteo = filtro.filter(productor__productor__jefe = obj[0]).distinct('productor').count() sexo_productor[obj[1]] = conteo if request.GET.get('jefe'): jefe = request.GET['jefe'] if jefe == '1': CHOICE_SEXO_JEFE = ((1,'Hombre'),) elif jefe == '2': CHOICE_SEXO_JEFE = ((2,'Mujer'),) elif jefe == '3': CHOICE_SEXO_JEFE = ((3,'Compartida'),) else: CHOICE_SEXO_JEFE = ((1,'Hombre'),(2,'Mujer'),(3,'Compartida')) RELACION_CHOICES = ((1,'Jefe/Jefa de familia'),(2,'Cónyuge'), (3,'Hijo/Hija'),(4,'Otro familiar'), (5,'Administrador'),) prod_gb = {} prod = {} dic_relacion = {} for obj in CHOICE_SEXO_JEFE: for x in CHOICE_SEXO: #relacion entre responsables de familia jefe_familia = filtro.filter(productor__sexo = x[0],productor__productor__jefe = obj[0]).distinct('productor').count() prod[x[1]] = jefe_familia for relacion in RELACION_CHOICES: conteo = filtro.filter(productor__productorgranosbasicos__relacion = relacion[0],productor__productor__jefe = obj[0]).distinct('productor').count() dic_relacion[relacion[1]] = conteo for x in CHOICE_SEXO: conteo = filtro.filter(productor__sexo = x[0]).distinct('productor').count() prod_gb[x[1]] = conteo return 
render(request, template, locals()) def composicion_familiar(request,template="granos_basicos/productores/composicion_familiar.html"): filtro = _queryset_filtrado(request) productores = filtro.distinct('productor').count() #nuevas salidas lista_hijos = [] lista_hijas = [] lista_sumatoria = [] for obj in filtro: hijos = ComposicionFamiliar.objects.filter(persona = obj.productor,familia = '3').count() lista_hijos.append(hijos) hijas = ComposicionFamiliar.objects.filter(persona = obj.productor,familia = '4').count() lista_hijas.append(hijas) sumatoria = hijos + hijas lista_sumatoria.append(sumatoria) result = [] #promedio,mediana,desviacion standard, minimo y maximo promedios = [np.mean(lista_hijos),np.mean(lista_hijas),np.mean(lista_sumatoria)] mediana = [np.median(lista_hijos),np.median(lista_hijas),np.median(lista_sumatoria)] desviacion = [np.std(lista_hijos),np.std(lista_hijas),np.std(lista_sumatoria)] minimo = [min(lista_hijos),min(lista_hijas),min(lista_sumatoria)] maximo = [max(lista_hijos),max(lista_hijas),max(lista_sumatoria)] # agregando a la lista result.append(promedios) result.append(mediana) result.append(desviacion) result.append(minimo) result.append(maximo) #grafico nivel educativo de los padres en las familias ESCOLARIDAD_CHOICES = ( (1,'Ninguno'),(2,'Primaria Incompleta'),(3,'Primaria'), (4,'Secundaria Incompleta'),(5,'Secundaria'),(6,'Técnico'), (7,'Universitario'),(8,'Profesional')) escolaridad = collections.OrderedDict() for obj in ESCOLARIDAD_CHOICES: madre = filtro.filter(productor__composicionfamiliar__familia = '2', productor__composicionfamiliar__escolaridad = obj[0]).distinct('productor__composicionfamiliar').count() padre = filtro.filter(productor__composicionfamiliar__familia = '1', productor__composicionfamiliar__escolaridad = obj[0]).distinct('productor__composicionfamiliar').count() #hijos-------------------- hijos_5_12 = filtro.filter(productor__composicionfamiliar__familia = '3', productor__composicionfamiliar__escolaridad = obj[0], productor__composicionfamiliar__edad__range = (5,12)).distinct('productor__composicionfamiliar').count() hijos_13_18 = filtro.filter(productor__composicionfamiliar__familia = '3', productor__composicionfamiliar__escolaridad = obj[0], productor__composicionfamiliar__edad__range = (13,18)).distinct('productor__composicionfamiliar').count() hijos_19 = filtro.filter(productor__composicionfamiliar__familia = '3', productor__composicionfamiliar__escolaridad = obj[0], productor__composicionfamiliar__edad__range = (19,100)).distinct('productor__composicionfamiliar').count() #hijas-------------------- hijas_5_12 = filtro.filter(productor__composicionfamiliar__familia = '4', productor__composicionfamiliar__escolaridad = obj[0], productor__composicionfamiliar__edad__range = (5,12)).distinct('productor__composicionfamiliar').count() hijas_13_18 = filtro.filter(productor__composicionfamiliar__familia = '4', productor__composicionfamiliar__escolaridad = obj[0], productor__composicionfamiliar__edad__range = (13,18)).distinct('productor__composicionfamiliar').count() hijas_19 = filtro.filter(productor__composicionfamiliar__familia = '4', productor__composicionfamiliar__escolaridad = obj[0], productor__composicionfamiliar__edad__range = (19,100)).distinct('productor__composicionfamiliar').count() escolaridad[obj[1]] = (madre,padre, hijos_5_12,hijos_13_18,hijos_19, hijas_5_12,hijas_13_18,hijas_19) #-------------------------------------------------------------------------------- SI_NO_CHOICES = ((1,'Si'),(2,'No')) FAMILIA_CHOICES = 
((1,'Padre'),(2,'Madre'),(3,'Hijo'),(4,'Hija'),(5,'Hermano'), (6,'Hermana'),(7,'Sobrino'),(8,'Sobrina'),(9,'Abuelo'), (10,'Abuela'),(11,'Cuñado'),(12,'Cuñada'),(13,'Yerno'), (14,'Nuera'),(15,'Otro'),) list_participacion = [] for obj in FAMILIA_CHOICES: total = filtro.filter(productor__composicionfamiliar__familia = obj[0]).distinct( 'productor__composicionfamiliar').count() si_participa = filtro.filter(productor__composicionfamiliar__familia = obj[0], productor__composicionfamiliar__participacion = '1').distinct( 'productor__composicionfamiliar').count() promedio = total / float(productores) promedio = round(promedio, 2) list_participacion.append((obj[1],saca_porcentajes(si_participa,total,False),promedio)) return render(request, template, locals()) def georeferencia(request,template="granos_basicos/monitoreos/georeferencia.html"): filtro = _queryset_filtrado(request) productores = filtro.distinct('productor').count() lista_mapa = filtro.values('nombre_parcela','latitud','longitud') mapa = [] for obj in lista_mapa: if obj['latitud'] != None and obj['longitud'] != None: mapa.append((obj['nombre_parcela'],obj['latitud'],obj['longitud'])) return render(request, template, locals()) def caracteristicas_parcela(request,template="granos_basicos/monitoreos/caracteristicas_parcela.html"): filtro = _queryset_filtrado(request) productores = filtro.distinct('productor').count() lista_parcela = [] lista_inclinado = [] lista_plano = [] #edad parcela y profundidad capa arable parcela = filtro.values('edad_parcela','profundidad_capa') for obj in parcela: if obj['edad_parcela'] != None and obj['profundidad_capa'] != None: lista_parcela.append((obj['edad_parcela'],obj['profundidad_capa'])) #edad de las parcelas menor_5 = filtro.filter(edad_parcela__range = (0,5)).count() edad_6_20 = filtro.filter(edad_parcela__range = (5.1,20)).count() mayor_20 = filtro.filter(edad_parcela__range = (20.1,100)).count() for obj in filtro: # % area inclinado > 60% area = DistribucionPendiente.objects.filter(monitoreo = obj,seleccion = '1').values_list('inclinado',flat = True) for x in area: if x >= 60: inclinado = DistribucionPendiente.objects.filter(monitoreo = obj,seleccion = '2').values_list('inclinado','monitoreo__profundidad_capa') lista_inclinado.append(inclinado) # % area plano > 60% area1 = DistribucionPendiente.objects.filter(monitoreo = obj,seleccion = '1').values_list('plano',flat = True) for y in area1: if y >= 60: plano = DistribucionPendiente.objects.filter(monitoreo = obj,seleccion = '2').values_list('plano','monitoreo__profundidad_capa') lista_plano.append(plano) #acceso agua SI_NO_CHOICES = ((1,'Si'),(2,'No')) acceso_agua = {} conteo_si = 0 for obj in SI_NO_CHOICES: conteo = filtro.filter(acceso_agua = obj[0]).count() acceso_agua[obj[1]] = conteo #fuente agua fuente_agua = {} conteo_si = filtro.filter(acceso_agua = 1).count()<|fim▁hole|> fuente_agua[obj[1]] = saca_porcentajes(conteo,conteo_si,False) return render(request, template, locals()) def ciclo_productivo(request,template="granos_basicos/monitoreos/ciclo_productivo.html"): filtro = _queryset_filtrado(request) productores = filtro.distinct('productor').count() #siembra fecha_siembra = filtro.values_list('datosmonitoreo__fecha_siembra',flat = True) lista_siembra = [] for obj in fecha_siembra: if obj != None: x = obj.isocalendar()[1] lista_siembra.append(x) l_siembra = sorted(lista_siembra) dic_siembra = collections.OrderedDict() for v in l_siembra: count = l_siembra.count(v) dic_siembra[v] = count #cosecha fecha_cosecha = 
filtro.values_list('datosmonitoreo__fecha_cosecha',flat = True) lista_cosecha = [] for obj in fecha_cosecha: if obj != None: x = obj.isocalendar()[1] lista_cosecha.append(x) l_cosecha = sorted(lista_cosecha) dic_cosecha = collections.OrderedDict() for v in l_cosecha: count = l_cosecha.count(v) dic_cosecha[v] = count return render(request, template, locals()) def uso_suelo(request,template="granos_basicos/monitoreos/uso_suelo.html"): filtro = _queryset_filtrado(request) productores = filtro.distinct('productor').count() USO_SUELO_CHOICES = ((1,'Área Total'),(2,'Cultivos Anuales (GB)'),(3,'Cultivos perennes'), (4,'Tacotales'),(5,'Potreros'),(6,'Pasto de Corte')) total = filtro.filter(productor__usosuelo__uso = '1').aggregate(total = Sum('productor__usosuelo__cantidad'))['total'] uso_suelo = collections.OrderedDict() for obj in USO_SUELO_CHOICES: #tabla 1 familias = filtro.filter(productor__usosuelo__uso = obj[0]).count() mz = filtro.filter(productor__usosuelo__uso = obj[0]).aggregate(total = Sum('productor__usosuelo__cantidad'))['total'] porcentaje = saca_porcentajes(mz,total,False) try: promedio = mz / float(productores) except: promedio = 0 #diccionario de datos uso_suelo[obj[1]] = (familias,mz,porcentaje,promedio) #tabla 2 tamano_finca = filtro.filter(productor__usosuelo__uso = '1').values_list('productor__usosuelo__cantidad',flat = True) granos_basicos = filtro.filter(productor__usosuelo__uso = '2').values_list('productor__usosuelo__cantidad',flat = True) area_siembra = filtro.values_list('datosmonitoreo__area_siembra',flat = True) result = [] #promedio,mediana,desviacion standard, minimo y maximo promedios = [np.mean(tamano_finca),np.mean(granos_basicos)] mediana = [np.median(tamano_finca),np.median(granos_basicos)] desviacion = [np.std(tamano_finca),np.std(granos_basicos)] minimo = [min(tamano_finca),min(granos_basicos)] maximo = [max(tamano_finca),max(granos_basicos)] # agregando a la lista result.append(promedios) result.append(mediana) result.append(desviacion) result.append(minimo) result.append(maximo) #distribucion area de siembra menor_1 = filtro.filter(datosmonitoreo__area_siembra__range = (0,0.99)).count() entre_1_2 = filtro.filter(datosmonitoreo__area_siembra__range = (1,2)).count() entre_2_3 = filtro.filter(datosmonitoreo__area_siembra__range = (2.1,3)).count() entre_3_4 = filtro.filter(datosmonitoreo__area_siembra__range = (3.1,4)).count() entre_4_5 = filtro.filter(datosmonitoreo__area_siembra__range = (4.1,5)).count() #promedio area de siembra area_siembra = filtro.values_list('datosmonitoreo__area_siembra',flat = True) lista = [] for obj in area_siembra: if obj != None: lista.append(obj) promedio_area = np.mean(lista) desviacion_area = np.std(lista) mediana_area = np.median(lista) minimo_area = min(lista) maximo_area = max(lista) return render(request, template, locals()) def recursos_economicos(request,template="granos_basicos/monitoreos/recursos_economicos.html"): filtro = _queryset_filtrado(request) productores = filtro.distinct('productor').count() dic = {} for obj in RESPUESTA_CHOICES: conteo = filtro.filter(recursossiembra__respuesta = obj[0]).count() dic[obj[1]] = conteo return render(request, template, locals()) def rendimiento(request,template="granos_basicos/monitoreos/rendimiento.html"): filtro = _queryset_filtrado(request) productores = filtro.distinct('productor').count() ANIO_CHOICES = ((2014,'2014'),(2015,'2015'),(2016,'2016'),(2017,'2017'), (2018,'2018'),(2019,'2019'),(2020,'2020'),) #maiz rend_maiz = collections.OrderedDict() productores_maiz = 
HistorialRendimiento.objects.filter(ciclo_productivo = '1',rubro = '1').distinct('monitoreo__productor').count() for obj in ANIO_CHOICES: primera_maiz = HistorialRendimiento.objects.filter(ciclo_productivo = '1',rubro = '1', anio = obj[1]).aggregate(avg = Avg('rendimiento'))['avg'] postrera_maiz = HistorialRendimiento.objects.filter(ciclo_productivo = '2',rubro = '1', anio = obj[1]).aggregate(avg = Avg('rendimiento'))['avg'] apante_maiz = HistorialRendimiento.objects.filter(ciclo_productivo = '3',rubro = '1', anio = obj[1]).aggregate(avg = Avg('rendimiento'))['avg'] if primera_maiz != None or postrera_maiz != None or apante_maiz != None: rend_maiz[obj[1]] = (primera_maiz,postrera_maiz,apante_maiz) #frijol rend_frijol = collections.OrderedDict() productores_frijol = HistorialRendimiento.objects.filter(ciclo_productivo = '1',rubro = '2').distinct('monitoreo__productor').count() for obj in ANIO_CHOICES: primera_frijol = HistorialRendimiento.objects.filter(ciclo_productivo = '1',rubro = '2', anio = obj[1]).aggregate(avg = Avg('rendimiento'))['avg'] postrera_frijol = HistorialRendimiento.objects.filter(ciclo_productivo = '2',rubro = '2', anio = obj[1]).aggregate(avg = Avg('rendimiento'))['avg'] apante_frijol = HistorialRendimiento.objects.filter(ciclo_productivo = '3',rubro = '2', anio = obj[1]).aggregate(avg = Avg('rendimiento'))['avg'] if primera_frijol != None or postrera_frijol != None or apante_frijol != None: rend_frijol[obj[1]] = (primera_frijol,postrera_frijol,apante_frijol) return render(request, template, locals()) def get_comunies(request): import json ids = request.GET.get('ids', '') results = [] dicc = {} if ids: lista = ids.split(',') for id in lista: monitoreos = Monitoreo.objects.filter(productor__municipio__id = id).distinct().values_list('productor__comunidad__id', flat=True) municipios = Municipio.objects.get(pk = id) comunies = Comunidad.objects.filter(municipio__id = municipios.pk,id__in = monitoreos).order_by('nombre') lista1 = [] for c in comunies: comu = {} comu['id'] = c.id comu['nombre'] = c.nombre lista1.append(comu) dicc[municipios.nombre] = lista1 return HttpResponse(json.dumps(dicc), content_type = 'application/json') def saca_porcentajes(dato, total, formato=True): if dato != None: try: porcentaje = (dato/float(total)) * 100 if total != None and total != 0 else 0 except: return 0 if formato: return porcentaje else: return '%.2f' % porcentaje else: return 0<|fim▁end|>
for obj in ACCESO_AGUA_CHOICES: conteo = filtro.filter(fuente_agua__icontains = obj[0]).count()
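The views above all funnel through _queryset_filtrado, which assembles a Django filter dict from session values and strips the empty entries before querying. The same build-then-prune pattern in isolation; the session keys and lookups mirror the ones used above, and any model with matching fields would do:

# Sketch of the build-then-prune filter pattern from _queryset_filtrado;
# a dict comprehension stands in for the explicit unvalid_keys loop.
def filtered_queryset(model, session):
    params = {
        'annio': session.get('year'),
        'productor__comunidad__in': session.get('comunidad'),
    }
    params = {k: v for k, v in params.items() if v}  # drop empty filters
    return model.objects.filter(**params)

# e.g. filtered_queryset(Monitoreo, request.session)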
<|file_name|>univariate.py<|end_file_name|><|fim▁begin|>r""" ================================================== Feature computation for univariate time series ================================================== This sub-module provides routines for computing features on univariate time series. Many functions are improved versions of PyEEG [PYEEG]_ functions. Be careful, some functions will give different results compared to PyEEG as the maths have been changed to match original definitions. Have a look at the documentation notes/ source code to know more. Here is a list of the functions that were reimplemented: * Approximate entropy :func:`~pyrem.univariate.ap_entropy` [RIC00]_ * Fisher information :func:`~pyrem.univariate.fisher_info` [PYEEG]_ * Higuchi fractal dimension :func:`~pyrem.univariate.hfd` [HIG88]_ * Hjorth parameters :func:`~pyrem.univariate.hjorth` [HJO70]_ * Petrosian fractal dimension :func:`~pyrem.univariate.pfd` [PET95]_ * Sample entropy :func:`~pyrem.univariate.samp_entropy` [RIC00]_ * Singular value decomposition entropy :func:`~pyrem.univariate.svd_entropy` [PYEEG]_ * Spectral entropy :func:`~pyrem.univariate.spectral_entropy` [PYEEG]_ .. [PET95] A. Petrosian, Kolmogorov complexity of finite sequences and recognition of different preictal EEG patterns, in Proceedings of the Eighth IEEE Symposium on Computer-Based Medical Systems, 1995, pp. 212-217. .. [PYEEG] F. S. Bao, X. Liu, and C. Zhang, PyEEG: An Open Source Python Module for EEG/MEG Feature Extraction, Computational Intelligence and Neuroscience, vol. 2011, p. e406391, Mar. 2011. .. [HJO70] B. Hjorth, EEG analysis based on time domain properties, Electroencephalography and Clinical Neurophysiology, vol. 29, no. 3, pp. 306-310, Sep. 1970. .. [COS05] M. Costa, A. L. Goldberger, and C.-K. Peng, "Multiscale entropy analysis of biological signals," Phys. Rev. E, vol. 71, no. 2, p. 021906, Feb. 2005. .. [RIC00] J. S. Richman and J. R. Moorman, "Physiological time-series analysis using approximate entropy and sample entropy," American Journal of Physiology - Heart and Circulatory Physiology, vol. 278, no. 6, pp. H2039-H2049, Jun. 2000. .. [HIG88] T. Higuchi, "Approach to an irregular time series on the basis of the fractal theory," Physica D: Nonlinear Phenomena, vol. 31, no. 2, pp. 277-283, Jun. 1988. """ __author__ = 'quentin' import numpy as np def _embed_seq(X,tau,de): N =len(X) if de * tau > N: raise ValueError("Cannot build such a matrix, because D * Tau > N") if tau<1: raise ValueError("Tau has to be at least 1") Y=np.zeros((de, N - (de - 1) * tau)) for i in range(de): Y[i] = X[i *tau : i*tau + Y.shape[1] ] return Y.T def _make_cmp(X, M, R, in_range_i, in_range_j): #Then we make Cmp N = len(X) Emp = _embed_seq(X, 1, M + 1) inrange_cmp = np.abs(Emp[in_range_i,-1] - Emp[in_range_j,-1]) <= R in_range_cmp_i = in_range_i[inrange_cmp] Cmp = np.bincount(in_range_cmp_i, minlength=N-M) in_range_cmp_j = in_range_j[inrange_cmp] Cmp += np.bincount(in_range_cmp_j, minlength=N-M) return Cmp.astype(np.float) def _coarse_grainning(a, tau): """ Coarse graining for multiscale (sample) entropy.
""" if tau ==1: return a length_out = a.size / tau n_dropped = a.size % tau mat = a[0:a.size - n_dropped].reshape((tau, length_out)) return np.mean(mat, axis=0) def _make_cm(X,M,R): N = len(X) # we pregenerate all indices i_idx,j_idx = np.triu_indices(N - M) # We start by making Cm Em = _embed_seq(X, 1, M) dif = np.abs(Em[i_idx] - Em[j_idx]) max_dist = np.max(dif, 1) inrange_cm = max_dist <= R in_range_i = i_idx[inrange_cm] in_range_j = j_idx[inrange_cm] Cm = np.bincount(in_range_i, minlength=N-M+1) Cm += np.bincount(in_range_j, minlength=N-M+1) inrange_last = np.max(np.abs(Em[:-1] - Em[-1]),1) <= R Cm[inrange_last] += 1 # all matches + self match Cm[-1] += np.sum(inrange_last) + 1 return Cm.astype(np.float), in_range_i, in_range_j def pfd(a): r""" Compute Petrosian Fractal Dimension of a time series [PET95]_. It is defined by: .. math:: \frac{log(N)}{log(N) + log(\frac{N}{N+0.4N_{\delta}})} .. note:: **Difference with PyEEG:** Results is different from [PYEEG]_ which implemented an apparently erroneous formulae: .. math:: \frac{log(N)}{log(N) + log(\frac{N}{N}+0.4N_{\delta})} Where: :math:`N` is the length of the time series, and :math:`N_{\delta}` is the number of sign changes. :param a: a one dimensional floating-point array representing a time series. :type a: :class:`~numpy.ndarray` or :class:`~pyrem.time_series.Signal` :return: the Petrosian Fractal Dimension; a scalar. :rtype: float Example: >>> import pyrem as pr >>> import numpy as np >>> # generate white noise: >>> noise = np.random.normal(size=int(1e4)) >>> pr.univariate.pdf(noise) """ diff = np.diff(a) # x[i] * x[i-1] for i in t0 -> tmax prod = diff[1:-1] * diff[0:-2] # Number of sign changes in derivative of the signal N_delta = np.sum(prod < 0) n = len(a) return np.log(n)/(np.log(n)+np.log(n/(n+0.4*N_delta))) def hjorth(a): r""" Compute Hjorth parameters [HJO70]_. .. math:: Activity = m_0 = \sigma_{a}^2 .. math:: Complexity = m_2 = \sigma_{d}/ \sigma_{a} .. math:: Morbidity = m_4 = \frac{\sigma_{dd}/ \sigma_{d}}{m_2} Where: :math:`\sigma_{x}^2` is the mean power of a signal :math:`x`. That is, its variance, if it's mean is zero. :math:`a`, :math:`d` and :math:`dd` represent the original signal, its first and second derivatives, respectively. .. note:: **Difference with PyEEG:** Results is different from [PYEEG]_ which appear to uses a non normalised (by the length of the signal) definition of the activity: .. math:: \sigma_{a}^2 = \sum{\mathbf{x}[i]^2} As opposed to .. math:: \sigma_{a}^2 = \frac{1}{n}\sum{\mathbf{x}[i]^2} :param a: a one dimensional floating-point array representing a time series. :type a: :class:`~numpy.ndarray` or :class:`~pyrem.time_series.Signal` :return: activity, complexity and morbidity :rtype: tuple(float, float, float) Example: >>> import pyrem as pr >>> import numpy as np >>> # generate white noise: >>> noise = np.random.normal(size=int(1e4)) >>> activity, complexity, morbidity = pr.univariate.hjorth(noise) """ first_deriv = np.diff(a) second_deriv = np.diff(a,2) var_zero = np.mean(a ** 2) var_d1 = np.mean(first_deriv ** 2) var_d2 = np.mean(second_deriv ** 2) activity = var_zero morbidity = np.sqrt(var_d1 / var_zero) complexity = np.sqrt(var_d2 / var_d1) / morbidity return activity, morbidity, complexity def svd_entropy(a, tau, de): r""" Compute the Singular Value Decomposition entropy of a signal with embedding dimension "de" and delay "tau" [PYEEG]_. <|fim▁hole|> .. 
note:: **Difference with PyEEG:** The result differs from the PyEEG implementation because :math:`log_2` is used (as opposed to the natural logarithm in the PyEEG code), according to the definition in their paper [PYEEG]_ (eq. 9): .. math:: H_{SVD} = -\sum{\bar\sigma{}_i log_2 \bar\sigma{}_i} :param a: a one dimensional floating-point array representing a time series. :type a: :class:`~numpy.ndarray` or :class:`~pyrem.time_series.Signal` :param tau: the delay :type tau: int :param de: the embedding dimension :type de: int :return: the SVD entropy, a scalar :rtype: float """ mat = _embed_seq(a, tau, de) W = np.linalg.svd(mat, compute_uv = False) W /= sum(W) # normalize singular values return -1*sum(W * np.log2(W)) def fisher_info(a, tau, de): r""" Compute the Fisher information of a signal with embedding dimension "de" and delay "tau" [PYEEG]_. Vectorised (i.e. faster) version of the eponymous PyEEG function. :param a: a one dimensional floating-point array representing a time series. :type a: :class:`~numpy.ndarray` or :class:`~pyrem.time_series.Signal` :param tau: the delay :type tau: int :param de: the embedding dimension :type de: int :return: the Fisher information, a scalar :rtype: float """ mat = _embed_seq(a, tau, de) W = np.linalg.svd(mat, compute_uv = False) W /= sum(W) # normalize singular values FI_v = (W[1:] - W[:-1]) **2 / W[:-1] return np.sum(FI_v) def ap_entropy(a, m, R): r""" Compute the approximate entropy of a signal with embedding dimension `m` and tolerance `R` [PYEEG]_. Vectorised version of the PyEEG function. Faster than PyEEG, but still critically slow. :param a: a one dimensional floating-point array representing a time series. :type a: :class:`~numpy.ndarray` or :class:`~pyrem.time_series.Signal` :param m: the scale :type m: int :param R: The tolerance :type R: float :return: the approximate entropy, a scalar :rtype: float """ N = len(a) Cm, in_range_i, in_range_j = _make_cm(a,m,R) Cmp = _make_cmp(a, m, R, in_range_i, in_range_j) Cm /= float((N - m +1 )) Cmp /= float(N - m) Phi_m, Phi_mp = np.sum(np.log(Cm)), np.sum(np.log(Cmp)) Ap_En = (Phi_m - Phi_mp) / (N - m) return Ap_En def samp_entropy(a, m, r, tau=1, relative_r=True): r""" Compute the sample entropy [RIC00]_ of a signal with embedding dimension `m` and delay `tau` [PYEEG]_. Vectorised version of the eponymous PyEEG function. In addition, this function can also be used to vary tau and therefore compute Multi-Scale Entropy (MSE) [COS05]_ by coarse graining the time series (see example below). By default, r is expressed relative to the standard deviation of the signal. :param a: a one dimensional floating-point array representing a time series. :type a: :class:`~numpy.ndarray` or :class:`~pyrem.time_series.Signal` :param m: the scale :type m: int :param r: The tolerance :type r: float :param tau: The scale for coarse graining. :type tau: int :param relative_r: whether the argument r is relative to the standard deviation. If false, an absolute value should be given for r.
:type relative_r: bool :return: the approximate entropy, a scalar :rtype: float Example: >>> import pyrem as pr >>> import numpy as np >>> # generate white noise: >>> noise = np.random.normal(size=int(1e4)) >>> pr.univariate.samp_entropy(noise, m=2, r=1.5) >>> # now we can do that for multiple scales (MSE): >>> [pr.univariate.samp_entropy(noise, m=2, r=1.5, tau=tau) for tau in range(1, 5)] """ coarse_a = _coarse_grainning(a, tau) if relative_r: coarse_a /= np.std(coarse_a) embsp = _embed_seq(coarse_a, 1 , m + 1) embsp_last = embsp[:,-1] embs_mini = embsp[:, :-1] # Buffers are preallocated chunks of memory storing temporary results. # see the `out` argument in numpy *ufunc* documentation dist_buffer = np.zeros(embsp.shape[0] - 1, dtype=np.float32) subtract_buffer = np.zeros((dist_buffer.size ,m), dtype=np.float32) in_range_buffer = np.zeros_like(dist_buffer, dtype=np.bool) sum_cm, sum_cmp = 0.0, 0.0 # we iterate through all templates (rows), except the last one. for i,template in enumerate(embs_mini[:-1]): # these are just views into the buffer arrays, to store intermediary matrices dist_b_view = dist_buffer[i:] sub_b_view = subtract_buffer[i:] range_b_view = in_range_buffer[i:] embsp_view = embsp_last[i+1:] # subtract the template from each subsequent row of the embedded matrix np.subtract(embs_mini[i+1:], template, out=sub_b_view) # Absolute distance np.abs(sub_b_view, out=sub_b_view) # Maximal absolute difference between a scroll and a template is the distance np.max(sub_b_view, axis=1, out=dist_b_view) # we compare this distance to a tolerance r np.less_equal(dist_b_view, r, out= range_b_view) # score one for this template for each match in_range_sum = np.sum(range_b_view) sum_cm += in_range_sum ### reuse the buffers for the last column dist_b_view = dist_buffer[:in_range_sum] where = np.flatnonzero(range_b_view) dist_b_view= np.take(embsp_view,where,out=dist_b_view) range_b_view = in_range_buffer[range_b_view] # score one for Cmp for each match of the last element dist_b_view -= embsp_last[i] np.abs(dist_b_view, out=dist_b_view) np.less_equal(dist_b_view, r, out=range_b_view) sum_cmp += np.sum(range_b_view) if sum_cm == 0 or sum_cmp ==0: return np.NaN return np.log(sum_cm/sum_cmp) def spectral_entropy(a, sampling_freq, bands=None): r""" Compute spectral entropy of a signal with respect to frequency bands. The power spectrum is computed through the FFT. Then, it is normalised and assimilated to a probability density function. The entropy of the signal :math:`x` can be expressed by: .. math:: H(x) = -\sum_{f=0}^{f = f_s/2} PSD(f) log_2[PSD(f)] Where: :math:`PSD` is the normalised power spectrum (Power Spectral Density), and :math:`f_s` is the sampling frequency :param a: a one dimensional floating-point array representing a time series. :type a: :class:`~numpy.ndarray` or :class:`~pyrem.time_series.Signal` :param sampling_freq: the sampling frequency :type sampling_freq: float :param bands: a list of numbers delimiting the bins of the frequency bands.
If None the entropy is computed over the whole range of the DFT (from 0 to :math:`f_s/2`) :return: the spectral entropy; a scalar """ psd = np.abs(np.fft.rfft(a))**2 psd /= np.sum(psd) # psd as a pdf (normalised to one) if bands is None: power_per_band= psd[psd>0] else: freqs = np.fft.rfftfreq(a.size, 1/float(sampling_freq)) bands = np.asarray(bands) freq_limits_low = np.concatenate([[0.0],bands]) freq_limits_up = np.concatenate([bands, [np.Inf]]) power_per_band = np.asarray([np.sum(psd[np.bitwise_and(freqs >= low, freqs<up)]) for low,up in zip(freq_limits_low, freq_limits_up)]) power_per_band= power_per_band[ power_per_band > 0] return - np.sum(power_per_band * np.log2(power_per_band)) def hfd(a, k_max): r""" Compute Higuchi Fractal Dimension of a time series. Vectorised version of the eponymous [PYEEG]_ function. .. note:: **Difference with PyEEG:** The result is different from [PYEEG]_, which appears to have implemented an erroneous formula. [HIG88]_ defines the normalisation factor as: .. math:: \frac{N-1}{[\frac{N-m}{k} ]\dot{} k} [PYEEG]_ implementation uses: .. math:: \frac{N-1}{[\frac{N-m}{k}]} The latter does *not* give the expected fractal dimension of approximately `1.50` for Brownian motion (see example below). :param a: a one dimensional floating-point array representing a time series. :type a: :class:`~numpy.ndarray` or :class:`~pyrem.time_series.Signal` :param k_max: the maximal value of k :type k_max: int :return: Higuchi's fractal dimension; a scalar :rtype: float Example from [HIG88]_. This should produce a result close to `1.50`: >>> import numpy as np >>> import pyrem as pr >>> i = np.arange(2 ** 15) +1001 >>> z = np.random.normal(size=int(2 ** 15) + 1001) >>> y = np.array([np.sum(z[1:j]) for j in i]) >>> pr.univariate.hfd(y,2**8) """ L = [] x = [] N = a.size # TODO this could be used to pregenerate k and m idxs ... but memory problem? # km_idxs = np.triu_indices(k_max - 1) # km_idxs = k_max - np.flipud(np.column_stack(km_idxs)) -1 # km_idxs[:,1] -= 1 # for k in xrange(1,k_max): Lk = 0 for m in xrange(0,k): #we pregenerate all idxs idxs = np.arange(1,int(np.floor((N-m)/k)),dtype=np.int32) Lmk = np.sum(np.abs(a[m+idxs*k] - a[m+k*(idxs-1)])) Lmk = (Lmk*(N - 1)/(((N - m)/ k)* k)) / k Lk += Lmk L.append(np.log(Lk/(m+1))) x.append([np.log(1.0/ k), 1]) (p, r1, r2, s)=np.linalg.lstsq(x, L) return p[0] def dfa(X, Ave = None, L = None, sampling= 1): """ WIP on this function. It is basically copied and pasted from [PYEEG]_, without verification of the maths or unit tests. """ X = np.array(X) if Ave is None: Ave = np.mean(X) Y = np.cumsum(X) Y -= Ave if not L: max_power = np.int(np.log2(len(X)))-4 L = X.size / 2 ** np.arange(4,max_power) if len(L)<2: raise Exception("Too few values for L. Time series too short?") F = np.zeros(len(L)) # F(n) of different given box length n for i,n in enumerate(L): sampled = 0 for j in xrange(0,len(X) -n ,n): if np.random.rand() < sampling: F[i] += np.polyfit(np.arange(j,j+n), Y[j:j+n],1, full=True)[1] sampled += 1 if sampled > 0: F[i] /= float(sampled) LF = np.array([(l,f) for l,f in zip(L,F) if l>0]).T F = np.sqrt(LF[1]) Alpha = np.polyfit(np.log(LF[0]), np.log(F),1)[0] return Alpha def hurst(signal): """ **Experimental**/untested implementation taken from: http://drtomstarke.com/index.php/calculation-of-the-hurst-exponent-to-test-for-trend-and-mean-reversion/ Use at your own risk.
""" tau = []; lagvec = [] # Step through the different lags for lag in range(2,20): # produce price difference with lag pp = np.subtract(signal[lag:],signal[:-lag]) # Write the different lags into a vector lagvec.append(lag) # Calculate the variance of the difference vector tau.append(np.std(pp)) # linear fit to double-log graph (gives power) m = np.polyfit(np.log10(lagvec),np.log10(tau),1) # calculate hurst hurst = m[0] return hurst<|fim▁end|>
<|file_name|>test_indexedDB.py<|end_file_name|><|fim▁begin|>from browser import window _kids=['Marsha', 'Jan', 'Cindy'] def continue1(event): _objectStore.get('Jan', onsuccess=exists, onerror=continue2) def continue2(event): for _kid in _kids: _rec={'name': _kid} _objectStore.put(_rec, _kid, onsuccess=printmsg, onerror=printerr) _objectStore.get('Jan', onsuccess=continue3, onerror=printerr) def continue3(event): print ("Async operations complete..") def exists(event): if event.target.pyresult() is None: #handle the case where get returns undefined because the key doesn't exist #in the db.. continue2(event) else: print(event.result) #this shouldn't get called, output message if called print("this shouldn't get called") def printrec(event): _obj=event.target.pyresult() assert isinstance(_obj, dict) assert _obj['name']=='Jan' def printmsg(event): _obj=event.target.pyresult() assert _obj in _kids def printerr(event): print("Error: %s" % (event.result)) def onsuccess(event): global db db = request.result def onupgradeneeded(e): print("event: ", e, "target", e.target) print("event type: ", e.type) print("e.oldVersion: ", e.oldVersion) print("e.newVersion: ", e.newVersion) # todo.. override createObjectStore to take options (i.e., like OS.put) #e.target.result.createObjectStore("BradyKids") db = request.result for _kid in _kids: print(_kid, db) _rec={'name': _kid}<|fim▁hole|> req.onsuccess=printmsg req.onerror=printerr db = None request = window.indexedDB.open("BradyKids", 3) request.onsuccess = onsuccess request.onupgradeneeded=onupgradeneeded print(db) print("allowing async operations to complete")<|fim▁end|>
req = db.put(_rec, _kid)
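The test above exercises Brython's window.indexedDB through chained onsuccess/onerror callbacks, with db.put returning a request object whose handlers are assigned afterwards, as the completion line shows. A stripped-down sketch of that same chain; the database name, record, and key are invented placeholders:

# Minimal sketch of the open -> put callback chain used by the test above;
# "Demo" and the stored record are placeholders.
from browser import window

def on_open(event):
    db = event.target.result
    req = db.put({'name': 'example'}, 'example')   # same call shape as the test
    req.onsuccess = lambda ev: print("stored:", ev.target.result)
    req.onerror = lambda ev: print("error:", ev.result)

request = window.indexedDB.open("Demo", 1)
request.onsuccess = on_open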
<|file_name|>trestle.py<|end_file_name|><|fim▁begin|>#(c) 2016-2018 by Authors #This file is a part of Flye program. #Released under the BSD license (see LICENSE file) """ Created on Wed Jan 4 03:50:31 2017 @author: jeffrey_yuan """ from __future__ import absolute_import from __future__ import division import os import logging from itertools import combinations, product import copy import multiprocessing, signal import flye.polishing.alignment as flye_aln from flye.utils.sam_parser import SynchronizedSamReader, Alignment import flye.utils.fasta_parser as fp import flye.config.py_cfg as config import flye.polishing.polish as pol import flye.trestle.divergence as div import flye.trestle.trestle_config as trestle_config from flye.six.moves import range from flye.six.moves import zip logger = logging.getLogger() def resolve_repeats(args, trestle_dir, repeats_info, summ_file, resolved_repeats_seqs): all_file_names = define_file_names() all_labels, initial_file_names = all_file_names[0], all_file_names[2] all_resolved_reps_dict = {} all_summaries = [] init_summary(summ_file) #1. Process repeats from graph - generates a folder for each repeat logger.debug("Finding unbridged repeats") process_outputs = process_repeats(args.reads, repeats_info, trestle_dir, all_labels, initial_file_names) repeat_list, repeat_edges, all_edge_headers = process_outputs logger.info("Simple unbridged repeats: %d", len(repeat_list)) #if not repeat_list: # return #Resolve every repeat in a separate thread def _thread_worker(func_args, log_file, results_queue, error_queue): try: #each thred logs to a separate file log_formatter = \ logging.Formatter("[%(asctime)s] %(name)s: %(levelname)s: " "%(message)s", "%Y-%m-%d %H:%M:%S") file_handler = logging.FileHandler(log_file, mode="a") file_handler.setFormatter(log_formatter) for handler in logger.handlers[:]: logger.removeHandler(handler) logger.addHandler(file_handler) result = resolve_each_repeat(*func_args) results_queue.put(result) except Exception as e: error_queue.put(e) job_chunks = [repeat_list[i:i + args.threads] for i in range(0, len(repeat_list), args.threads)] for job_chunk in job_chunks: manager = multiprocessing.Manager() results_queue = manager.Queue() error_queue = manager.Queue() repeat_threads = max(1, args.threads // len(job_chunk)) orig_sigint = signal.signal(signal.SIGINT, signal.SIG_IGN) threads = [] for rep_id in sorted(job_chunk): func_args = (rep_id, repeat_edges, all_edge_headers, args, trestle_dir, repeats_info, all_file_names, repeat_threads) log_file = os.path.join(trestle_dir, "repeat_{0}".format(rep_id), "log.txt") threads.append(multiprocessing.Process(target=_thread_worker, args=(func_args, log_file, results_queue, error_queue))) signal.signal(signal.SIGINT, orig_sigint) for t in threads: t.start() try: for t in threads: t.join() if t.exitcode == -9: logger.error("Looks like the system ran out of memory") if t.exitcode != 0: raise Exception("One of the processes exited with code: {0}" .format(t.exitcode)) except KeyboardInterrupt: for t in threads: t.terminate() raise while not error_queue.empty(): logger.warning("Non-critical error in trestle thread: " + str(error_queue.get())) #if not error_queue.empty(): # raise error_queue.get() while not results_queue.empty(): resolved_dict, summary_list = results_queue.get() all_resolved_reps_dict.update(resolved_dict) all_summaries.extend(summary_list) fp.write_fasta_dict(all_resolved_reps_dict, resolved_repeats_seqs) num_resolved = 0 for summ_items in all_summaries: if summ_items[6]: num_resolved += 1 
update_summary(summ_items, summ_file) logger.info("Resolved: %d", num_resolved) def resolve_each_repeat(rep_id, repeat_edges, all_edge_headers, args, trestle_dir, repeats_info, all_file_names, num_threads): SUB_THRESH = trestle_config.vals["sub_thresh"] DEL_THRESH = trestle_config.vals["del_thresh"] INS_THRESH = trestle_config.vals["ins_thresh"] MAX_ITER = trestle_config.vals["max_iter"] MIN_ALN_RATE = trestle_config.vals["min_aln_rate"] NUM_POL_ITERS = trestle_config.vals["num_pol_iters"] ORIENT_CONFIG = trestle_config.vals["orientations_to_run"] zero_it = 0 (all_labels, pol_dir_names, initial_file_names, pre_file_names, div_file_names, aln_names, middle_file_names, output_file_names) = all_file_names repeat_label, side_labels = all_labels pol_temp_name, pol_ext_name, pol_cons_name = pol_dir_names (template_name, extended_name, repeat_reads_name, pre_partitioning_name) = initial_file_names pre_edge_reads_name, pre_read_aln_name, partitioning_name = pre_file_names div_freq_name, div_pos_name, div_summ_name = div_file_names (reads_template_aln_name, cons_temp_aln_name, cut_cons_temp_aln_name, reads_cons_aln_name) = aln_names (confirmed_pos_name, edge_reads_name, cut_cons_name, cons_vs_cons_name) = middle_file_names (side_stats_name, int_stats_name, int_confirmed_pos_name, resolved_rep_name, res_vs_res_name) = output_file_names logger.info("Resolving repeat %d: %s", rep_id, repeats_info[rep_id].repeat_path) repeat_dir = os.path.join(trestle_dir, repeat_label.format(rep_id)) run_orientations = [] if ORIENT_CONFIG == "forward": run_orientations = [("forward", rep_id)] elif ORIENT_CONFIG == "reverse": run_orientations = [("reverse", -rep_id)] elif ORIENT_CONFIG == "both": run_orientations = [("forward", rep_id), ("reverse", -rep_id)] repeat_bridged = False resolved_dict = {} summary_list = [] for orientation, rep in run_orientations: logger.debug("Orientation: " + orientation) orient_dir = os.path.join(repeat_dir, orientation) template = os.path.join(orient_dir, template_name) extended = os.path.join(orient_dir, extended_name) repeat_reads = os.path.join(orient_dir, repeat_reads_name) term_bool = {s:False for s in side_labels} #2. Polish template and extended templates logger.debug("Polishing templates") pol_temp_dir = os.path.join(orient_dir, pol_temp_name) if not os.path.isdir(pol_temp_dir): os.mkdir(pol_temp_dir) polished_template, _ = \ pol.polish(template, [repeat_reads], pol_temp_dir, NUM_POL_ITERS, num_threads, args.platform, output_progress=False) if not os.path.getsize(polished_template): for side in side_labels: term_bool[side] = True polished_extended = {} pol_ext_dir = os.path.join(orient_dir, pol_ext_name) for side in side_labels: for edge_id in repeat_edges[rep][side]: if not os.path.isdir(pol_ext_dir.format(side, edge_id)): os.mkdir(pol_ext_dir.format(side, edge_id)) pol_output, _ = \ pol.polish(extended.format(side, edge_id), [repeat_reads], pol_ext_dir.format(side, edge_id), NUM_POL_ITERS, num_threads, args.platform, output_progress=False) polished_extended[(side, edge_id)] = pol_output if not os.path.getsize(pol_output): term_bool[side] = True #3. 
Find divergent positions logger.debug("Estimating divergence") frequency_path = os.path.join(orient_dir, div_freq_name) position_path = os.path.join(orient_dir, div_pos_name) summary_path = os.path.join(orient_dir, div_summ_name) #logger.info("running Minimap2") alignment_file = os.path.join(orient_dir, reads_template_aln_name) template_len = 0.0 if os.path.getsize(polished_template): flye_aln.make_alignment(polished_template, [repeat_reads], num_threads, orient_dir, args.platform, alignment_file, reference_mode=True, sam_output=True) template_info = flye_aln.get_contigs_info(polished_template) template_len = template_info[str(rep)].length logger.debug("Finding tentative divergence positions") div.find_divergence(alignment_file, polished_template, template_info, frequency_path, position_path, summary_path, MIN_ALN_RATE, args.platform, num_threads, SUB_THRESH, DEL_THRESH, INS_THRESH) read_endpoints = find_read_endpoints(alignment_file, polished_template) avg_cov = find_coverage(frequency_path) #4. Initialize paths, variables, and stats pre_partitioning = os.path.join(orient_dir, pre_partitioning_name) pre_edge_reads = os.path.join(orient_dir, pre_edge_reads_name) pre_read_align = os.path.join(orient_dir, pre_read_aln_name) partitioning = os.path.join(orient_dir, partitioning_name) cons_align = os.path.join(orient_dir, cons_temp_aln_name) cut_cons_align = os.path.join(orient_dir, cut_cons_temp_aln_name) read_align = os.path.join(orient_dir, reads_cons_aln_name) confirmed_pos_path = os.path.join(orient_dir, confirmed_pos_name) edge_reads = os.path.join(orient_dir, edge_reads_name) cut_cons = os.path.join(orient_dir, cut_cons_name) polishing_dir = os.path.join(orient_dir, pol_cons_name) cons_vs_cons = os.path.join(orient_dir, cons_vs_cons_name) side_stats = os.path.join(orient_dir, side_stats_name) integrated_stats = os.path.join(orient_dir, int_stats_name) int_confirmed_path = os.path.join(orient_dir, int_confirmed_pos_name) resolved_rep_path = os.path.join(orient_dir, resolved_rep_name) res_vs_res = os.path.join(orient_dir, res_vs_res_name) #5. Re-align reads to extended and initialize partitioning 0 logger.debug("Checking initial set of edge reads") for side in side_labels: for edge_id in repeat_edges[rep][side]: write_edge_reads(zero_it, side, edge_id, repeat_reads, pre_partitioning.format(side), pre_edge_reads.format(side, edge_id)) flye_aln.make_alignment(polished_extended[(side, edge_id)], [pre_edge_reads.format(side, edge_id)], num_threads, orient_dir, args.platform, pre_read_align.format(side, edge_id), reference_mode=True, sam_output=True) init_partitioning(repeat_edges[rep][side], side, pre_partitioning.format(side), pre_read_align, polished_extended, partitioning.format(zero_it, side)) cut_consensus = {} side_it = {s:0 for s in side_labels} iter_pairs = [] edge_below_cov = {s:False for s in side_labels} dup_part = {s:False for s in side_labels} prev_partitionings = {s:set() for s in side_labels} #6. Initialize stats for side in side_labels: edge_below_cov[side] = init_side_stats( rep, side, repeat_edges, args.min_overlap, position_path, partitioning.format(zero_it, side), prev_partitionings[side], template_len, side_stats.format(side)) init_int_stats(rep, repeat_edges, zero_it, position_path, partitioning, repeat_reads, template_len, avg_cov, integrated_stats) #7. 
Start iterations logger.debug("Iterative procedure") for it in range(1, MAX_ITER + 1): both_break = True for side in side_labels: if (edge_below_cov[side] or dup_part[side] or term_bool[side]): continue else: logger.debug("Iteration %d, '%s'", it, side) both_break = False for edge_id in sorted(repeat_edges[rep][side]): #7a. Call consensus on partitioned reads pol_con_dir = polishing_dir.format( it, side, edge_id) curr_reads = edge_reads.format(it, side, edge_id) write_edge_reads( it, side, edge_id, repeat_reads, partitioning.format(it - 1, side), curr_reads) curr_extended = polished_extended[(side, edge_id)] logger.debug("\tPolishing '%s %s' reads", side, edge_id) if not os.path.isdir(pol_con_dir): os.mkdir(pol_con_dir) pol_con_out, _ = \ pol.polish(curr_extended, [curr_reads], pol_con_dir, NUM_POL_ITERS, num_threads, args.platform, output_progress=False) #7b. Cut consensus where coverage drops cutpoint = locate_consensus_cutpoint( side, read_endpoints, curr_reads) if os.path.getsize(pol_con_out): cons_al_file = cons_align.format(it, side, edge_id) flye_aln.make_alignment(polished_template, [pol_con_out], num_threads, orient_dir, args.platform, cons_al_file, reference_mode=True, sam_output=True) else: term_bool[side] = True curr_cut_cons = cut_cons.format(it, side, edge_id) cut_consensus[(it, side, edge_id)] = curr_cut_cons if os.path.isfile(cons_al_file): truncate_consensus(side, cutpoint, cons_al_file, polished_template, pol_con_out, curr_cut_cons) else: term_bool[side] = True #7c. Align consensuses to template # and reads to consensuses if os.path.isfile(curr_cut_cons): cut_cons_al_file = cut_cons_align.format(it, side, edge_id) flye_aln.make_alignment(polished_template, [curr_cut_cons], num_threads, orient_dir, args.platform, cut_cons_al_file, reference_mode=True, sam_output=True) read_al_file = read_align.format(it, side, edge_id) flye_aln.make_alignment(curr_cut_cons, [repeat_reads], num_threads, orient_dir, args.platform, read_al_file, reference_mode=True, sam_output=True) else: term_bool[side] = True #7d. Partition reads using divergent positions logger.debug("\tPartitioning '%s' reads", side) partition_reads(repeat_edges[rep][side], it, side, position_path, cut_cons_align, polished_template, read_align, cut_consensus, confirmed_pos_path, partitioning, all_edge_headers[rep]) #7e. Write stats file for current iteration edge_pairs = sorted(combinations(repeat_edges[rep][side], 2)) for edge_one, edge_two in edge_pairs: cons_one = cut_consensus[(it, side, edge_one)] cons_two = cut_consensus[(it, side, edge_two)] if (not os.path.isfile(cons_one) or not os.path.isfile(cons_two)): continue cons_cons_file = cons_vs_cons.format( it, side, edge_one, it, side, edge_two) flye_aln.make_alignment(cons_two, [cons_one], num_threads, orient_dir, args.platform, cons_cons_file, reference_mode=True, sam_output=True) side_stat_outputs = update_side_stats( repeat_edges[rep][side], it, side, cut_cons_align, polished_template, confirmed_pos_path.format(it, side), partitioning.format(it, side), prev_partitionings[side], side_stats.format(side)) edge_below_cov[side], dup_part[side] = side_stat_outputs side_it[side] = it iter_pairs.append((side_it[side_labels[0]], side_it[side_labels[1]])) update_int_stats(rep, repeat_edges, side_it, cut_cons_align, polished_template, template_len, confirmed_pos_path, int_confirmed_path, partitioning, integrated_stats) if both_break: break #8. 
Finalize stats files logger.debug("Writing stats files") for side in side_labels: finalize_side_stats(repeat_edges[rep][side], side_it[side], side, cut_cons_align, polished_template, cons_vs_cons, cut_consensus, confirmed_pos_path.format(side_it[side], side), partitioning.format(side_it[side], side), edge_below_cov[side], dup_part[side], term_bool[side], side_stats.format(side)) final_int_outputs = finalize_int_stats(rep, repeat_edges, side_it, cut_cons_align, polished_template, template_len, cons_vs_cons, cut_consensus, int_confirmed_path, partitioning, integrated_stats, resolved_rep_path) bridged, repeat_seqs, summ_vals = final_int_outputs #9. Generate summary and resolved repeat file logger.debug("Generating summary and resolved repeat file") avg_div = 0.0 both_resolved_present = False if bridged: res_inds = list(range(len(repeat_edges[rep]["in"]))) for res_one, res_two in sorted(combinations(res_inds, 2)): res_one_path = resolved_rep_path.format(rep, res_one) res_two_path = resolved_rep_path.format(rep, res_two) if (os.path.isfile(res_one_path) and os.path.isfile(res_two_path)): both_resolved_present = True repeat_bridged = True flye_aln.make_alignment(res_two_path, [res_one_path], num_threads, orient_dir, args.platform, res_vs_res.format(rep, res_one, res_two), reference_mode=True, sam_output=True) if both_resolved_present: avg_div = int_stats_postscript(rep, repeat_edges, integrated_stats, resolved_rep_path, res_vs_res) if both_resolved_present: resolved_dict.update(repeat_seqs) summary_list.append((rep, repeats_info[rep].repeat_path, template_len, avg_cov, summ_vals, avg_div, both_resolved_present)) remove_unneeded_files(repeat_edges, rep, side_labels, side_it, orient_dir, template, extended, pol_temp_dir, pol_ext_dir, pre_edge_reads, pre_partitioning, pre_read_align, partitioning, cons_align, cut_cons_align, read_align, confirmed_pos_path, edge_reads, cut_cons, polishing_dir, cons_vs_cons, int_confirmed_path, repeat_reads, frequency_path, alignment_file, NUM_POL_ITERS, iter_pairs) if repeat_bridged: logger.info("Repeat successfully resolved") else: logger.info("Repeat not resolved") return resolved_dict, summary_list def define_file_names(): #Defining directory and file names for trestle output repeat_label = "repeat_{0}" side_labels = ["in", "out"] all_labels = repeat_label, side_labels pol_temp_name = "Polishing.Template" pol_ext_name = "Polishing.Extended.{0}.{1}" pol_cons_name = "Polishing.Consensus.{0}.{1}.{2}" pol_dir_names = pol_temp_name, pol_ext_name, pol_cons_name template_name = "template.fasta" extended_name = "extended_templates.{0}.{1}.fasta" repeat_reads_name = "repeat_reads.fasta" pre_partitioning_name = "pre_partitioning.{0}.txt" initial_file_names = (template_name, extended_name, repeat_reads_name, pre_partitioning_name) pre_edge_reads_name = "pre_edge_reads.{0}.{1}.txt" pre_read_aln_name = "pre_edge_reads.{0}.{1}.vs.extended.minimap.bam" partitioning_name = "partitioning.{0}.{1}.txt" pre_file_names = pre_edge_reads_name, pre_read_aln_name, partitioning_name div_freq_name = "divergence_frequencies.txt" div_pos_name = "divergent_positions.txt" div_summ_name = "divergence_summary.txt" div_file_names = div_freq_name, div_pos_name, div_summ_name reads_template_aln_name = "reads.vs.template.minimap.bam" cons_temp_aln_name = "uncut_consensus.{0}.{1}.{2}.vs.template.minimap.bam" cut_cons_temp_aln_name = "consensus.{0}.{1}.{2}.vs.template.minimap.bam" reads_cons_aln_name = "reads.vs.consensus.{0}.{1}.{2}.minimap.bam" aln_names = (reads_template_aln_name, 
cons_temp_aln_name, cut_cons_temp_aln_name, reads_cons_aln_name) confirmed_pos_name = "confirmed_positions.{0}.{1}.txt" edge_reads_name = "edge_reads.{0}.{1}.{2}.fasta" cut_cons_name = "consensus.{0}.{1}.{2}.fasta" cons_vs_cons_name = "".join(["consensus.{0}.{1}.{2}.vs.", "consensus.{3}.{4}.{5}.minimap.bam"]) middle_file_names = (confirmed_pos_name, edge_reads_name, cut_cons_name, cons_vs_cons_name) side_stats_name = "stats_from_{0}.txt" int_stats_name = "stats_integrated.txt" int_confirmed_pos_name = "integrated_confirmed_positions.{0}.{1}.txt" resolved_rep_name = "resolved_repeat_{0}.copy.{1}.fasta" res_vs_res_name = "resolved_repeat_{0}.copy.{1}.vs.{2}.minimap.bam" output_file_names = (side_stats_name, int_stats_name, int_confirmed_pos_name, resolved_rep_name, res_vs_res_name) all_file_names = (all_labels, pol_dir_names, initial_file_names, pre_file_names, div_file_names, aln_names, middle_file_names, output_file_names) return all_file_names #Process Repeats functions class ProcessingException(Exception): pass def process_repeats(reads, repeats_dict, work_dir, all_labels, initial_file_names): """ Generates repeat dirs and files given reads, repeats_dump and graph_edges files. Only returns repeats between min_mult and max_mult """ if not repeats_dict: return [], {}, {} #creates a separate process to make sure that #read dictionary is released after the function exits manager = multiprocessing.Manager() return_queue = manager.Queue() orig_sigint = signal.signal(signal.SIGINT, signal.SIG_IGN) thread = multiprocessing.Process(target=_process_repeats_impl, args=(reads, repeats_dict, work_dir, all_labels, initial_file_names, return_queue)) signal.signal(signal.SIGINT, orig_sigint) thread.start() try: thread.join() if thread.exitcode == -9: logger.error("Looks like the system ran out of memory") if thread.exitcode != 0: raise Exception("One of the processes exited with code: {0}" .format(thread.exitcode)) except KeyboardInterrupt: thread.terminate() raise return return_queue.get() def _process_repeats_impl(reads, repeats_dict, work_dir, all_labels, initial_file_names, return_queue): """ This function is called in a separate process """ MIN_MULT = trestle_config.vals["min_mult"] MAX_MULT = trestle_config.vals["max_mult"] FLANKING_LEN = trestle_config.vals["flanking_len"] ORIENT_CONFIG = trestle_config.vals["orientations_to_run"] repeat_label, side_labels = all_labels (template_name, extended_name, repeat_reads_name, pre_partitioning_name) = initial_file_names reads_dict = {} for read_file in reads: reads_dict.update(fp.read_sequence_dict(read_file)) #orig_graph = fp.read_sequence_dict(graph_edges) #graph_dict = {int(h.split('_')[1]):orig_graph[h] for h in orig_graph} if not reads_dict: raise ProcessingException("No reads found from {0}".format(reads)) #if not graph_dict: # raise ProcessingException("No edges found from {0}".format( # graph_edges)) repeat_list = [] repeat_edges = {} all_edge_headers = {} for rep in sorted(repeats_dict, reverse=True): #Checks multiplicity of repeat and presence of reverse strand #One run processes both forward and reverse strand of repeat if rep <= 0: continue valid_repeat = True if -rep not in repeats_dict: logger.debug("Repeat %s missing reverse strand", rep) valid_repeat = False elif (repeats_dict[rep].multiplicity < MIN_MULT or repeats_dict[rep].multiplicity > MAX_MULT or repeats_dict[-rep].multiplicity < MIN_MULT or repeats_dict[-rep].multiplicity > MAX_MULT): logger.debug("Repeat %s multiplicity not in range: %s", rep, repeats_dict[rep].multiplicity) 
valid_repeat = False #if rep not in graph_dict: # logger.debug("Repeat {0} missing from graph file".format(rep)) # valid_repeat = False if not valid_repeat: continue #Makes repeat dirs repeat_dir = os.path.join(work_dir, repeat_label.format(rep)) if not os.path.isdir(repeat_dir): os.mkdir(repeat_dir) repeat_list.append(rep) run_orientations = [] if ORIENT_CONFIG == "forward": run_orientations = [("forward", rep)] elif ORIENT_CONFIG == "reverse": run_orientations = [("reverse", -rep)] elif ORIENT_CONFIG == "both": run_orientations = [("forward", rep), ("reverse", -rep)] for curr_label, curr_rep in run_orientations: orient_path = os.path.join(repeat_dir, curr_label) if not os.path.isdir(orient_path): os.mkdir(orient_path) template_path = os.path.join(orient_path, template_name) extended_path = os.path.join(orient_path, extended_name) repeat_reads_path = os.path.join(orient_path, repeat_reads_name) partitioning_path = os.path.join(orient_path, pre_partitioning_name) in_label = side_labels[0] out_label = side_labels[1] repeat_edges[curr_rep] = {in_label:[], out_label:[]} #(mult, all_reads_list, inputs_dict, # outputs_dict) = repeats_dict[curr_rep] #mult = repeats_dict[curr_rep].multiplicity all_reads_list = repeats_dict[curr_rep].all_reads inputs_dict = repeats_dict[curr_rep].in_reads outputs_dict = repeats_dict[curr_rep].out_reads template_dict = {} extended_dicts = {} repeat_reads_dict = {} #Partitioning parts: id_num, Partitioned/Tied/None, #edge_id, top_score, total_score, Header partitioning = {in_label:[], out_label:[]} read_id = 0 template_seq = repeats_dict[curr_rep].sequences["template"] #if curr_label == "reverse": # template_seq = fp.reverse_complement(graph_dict[rep]) template_dict[curr_rep] = template_seq all_edge_headers[curr_rep] = {} out_headers = set() #Headers will be in the form -h or +h, #edge_dict is in the form >[Input,Output]_edge##_h, #rev_comp of read will be written if the header is -h for edge_id in inputs_dict: repeat_edges[curr_rep][in_label].append(edge_id) extended_dicts[(in_label, edge_id)] = {} headers = inputs_dict[edge_id] for header in headers: if (not header) or (header[0] != '+' and header[0] != '-'): raise ProcessingException( "Input read format not recognized: {0}".format( header)) if header[1:] not in reads_dict: raise ProcessingException( "Read header {0} not in any of {1}".format( header[1:], reads)) if header[1:] not in all_edge_headers[curr_rep]: status_label = "Partitioned" edge_label = str(edge_id) score = 1 total_score = 0 partitioning[in_label].append((read_id, status_label, edge_label, score, total_score, header[1:])) all_edge_headers[curr_rep][header[1:]] = read_id read_id += 1 extend_in_header = "Extended_Template_Input_{0}".format( edge_id) #if edge_id > 0: # edge_seq = graph_dict[edge_id] #elif edge_id < 0: # edge_seq = fp.reverse_complement(graph_dict[-edge_id]) edge_seq = repeats_dict[curr_rep].sequences[edge_id] extended_seq = edge_seq[-FLANKING_LEN:] extended_dicts[(in_label, edge_id)][extend_in_header] = ( extended_seq + template_seq) for edge_id in outputs_dict: repeat_edges[curr_rep][out_label].append(edge_id) extended_dicts[(out_label, edge_id)] = {} headers = outputs_dict[edge_id] for header in headers: if (not header) or (header[0] != '+' and header[0] != '-'): raise ProcessingException( "Output read format not recognized: {0}".format( header)) if header[1:] not in reads_dict: raise ProcessingException( "Read header {0} not in any of {1}".format( header[1:], reads)) curr_read_id = read_id if header[1:] not in 
all_edge_headers[curr_rep]: status_label = "None" edge_label = "NA" score = 0 total_score = 0 partitioning[in_label].append((read_id, status_label, edge_label, score, total_score, header[1:])) all_edge_headers[curr_rep][header[1:]] = read_id read_id += 1 else: curr_read_id = all_edge_headers[curr_rep][header[1:]] if header[1:] not in out_headers: status_label = "Partitioned" edge_label = str(edge_id) score = 1 total_score = 0 partitioning[out_label].append((curr_read_id, status_label, edge_label, score, total_score, header[1:])) out_headers.add(header[1:]) extend_out_header = "Extended_Template_Output_{0}".format( edge_id) #if edge_id > 0: # edge_seq = graph_dict[edge_id] #elif edge_id < 0: # edge_seq = fp.reverse_complement(graph_dict[-edge_id]) edge_seq = repeats_dict[curr_rep].sequences[edge_id] extended_seq = edge_seq[:FLANKING_LEN] extended_dicts[(out_label, edge_id)][extend_out_header] = ( template_seq + extended_seq) #Need to reiterate over in_headers to add in_headers to #out-partitioning while avoiding double-adding ones in both for edge_id in inputs_dict: headers = inputs_dict[edge_id] for header in headers: if header[1:] not in out_headers: curr_read_id = all_edge_headers[curr_rep][header[1:]] status_label = "None" edge_label = "NA" score = 0 total_score = 0 partitioning[out_label].append((curr_read_id, status_label, edge_label, score, total_score, header[1:])) for header in all_reads_list: if (not header) or (header[0] != '+' and header[0] != '-'): raise ProcessingException( "All reads format not recognized: {0}".format(header)) if header[1:] not in reads_dict: raise ProcessingException( "Read header {0} not in any of {1}".format( header[1:], reads)) seq = reads_dict[header[1:]] if header[0] == '-': seq = fp.reverse_complement(seq) repeat_reads_dict[header[1:]] = seq curr_read_id = read_id if header[1:] not in all_edge_headers[curr_rep]: all_edge_headers[curr_rep][header[1:]] = read_id read_id += 1 status_label = "None" edge_label = "NA" score = 0 total_score = 0 partitioning[in_label].append((curr_read_id, status_label, edge_label, score, total_score, header[1:])) status_label = "None" edge_label = "NA" score = 0 total_score = 0 partitioning[out_label].append((curr_read_id, status_label, edge_label, score, total_score, header[1:])) if template_dict and list(template_dict.values())[0]: fp.write_fasta_dict(template_dict, template_path) for edge in extended_dicts: if extended_dicts[edge] and list(extended_dicts[edge].values())[0]: extended_edge_path = extended_path.format(edge[0], edge[1]) fp.write_fasta_dict(extended_dicts[edge], extended_edge_path) if repeat_reads_dict and list(repeat_reads_dict.values())[0]: fp.write_fasta_dict(repeat_reads_dict, repeat_reads_path) for side in side_labels: _write_partitioning_file(partitioning[side], partitioning_path.format(side)) if not template_dict: raise ProcessingException("No template {0} found".format( curr_rep)) for edge in extended_dicts: if not template_dict: raise ProcessingException( "No extended template {0} {1} {2} found".format( curr_rep, edge[0], edge[1])) if not repeat_reads_dict: raise ProcessingException("No repeat reads {0} found".format( curr_rep)) for side in side_labels: if not partitioning[side]: raise ProcessingException( "Empty partitioning file {0}".format( partitioning_path.format(side))) return_queue.put((repeat_list, repeat_edges, all_edge_headers)) def _write_partitioning_file(part_list, part_path): with open(part_path, "w") as f: header_labels = ["Read_ID", "Status", "Edge", "Top Score", "Total Score", 
"Header"] spaced_header = ["{:11}".format(h) for h in header_labels] f.write("\t".join(spaced_header)) f.write("\n") for read_label in sorted(part_list): spaced_label = ["{:11}".format(h) for h in read_label] f.write("\t".join(spaced_label)) f.write("\n") def _read_partitioning_file(partitioning_file): part_list = [] with open(partitioning_file, "r") as f: for i, line in enumerate(f): if i > 0: line = line.strip() tokens = [t.strip() for t in line.split("\t")] for int_ind in [0, 3, 4]: tokens[int_ind] = int(tokens[int_ind]) part_list.append(tuple(tokens)) return part_list def find_coverage(frequency_file): coverage = 0.0 if os.path.isfile(frequency_file): header, freqs = div.read_frequency_path(frequency_file) cov_ind = header.index("Cov") all_covs = [f[cov_ind] for f in freqs] coverage = _mean(all_covs) #print min(all_covs), _mean(all_covs), max(all_covs) return coverage def write_edge_reads(it, side, edge_id, all_reads, partitioning, out_file): all_reads_dict = fp.read_sequence_dict(all_reads) part_list = _read_partitioning_file(partitioning) edge_header_name = "Read_{0}|Iter_{1}|Side_{2}|Edge_{3}|{4}" edge_reads = {} for read_id, status, edge, _, _, header in part_list: if status == "Partitioned" and edge != "NA" and int(edge) == edge_id: edge_seq = all_reads_dict[header] edge_header = edge_header_name.format(read_id, it, side, edge_id, header) edge_reads[edge_header] = edge_seq if edge_reads and list(edge_reads.values())[0]: fp.write_fasta_dict(edge_reads, out_file) def init_partitioning(edges, side, pre_partitioning, pre_read_align, extended, partitioning): FLANKING_LEN = trestle_config.vals["flanking_len"] CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"] #dict from read_header to edge extend_overlap_reads = {} for edge in edges: non_overlap_reads = 0 aligns = _read_alignment(pre_read_align.format(side, edge), extended[(side, edge)], CONS_ALN_RATE) if aligns and aligns[0]: for aln in aligns[0]: edge_header = aln.qry_id read_header = edge_header.split("|")[-1] if ((side == "in" and aln.trg_start < FLANKING_LEN) or (side == "out" and aln.trg_end >= aln.trg_len - FLANKING_LEN)): extend_overlap_reads[read_header] = str(edge) else: non_overlap_reads += 1 logger.debug("Side %s, edge %s, non-overlap reads = %d", side, edge, non_overlap_reads) partitioned_reads = [] part_list = _read_partitioning_file(pre_partitioning) for read_id, _, edge, _, _, header in part_list: if header in extend_overlap_reads: partitioned_reads.append((read_id, "Partitioned", extend_overlap_reads[header], 1, 0, header)) else: partitioned_reads.append((read_id, "None", "NA", 0, 0, header)) _write_partitioning_file(partitioned_reads, partitioning) #Cut Consensus Functions def find_read_endpoints(alignment_file, template): CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"] read_endpoints = {} aligns = _read_alignment(alignment_file, template, CONS_ALN_RATE) if aligns and aligns[0]: for aln in aligns[0]: read_header = aln.qry_id start = aln.trg_start end = aln.trg_end if read_header not in read_endpoints: read_endpoints[read_header] = (start, end) else: logger.debug("No read alignment to template, no read_endpoints") return read_endpoints def locate_consensus_cutpoint(side, read_endpoints, edge_read_file): MIN_EDGE_COV = trestle_config.vals["min_edge_cov"] all_endpoints = [] max_endpoint = 0 edge_reads = fp.read_sequence_dict(edge_read_file) for edge_header in edge_reads: parts = edge_header.split("|") read_header = parts[-1] if read_header in read_endpoints: endpoint = read_endpoints[read_header] if max(endpoint) > 
max_endpoint: max_endpoint = max(endpoint) all_endpoints.append(endpoint) coverage = [0 for _ in range(max_endpoint + 1)] for start, end in all_endpoints: for x in range(start, end): coverage[x] += 1 window_len = 100 cutpoint = -1 for i in range(len(coverage) - window_len): if side == "in": window_start = (len(coverage) - window_len) - i window_end = len(coverage) - i if window_start < 0: window_start = 0 if window_end > len(coverage): window_end = len(coverage) avg_cov = _mean(coverage[window_start:window_end]) if avg_cov >= MIN_EDGE_COV: cutpoint = window_end break elif side == "out": window_start = i window_end = i + window_len if window_start < 0: window_start = 0 if window_end > len(coverage): window_end = len(coverage) avg_cov = _mean(coverage[window_start:window_end]) if avg_cov >= MIN_EDGE_COV: cutpoint = window_start break return cutpoint def truncate_consensus(side, cutpoint, cons_al_file, template, polished_consensus, cut_cons_file): if cutpoint == -1: logger.debug("No cutpoint for consensus file") return CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"] cons_al = _read_alignment(cons_al_file, template, CONS_ALN_RATE) consensus_endpoint = -1 if cons_al and cons_al[0]: consensus_endpoint = _find_consensus_endpoint(cutpoint, cons_al, side) else: logger.debug("No cons alignment to template, no cut consensus") return if consensus_endpoint != -1: cons_seqs = fp.read_sequence_dict(polished_consensus) cons_head = list(cons_seqs.keys())[0] consensus = list(cons_seqs.values())[0] if side == "in": start = 0 end = consensus_endpoint elif side == "out": start = consensus_endpoint end = len(consensus) cut_head = "".join([cons_head, "|{0}_{1}".format(start, end)]) cut_dict = {cut_head:consensus[start:end]} fp.write_fasta_dict(cut_dict, cut_cons_file) def _find_consensus_endpoint(cutpoint, aligns, side): consensus_endpoint = -1 #first try collapsing coll_aln = _collapse_cons_aln(aligns) if cutpoint >= coll_aln.trg_start and cutpoint < coll_aln.trg_end: trg_aln, _ = _index_mapping(coll_aln.trg_seq) _, aln_qry = _index_mapping(coll_aln.qry_seq) cutpoint_minus_start = cutpoint - coll_aln.trg_start aln_ind = trg_aln[cutpoint_minus_start] qry_ind = aln_qry[aln_ind] consensus_endpoint = qry_ind + coll_aln.qry_start else: #otherwise try each alignment MIN_SUPP_ALN_LEN = trestle_config.vals["min_supp_align_len"] #save tuples of cutpoint distance, cutpoint aln_endpoints = [] for i, aln in enumerate(aligns[0]): if i == 0 or len(aln.trg_seq) >= MIN_SUPP_ALN_LEN: if cutpoint >= aln.trg_start and cutpoint < aln.trg_end: trg_aln, _ = _index_mapping(aln.trg_seq) _, aln_qry = _index_mapping(aln.qry_seq) cutpoint_minus_start = cutpoint - aln.trg_start if cutpoint_minus_start < 0: logger.warning("%s %s %s %s %s", aln.qry_id, aln.trg_id, side, cutpoint, cutpoint_minus_start) aln_ind = trg_aln[0] elif cutpoint_minus_start >= len(trg_aln): logger.warning("%s %s %s %s %s", aln.qry_id, aln.trg_id, side, cutpoint, cutpoint_minus_start) aln_ind = trg_aln[-1] else: aln_ind = trg_aln[cutpoint_minus_start] qry_ind = aln_qry[aln_ind] endpoint = qry_ind + coll_aln.qry_start aln_endpoints.append((0, endpoint)) elif side == "in" and cutpoint >= aln.trg_end: endpoint = aln.qry_end distance = cutpoint - aln.trg_end aln_endpoints.append((distance, endpoint)) elif side == "out" and cutpoint < aln.trg_start: endpoint = aln.qry_start distance = aln.trg_start - cutpoint aln_endpoints.append((distance, endpoint)) if aln_endpoints: consensus_endpoint = sorted(aln_endpoints)[0][1] return consensus_endpoint #Partition Reads 
Functions def partition_reads(edges, it, side, position_path, cons_align_path, template, read_align_path, consensuses, confirmed_pos_path, part_file, headers_to_id): CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"] BUFFER_COUNT = trestle_config.vals["buffer_count"] skip_bool = False _, pos = div.read_positions(position_path) cons_aligns = {} for edge_id in edges: if not os.path.isfile(cons_align_path.format(it, side, edge_id)): skip_bool = True else: cons_aligns[edge_id] = _read_alignment(cons_align_path.format(it, side, edge_id), template, CONS_ALN_RATE) if (skip_bool or not cons_aligns or not cons_aligns[edge_id] or not cons_aligns[edge_id][0]): logger.debug("No cons alignment found for edge %s", edge_id) skip_bool = True if skip_bool: if it <= 1: confirmed_pos = {"total":[], "sub":[], "ins":[], "del":[]} rejected_pos = {"total":[], "sub":[], "ins":[], "del":[]} consensus_pos = pos else: previous_pos = _read_confirmed_positions( confirmed_pos_path.format(it - 1, side)) confirmed_pos, rejected_pos, consensus_pos = previous_pos else: #collapse multiple consensus alignments to the template coll_cons_aligns = {} for edge_id in cons_aligns: aln = cons_aligns[edge_id] coll_cons_aligns[edge_id] = _collapse_cons_aln(aln) curr_pos = _evaluate_positions(pos, coll_cons_aligns, side) confirmed_pos, rejected_pos, consensus_pos = curr_pos _write_confirmed_positions(confirmed_pos, rejected_pos, pos, confirmed_pos_path.format(it, side)) read_aligns = {} for edge_id in edges: if (not os.path.isfile(read_align_path.format(it, side, edge_id)) or not os.path.isfile(consensuses[(it, side, edge_id)])): skip_bool = True elif not skip_bool: read_aligns[edge_id] = _read_alignment( read_align_path.format(it, side, edge_id), consensuses[(it, side, edge_id)], CONS_ALN_RATE) if (skip_bool or not read_aligns or not read_aligns[edge_id] or not read_aligns[edge_id][0]): logger.debug("No read alignment found for edge %s", edge_id) skip_bool = True if skip_bool: partitioning = _read_partitioning_file(part_file.format(it - 1, side)) else: partitioning = _classify_reads(read_aligns, consensus_pos, headers_to_id, BUFFER_COUNT) _write_partitioning_file(partitioning, part_file.format(it, side)) def _read_alignment(alignment, target_path, min_aln_rate): alignments = [] aln_reader = SynchronizedSamReader(alignment, fp.read_sequence_dict(target_path), config.vals["max_read_coverage"]) while not aln_reader.is_eof(): ctg_id, ctg_aln = aln_reader.get_chunk() if ctg_id is None: break alignments.append(ctg_aln) aln_reader.close() return alignments def _collapse_cons_aln(cons_aligns): MAX_SUPP_ALIGN_OVERLAP = trestle_config.vals["max_supp_align_overlap"] coll_aln = None for aln in cons_aligns[0]: if coll_aln is None: coll_aln = aln elif _overlap(coll_aln, aln) <= MAX_SUPP_ALIGN_OVERLAP: coll_aln = _collapse(coll_aln, aln) return coll_aln def _overlap(aln_one, aln_two): qry_overlap_lens = [] if (aln_one.qry_start >= aln_two.qry_start and aln_one.qry_start < aln_two.qry_end): if aln_one.qry_end >= aln_two.qry_end: qry_overlap_lens.append(aln_two.qry_end - aln_one.qry_start) else: qry_overlap_lens.append(aln_one.qry_end - aln_one.qry_start) if (aln_one.qry_end > aln_two.qry_start and aln_one.qry_end <= aln_two.qry_end): if aln_one.qry_start <= aln_two.qry_start: qry_overlap_lens.append(aln_one.qry_end - aln_two.qry_start) else: qry_overlap_lens.append(aln_one.qry_end - aln_one.qry_start) if (aln_two.qry_start >= aln_one.qry_start and aln_two.qry_start < aln_one.qry_end): if aln_two.qry_end >= aln_one.qry_end: 
qry_overlap_lens.append(aln_one.qry_end - aln_two.qry_start) else: qry_overlap_lens.append(aln_two.qry_end - aln_two.qry_start) if (aln_two.qry_end > aln_one.qry_start and aln_two.qry_end <= aln_one.qry_end): if aln_two.qry_start <= aln_one.qry_start: qry_overlap_lens.append(aln_two.qry_end - aln_one.qry_start) else: qry_overlap_lens.append(aln_two.qry_end - aln_two.qry_start) qry_len = 0 if qry_overlap_lens: qry_len = min(qry_overlap_lens) trg_overlap_lens = [] if (aln_one.trg_start >= aln_two.trg_start and aln_one.trg_start < aln_two.trg_end): if aln_one.trg_end >= aln_two.trg_end: trg_overlap_lens.append(aln_two.trg_end - aln_one.trg_start) else: trg_overlap_lens.append(aln_one.trg_end - aln_one.trg_start) if (aln_one.trg_end > aln_two.trg_start and aln_one.trg_end <= aln_two.trg_end): if aln_one.trg_start <= aln_two.trg_start: trg_overlap_lens.append(aln_one.trg_end - aln_two.trg_start) else: trg_overlap_lens.append(aln_one.trg_end - aln_one.trg_start) if (aln_two.trg_start >= aln_one.trg_start and aln_two.trg_start < aln_one.trg_end): if aln_two.trg_end >= aln_one.trg_end: trg_overlap_lens.append(aln_one.trg_end - aln_two.trg_start) else: trg_overlap_lens.append(aln_two.trg_end - aln_two.trg_start) if (aln_two.trg_end > aln_one.trg_start and aln_two.trg_end <= aln_one.trg_end): if aln_two.trg_start <= aln_one.trg_start: trg_overlap_lens.append(aln_two.trg_end - aln_one.trg_start) else: trg_overlap_lens.append(aln_two.trg_end - aln_two.trg_start) trg_len = 0 if trg_overlap_lens: trg_len = min(trg_overlap_lens) return max([qry_len, trg_len]) def _collapse(aln_one, aln_two): MAX_SUPP_ALIGN_OVERLAP = trestle_config.vals["max_supp_align_overlap"] out_aln = copy.deepcopy(aln_one) if (aln_one.qry_sign == "-" or aln_two.qry_sign == "-" or _overlap(aln_one, aln_two) > MAX_SUPP_ALIGN_OVERLAP): return out_aln if (aln_one.qry_start <= aln_two.qry_start and aln_one.trg_start <= aln_two.trg_start): qry_merge_outs = _merge_alns(aln_one.qry_start, aln_one.qry_end, aln_one.qry_seq, aln_two.qry_start, aln_two.qry_end, aln_two.qry_seq) one_qry_seq, two_qry_seq, out_qry_end = qry_merge_outs trg_merge_outs = _merge_alns(aln_one.trg_start, aln_one.trg_end, aln_one.trg_seq, aln_two.trg_start, aln_two.trg_end, aln_two.trg_seq) one_trg_seq, two_trg_seq, out_trg_end = trg_merge_outs fill_qry = "" fill_trg = "" qry_lens = len(one_qry_seq) + len(two_qry_seq) trg_lens = len(one_trg_seq) + len(two_trg_seq) if qry_lens > trg_lens: diff = qry_lens - trg_lens fill_trg = "-" * diff elif trg_lens > qry_lens: diff = trg_lens - qry_lens fill_qry = "-" * diff out_qry_seq = "".join([one_qry_seq, fill_qry, two_qry_seq]) out_trg_seq = "".join([one_trg_seq, fill_trg, two_trg_seq]) out_err_rate = ((aln_one.err_rate * len(aln_one.trg_seq) + aln_two.err_rate * len(aln_two.trg_seq)) / (len(aln_one.trg_seq) + len(aln_two.trg_seq))) out_aln = Alignment(aln_one.qry_id, aln_one.trg_id, aln_one.qry_start, out_qry_end, aln_one.qry_sign, aln_one.qry_len, aln_one.trg_start, out_trg_end, aln_one.trg_sign, aln_one.trg_len, out_qry_seq, out_trg_seq, out_err_rate, is_secondary=False) return out_aln elif (aln_two.qry_start <= aln_one.qry_start and aln_two.trg_start <= aln_one.trg_start): qry_merge_outs = _merge_alns(aln_two.qry_start, aln_two.qry_end, aln_two.qry_seq, aln_one.qry_start, aln_one.qry_end, aln_one.qry_seq) two_qry_seq, one_qry_seq, out_qry_end = qry_merge_outs trg_merge_outs = _merge_alns(aln_two.trg_start, aln_two.trg_end, aln_two.trg_seq, aln_one.trg_start, aln_one.trg_end, aln_one.trg_seq) two_trg_seq, one_trg_seq, 
out_trg_end = trg_merge_outs fill_qry = "" fill_trg = "" qry_lens = len(two_qry_seq) + len(one_qry_seq) trg_lens = len(two_trg_seq) + len(one_trg_seq) if qry_lens > trg_lens: diff = qry_lens - trg_lens fill_trg = "-" * diff elif trg_lens > qry_lens: diff = trg_lens - qry_lens fill_qry = "-" * diff out_qry_seq = "".join([two_qry_seq, fill_qry, one_qry_seq]) out_trg_seq = "".join([two_trg_seq, fill_trg, one_trg_seq]) out_err_rate = ((aln_one.err_rate * len(aln_one.trg_seq) + aln_two.err_rate * len(aln_two.trg_seq)) / (len(aln_one.trg_seq) + len(aln_two.trg_seq))) out_aln = Alignment(aln_one.qry_id, aln_one.trg_id, aln_two.qry_start, out_qry_end, aln_one.qry_sign, aln_one.qry_len, aln_two.trg_start, out_trg_end, aln_one.trg_sign, aln_one.trg_len, out_qry_seq, out_trg_seq, out_err_rate, is_secondary=False) return out_aln return out_aln def _merge_alns(first_start, first_end, first_seq, second_start, second_end, second_seq): first_out_seq = first_seq second_out_seq = second_seq out_end = second_end if first_end <= second_start: fill_qry_seq = "N" * (second_start - first_end) first_out_seq = "".join([first_seq, fill_qry_seq]) second_out_seq = second_seq else: if first_end < second_end: overlap = first_end - second_start two_cut_ind = _overlap_to_aln_ind(overlap, second_seq) first_out_seq = first_seq second_out_seq = second_seq[two_cut_ind:] else: first_out_seq = first_seq second_out_seq = "" out_end = first_end return first_out_seq, second_out_seq, out_end def _overlap_to_aln_ind(overlap, aln): num_bases = 0 for i, base in enumerate(aln): if base != "-": num_bases += 1<|fim▁hole|> class EdgeAlignment(object): __slots__ = ("edge_id", "qry_seq", "trg_seq", "qry_start", "trg_start", "trg_end", "in_alignment", "curr_aln_ind", "curr_qry_ind", "curr_qry_nuc", "curr_trg_nuc", "curr_ins_nuc") def __init__(self, edge_id, qry_seq, trg_seq, qry_start, trg_start, trg_end): self.edge_id = edge_id self.qry_seq = flye_aln.shift_gaps(trg_seq, qry_seq) self.trg_seq = flye_aln.shift_gaps(self.qry_seq, trg_seq) self.qry_start = qry_start self.trg_start = trg_start self.trg_end = trg_end self.in_alignment = False self.curr_aln_ind = -1 self.curr_qry_ind = -1 self.curr_qry_nuc = "" self.curr_trg_nuc = "" self.curr_ins_nuc = "" def reset_nucs(self): self.curr_qry_nuc = "" self.curr_trg_nuc = "" self.curr_ins_nuc = "" def _evaluate_positions(pos, cons_aligns, side): #Includes insertions! 
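    # Overview of the sweep below (descriptive comments only): one
    # EdgeAlignment cursor per edge consensus walks template positions
    # trg_ind from the leftmost trg_start to the rightmost trg_end. Before a
    # position is compared, each active cursor consumes template gap columns
    # ("-" in trg_seq), collecting inserted query bases into curr_ins_nuc. At
    # each tentative divergent position inside the conservative window
    # (trg_ind < min(trg_end) for "in", trg_ind >= max(trg_start) for "out")
    # the per-edge nucleotides are compared:
    #   insertion    confirmed if any edge accumulated curr_ins_nuc
    #   deletion     confirmed if edges disagree and one query nuc is "-"
    #   substitution confirmed if edges disagree on non-gap, non-"N" nucs
    # Confirmed positions are also recorded per edge in consensus coordinates
    # (consensus_pos) for the later read-scoring step.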
confirmed_pos = {"total":[], "sub":[], "ins":[], "del":[]} rejected_pos = {"total":[], "sub":[], "ins":[], "del":[]} consensus_pos = {e:[] for e in cons_aligns} alns = {} for edge_id in cons_aligns: orig_aln = cons_aligns[edge_id] alns[edge_id] = EdgeAlignment(edge_id, orig_aln.qry_seq, orig_aln.trg_seq, orig_aln.qry_start, orig_aln.trg_start, orig_aln.trg_end) min_start_edge = min([alns[e].trg_start for e in alns]) max_end_edge = max([alns[e].trg_end for e in alns]) #end indices for conservatively defining confirmed positions min_end_edge = min([alns[e].trg_end for e in alns]) max_start_edge = max([alns[e].trg_start for e in alns]) for trg_ind in range(min_start_edge, max_end_edge): for edge_id in alns: aln = alns[edge_id] if aln.trg_start == trg_ind: aln.curr_aln_ind = 0 aln.curr_qry_ind = aln.qry_start aln.in_alignment = True if aln.trg_start > trg_ind or aln.trg_end <= trg_ind: aln.in_alignment = False if aln.in_alignment: while aln.trg_seq[aln.curr_aln_ind] == "-": if aln.qry_seq[aln.curr_aln_ind] != "-": aln.curr_ins_nuc += aln.qry_seq[aln.curr_aln_ind] aln.curr_qry_ind += 1 aln.curr_aln_ind += 1 aln.curr_qry_nuc = aln.qry_seq[aln.curr_aln_ind] aln.curr_trg_nuc = aln.trg_seq[aln.curr_aln_ind] if trg_ind in pos["total"]: if ((side == "in" and trg_ind < min_end_edge) or (side == "out" and trg_ind >= max_start_edge)): ins_confirmed = False del_confirmed = False sub_confirmed = False qry_nuc = "" trg_nuc = "" for edge_id in alns: aln = alns[edge_id] if aln.in_alignment: #Directly add positions only to consensuses # where insertions occur #Add the position prior to curr_qry_ind to # account for insertion if aln.curr_ins_nuc: ins_confirmed = True consensus_pos[edge_id].append(aln.curr_qry_ind - 1) if qry_nuc and qry_nuc != aln.curr_qry_nuc: if qry_nuc != "N" and aln.curr_qry_nuc != "N": if qry_nuc == "-": del_confirmed = True else: sub_confirmed = True else: qry_nuc = aln.curr_qry_nuc if (trg_nuc and trg_nuc != aln.curr_trg_nuc and trg_nuc != "N" and aln.curr_trg_nuc != "N"): logger.debug("Inconsistent trg_nuc, %s %s %s %s", edge_id, trg_ind, trg_nuc, aln.curr_trg_nuc) trg_nuc = aln.curr_trg_nuc if ins_confirmed or del_confirmed or sub_confirmed: confirmed_pos["total"].append(trg_ind) #Add positions to consensuses for only subs/deletions if del_confirmed or sub_confirmed: for edge_id in alns: aln = alns[edge_id] if aln.in_alignment: consensus_pos[edge_id].append(aln.curr_qry_ind) if trg_ind in pos["ins"]: if ins_confirmed: confirmed_pos["ins"].append(trg_ind) else: rejected_pos["ins"].append(trg_ind) if trg_ind in pos["del"]: if del_confirmed: confirmed_pos["del"].append(trg_ind) else: rejected_pos["del"].append(trg_ind) if trg_ind in pos["sub"]: if sub_confirmed: confirmed_pos["sub"].append(trg_ind) else: rejected_pos["sub"].append(trg_ind) else: rejected_pos["total"].append(trg_ind) if trg_ind in pos["ins"]: rejected_pos["ins"].append(trg_ind) if trg_ind in pos["del"]: rejected_pos["del"].append(trg_ind) if trg_ind in pos["sub"]: rejected_pos["sub"].append(trg_ind) for edge_id in alns: aln = alns[edge_id] if aln.in_alignment: if aln.qry_seq[aln.curr_aln_ind] != "-": aln.curr_qry_ind += 1 aln.curr_aln_ind += 1 aln.reset_nucs() return confirmed_pos, rejected_pos, consensus_pos def _write_confirmed_positions(confirmed, rejected, pos, out_file): with open(out_file, 'w') as f: f.write(">Confirmed_total_positions_{0}\n" .format(len(confirmed["total"]))) f.write(",".join([str(x) for x in sorted(confirmed["total"])]) + "\n") f.write(">Confirmed_sub_positions_{0}\n".format(len(confirmed["sub"]))) 
f.write(",".join([str(x) for x in sorted(confirmed["sub"])]) + "\n") f.write(">Confirmed_del_positions_{0}\n".format(len(confirmed["del"]))) f.write(",".join([str(x) for x in sorted(confirmed["del"])]) + "\n") f.write(">Confirmed_ins_positions_{0}\n".format(len(confirmed["ins"]))) f.write(",".join([str(x) for x in sorted(confirmed["ins"])]) + "\n") f.write(">Rejected_total_positions_{0}\n".format(len(rejected["total"]))) f.write(",".join([str(x) for x in sorted(rejected["total"])]) + "\n") f.write(">Rejected_sub_positions_{0}\n".format(len(rejected["sub"]))) f.write(",".join([str(x) for x in sorted(rejected["sub"])]) + "\n") f.write(">Rejected_del_positions_{0}\n".format(len(rejected["del"]))) f.write(",".join([str(x) for x in sorted(rejected["del"])])+ "\n") f.write(">Rejected_ins_positions_{0}\n".format(len(rejected["ins"]))) f.write(",".join([str(x) for x in sorted(rejected["ins"])]) + "\n") f.write(">Tentative_total_positions_{0}\n".format(len(pos["total"]))) f.write(",".join([str(x) for x in sorted(pos["total"])]) + "\n") f.write(">Tentative_sub_positions_{0}\n".format(len(pos["sub"]))) f.write(",".join([str(x) for x in sorted(pos["sub"])]) + "\n") f.write(">Tentative_del_positions_{0}\n".format(len(pos["del"]))) f.write(",".join([str(x) for x in sorted(pos["del"])]) + "\n") f.write(">Tentative_ins_positions_{0}\n".format(len(pos["ins"]))) f.write(",".join([str(x) for x in sorted(pos["ins"])]) + "\n") def _read_confirmed_positions(confirmed_file): confirmed = {"total":[], "sub":[], "ins":[], "del":[]} rejected = {"total":[], "sub":[], "ins":[], "del":[]} pos = {"total":[], "sub":[], "ins":[], "del":[]} with open(confirmed_file, "r") as f: for i, line in enumerate(f): line = line.strip() if i == 1 and line: confirmed["total"] = [int(x) for x in line.split(",")] elif i == 3 and line: confirmed["sub"] = [int(x) for x in line.split(",")] elif i == 5 and line: confirmed["del"] = [int(x) for x in line.split(",")] elif i == 7 and line: confirmed["ins"] = [int(x) for x in line.split(",")] elif i == 9 and line: rejected["total"] = [int(x) for x in line.split(",")] elif i == 11 and line: rejected["sub"] = [int(x) for x in line.split(",")] elif i == 13 and line: rejected["del"] = [int(x) for x in line.split(",")] elif i == 15 and line: rejected["ins"] = [int(x) for x in line.split(",")] elif i == 17 and line: pos["total"] = [int(x) for x in line.split(",")] elif i == 19 and line: pos["sub"] = [int(x) for x in line.split(",")] elif i == 21 and line: pos["del"] = [int(x) for x in line.split(",")] elif i == 23 and line: pos["ins"] = [int(x) for x in line.split(",")] return confirmed, rejected, pos def _classify_reads(read_aligns, consensus_pos, headers_to_id, buffer_count): #Includes insertion positions where an insertion occurs right before the #position for the read. 
#partitioning format same as above: #list of (read_id, status, edge_id, top_score, total_score, header) partitioning = [] read_scores = {} for edge_id in read_aligns: read_counts = {} for aln in read_aligns[edge_id][0]: read_header = aln.qry_id cons_header = aln.trg_id #Unmapped segments will not be scored if cons_header == "*": continue if read_header not in read_scores: read_scores[read_header] = {} read_scores[read_header][edge_id] = 0 if read_header not in read_counts: read_counts[read_header] = 1 else: read_counts[read_header] += 1 #Any alignments after the first supplementary will not be scored if read_counts[read_header] > 2: continue positions = consensus_pos[edge_id] trg_aln, _ = _index_mapping(aln.trg_seq) for pos in positions: if pos >= aln.trg_start and pos < aln.trg_end: pos_minus_start = pos - aln.trg_start aln_ind = trg_aln[pos_minus_start] if aln.qry_seq[aln_ind] == aln.trg_seq[aln_ind]: read_scores[read_header][edge_id] += 1 #Iterate through all read_headers so partitioning will be a complete set for read_header in headers_to_id: read_id = headers_to_id[read_header] if read_header in read_scores: tie_bool = False top_edge = 0 top_score = 0 total_score = 0 for edge_id in read_scores[read_header]: edge_score = read_scores[read_header][edge_id] #print edge_id, edge_score, top_score if edge_score - buffer_count > top_score: top_edge = edge_id top_score = edge_score tie_bool = False elif (edge_score - buffer_count <= top_score and edge_score >= top_score): top_score = edge_score tie_bool = True elif (edge_score >= top_score - buffer_count and edge_score < top_score): tie_bool = True total_score += edge_score if total_score == 0: status_label = "None" edge_label = "NA" elif tie_bool: status_label = "Tied" edge_label = "NA" else: status_label = "Partitioned" edge_label = str(top_edge) partitioning.append((read_id, status_label, edge_label, top_score, total_score, read_header)) else: status_label = "None" edge_label = "NA" top_score = 0 total_score = 0 partitioning.append((read_id, status_label, edge_label, top_score, total_score, read_header)) return partitioning def _index_mapping(aln): #Given a genomic index, return the alignment index of the alignment al_inds = [] #Given an alignment index, return the genomic index at that position gen_inds = [] for i,b in enumerate(aln): gen_inds.append(len(al_inds)) if b != '-': al_inds.append(i) return al_inds, gen_inds def init_side_stats(rep, side, repeat_edges, min_overlap, position_path, partitioning, prev_parts, template_len, stats_file): SUB_THRESH = trestle_config.vals["sub_thresh"] DEL_THRESH = trestle_config.vals["del_thresh"] INS_THRESH = trestle_config.vals["ins_thresh"] FLANKING_LEN = trestle_config.vals["flanking_len"] BUFFER_COUNT = trestle_config.vals["buffer_count"] MAX_ITER = trestle_config.vals["max_iter"] MIN_EDGE_COV = trestle_config.vals["min_edge_cov"] CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"] _, pos = div.read_positions(position_path) #Count partitioned reads edge_below_cov = False part_list = _read_partitioning_file(partitioning) edge_reads, _, _ = _get_partitioning_info(part_list, repeat_edges[rep][side]) #Check break condition for iteration loop for edge in repeat_edges[rep][side]: if edge_reads[edge] < MIN_EDGE_COV: edge_below_cov = True prev_parts.add(tuple(part_list)) #Prepare header for iteration stats #Iter,Rep Lens,Confirmed/Rejected Pos,Partitioned Reads header_labels = ["Iter"] for edge in sorted(repeat_edges[rep][side]): header_labels.extend(["Rep Len {0}".format(edge)]) 
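    #e.g. with edges [2, 5] (assumed values) the finished row, tab-separated
    #and padded to width 11, reads:
    #  Iter  Rep Len 2  Rep Len 5  Confirmed Pos  Rejected Pos
    #  #Reads 2  #Reads 5  #Tied  #Unassigned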
header_labels.extend(["Confirmed Pos", "Rejected Pos"]) for edge in sorted(repeat_edges[rep][side]): header_labels.extend(["#Reads {0}".format(edge)]) header_labels.extend(["#Tied", "#Unassigned"]) spaced_header = ["{:11}".format(h) for h in header_labels] #Write stats output with open(stats_file, 'w') as f: f.write("{0:25}\t{1}\n".format("Repeat:", rep)) f.write("{0:25}\t'{1}'\n".format("Side:", side)) f.write("{0:25}\t".format("Edges:")) f.write(", ".join([str(x) for x in sorted(repeat_edges[rep][side])]) + "\n") f.write("{0:25}\t{1}\n\n".format("Template Length:", template_len)) f.write("Initial Option Values\n") f.write("{0:25}\t{1}\n".format("min_overlap:", min_overlap)) f.write("{0:25}\t{1}\n".format("sub_thresh:", SUB_THRESH)) f.write("{0:25}\t{1}\n".format("del_thresh:", DEL_THRESH)) f.write("{0:25}\t{1}\n".format("ins_thresh:", INS_THRESH)) f.write("{0:25}\t{1}\n".format("flanking_len:", FLANKING_LEN)) f.write("{0:25}\t{1}\n".format("buffer_count:", BUFFER_COUNT)) f.write("{0:25}\t{1}\n".format("max_iter:", MAX_ITER)) f.write("{0:25}\t{1}\n".format("min_edge_cov:", MIN_EDGE_COV)) f.write("{0:25}\t{1}\n".format("cons_aln_rate:", CONS_ALN_RATE)) f.write("\n") f.write("The following numbers are calculated based on moving ") f.write("into the repeat from the '{0}' direction\n\n".format(side)) f.write("{0}\n".format("Divergent Positions:")) f.write("{0:25}\t{1}\n".format("Total", len(pos["total"]))) f.write("{0:25}\t{1}\n".format("Substitutions", len(pos["sub"]))) f.write("{0:25}\t{1}\n".format("Deletions", len(pos["del"]))) f.write("{0:25}\t{1}\n".format("Insertions", len(pos["ins"]))) f.write("\n") f.write("{0:25}\t{1}\n".format("Total Starting Reads:", sum(edge_reads.values()))) for edge in sorted(repeat_edges[rep][side]): f.write("{0}{1}{2:18}\t{3}\n".format("Edge ", edge, " starting reads:", edge_reads[edge])) f.write("\n\n") f.write("\t".join(spaced_header)) f.write("\n") return edge_below_cov def update_side_stats(edges, it, side, cons_align_path, template, confirmed_pos_path, partitioning, prev_parts, stats_file): CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"] MIN_EDGE_COV = trestle_config.vals["min_edge_cov"] #Write stats for each iteration #Iter,Rep Lens,Confirmed/Rejected Pos,Partitioned Reads stats_out = [str(it)] for edge_id in sorted(edges): rep_len = 0 if os.path.isfile(cons_align_path.format(it, side, edge_id)): cons_align = _read_alignment(cons_align_path.format(it, side, edge_id), template, CONS_ALN_RATE) if cons_align and cons_align[0]: if side == "in": rep_len = (cons_align[0][0].qry_len - cons_align[0][0].qry_start) elif side == "out": rep_len = cons_align[0][0].qry_end stats_out.extend([str(rep_len)]) confirmed_total = 0 rejected_total = 0 if it > 0: confirmed, rejected, _ = _read_confirmed_positions(confirmed_pos_path) confirmed_total = len(confirmed["total"]) rejected_total = len(rejected["total"]) stats_out.extend([str(confirmed_total), str(rejected_total)]) edge_below_cov = False dup_part = False part_list = _read_partitioning_file(partitioning) edge_reads, tied_reads, unassigned_reads = _get_partitioning_info(part_list, edges) for edge_id in sorted(edges): stats_out.extend([str(edge_reads[edge_id])]) stats_out.extend([str(tied_reads), str(unassigned_reads)]) #Check break conditions for iteration loop for edge in edges: if edge_reads[edge] < MIN_EDGE_COV: edge_below_cov = True if tuple(part_list) in prev_parts: dup_part = True else: prev_parts.add(tuple(part_list)) spaced_header = ["{:11}".format(x) for x in stats_out] with open(stats_file, "a") as f: 
f.write("\t".join(spaced_header)) f.write("\n") return edge_below_cov, dup_part def finalize_side_stats(edges, it, side, cons_align_path, template, cons_vs_cons_path, consensuses, confirmed_pos_path, partitioning, edge_below_cov, dup_part, term_bool, stats_file): CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"] MAX_ITER = trestle_config.vals["max_iter"] with open(stats_file, "a") as f: f.write("\n\n") f.write("{0:26}\t{1}\n\n".format("Final Iter:", it)) f.write("Iteration terminated because:\n") if it == MAX_ITER: f.write("Max iter reached\n") if edge_below_cov: f.write("Edge coverage fell below min_edge_cov\n") if dup_part: f.write("Partitioning was identical to a previous iteration\n") if term_bool: f.write("Encountered empty consensus sequence or alignment\n") f.write("\n") #Write out alignment indices for edges vs template limit_ind = None limit_label = "" if side == "in": limit_label = "Min Template End" elif side == "out": limit_label = "Max Template Start" for edge_id in sorted(edges): qry_start = 0 qry_end = 0 qry_len = 0 trg_start = 0 trg_end = 0 trg_len = 0 curr_cons_path = cons_align_path.format(it, side, edge_id) if os.path.isfile(curr_cons_path): cons_align = _read_alignment(curr_cons_path, template, CONS_ALN_RATE) if cons_align and cons_align[0]: #collapse multiple consensus alignments coll_cons = _collapse_cons_aln(cons_align) qry_start = coll_cons.qry_start qry_end = coll_cons.qry_end qry_len = coll_cons.qry_len trg_start = coll_cons.trg_start trg_end = coll_cons.trg_end trg_len = coll_cons.trg_len if limit_ind is None or ( (side == "in" and trg_end < limit_ind) or (side == "out" and trg_start >= limit_ind)): if side == "in": limit_ind = trg_end elif side == "out": limit_ind = trg_start f.write("Edge {0}|Template Alignment\n".format(edge_id)) f.write("{0}{1}{2:20}\t{3:5}-{4:5} of {5:5}\n".format( "Edge ", edge_id, ":", qry_start, qry_end, qry_len)) f.write("{0:26}\t{1:5}-{2:5} of {3:5}\n".format("Template:", trg_start, trg_end, trg_len)) f.write("\n") f.write("{0:26}\t{1}\n".format(limit_label, limit_ind)) f.write("(End of positions considered)\n\n") #Write out alignment indices for edges vs edges edge_pairs = sorted(combinations(edges, 2)) for edge_one, edge_two in edge_pairs: qry_start = 0 qry_end = 0 qry_len = 0 trg_start = 0 trg_end = 0 trg_len = 0 qry_seq = "" trg_seq = "" if (os.path.isfile(cons_vs_cons_path.format(it, side, edge_one, it, side, edge_two)) and os.path.isfile(consensuses[(it, side, edge_two)])): cons_vs_cons = _read_alignment(cons_vs_cons_path.format( it, side, edge_one, it, side, edge_two), consensuses[(it, side, edge_two)], CONS_ALN_RATE) if cons_vs_cons and cons_vs_cons[0]: qry_start = cons_vs_cons[0][0].qry_start qry_end = cons_vs_cons[0][0].qry_end qry_len = cons_vs_cons[0][0].qry_len trg_start = cons_vs_cons[0][0].trg_start trg_end = cons_vs_cons[0][0].trg_end trg_len = cons_vs_cons[0][0].trg_len qry_seq = cons_vs_cons[0][0].qry_seq trg_seq = cons_vs_cons[0][0].trg_seq f.write("Edge {0}|Edge {1} Alignment\n".format(edge_one, edge_two)) f.write("{0}{1}{2:20}\t{3:5}-{4:5} of {5:5}\n".format( "Edge ", edge_one, ":", qry_start, qry_end, qry_len)) f.write("{0}{1}{2:20}\t{3:5}-{4:5} of {5:5}\n".format( "Edge ", edge_two, ":", trg_start, trg_end, trg_len)) div_rate = _calculate_divergence(qry_seq, trg_seq) f.write("{0:26}\t{1:.4f}\n".format("Divergence Rate:", div_rate)) f.write("\n") #Write overall position stats types = ["total", "sub", "del", "ins"] confirmed = {t:[] for t in types} rejected = {t:[] for t in types} pos = {t:[] for t in types} if 
it > 0: confirmed_pos_output = _read_confirmed_positions(confirmed_pos_path) confirmed, rejected, pos = confirmed_pos_output if side == "in": largest_pos = -1 if confirmed["total"]: largest_pos = max(confirmed["total"]) f.write("{0:26}\t{1}\n".format("Largest Confirmed Position:", largest_pos)) elif side == "out": smallest_pos = -1 if confirmed["total"]: smallest_pos = min(confirmed["total"]) f.write("{0:26}\t{1}\n".format("Smallest Confirmed Position:", smallest_pos)) remainings = {} for typ in types: remainings[typ] = len(pos[typ]) - (len(confirmed[typ]) + len(rejected[typ])) type_strs = ["Total", "Sub", "Del", "Ins"] for typ, typ_str in zip(types, type_strs): confirmed_frac = 0.0 rejected_frac = 0.0 remaining_frac = 0.0 if len(pos[typ]) != 0: confirmed_frac = len(confirmed[typ]) / float(len(pos[typ])) rejected_frac = len(rejected[typ]) / float(len(pos[typ])) remaining_frac = remainings[typ] / float(len(pos[typ])) f.write("{0:26}\t{1}/{2} = {3:.3f}\n".format( "Confirmed {0} Positions:".format(typ_str), len(confirmed[typ]), len(pos[typ]), confirmed_frac)) f.write("{0:26}\t{1}/{2} = {3:.3f}\n".format( "Rejected {0} Positions:".format(typ_str), len(rejected[typ]), len(pos[typ]), rejected_frac)) f.write("{0:26}\t{1}/{2} = {3:.3f}\n".format( "Remaining {0} Positions:".format(typ_str), remainings[typ], len(pos[typ]), remaining_frac)) f.write("\n") f.write("\n") #Write overall partitioning stats part_list = _read_partitioning_file(partitioning) edge_reads = {edge:0 for edge in edges} tied_reads = 0 unassigned_reads = 0 total_reads = len(part_list) for _, status, edge, _, _, _ in part_list: if status == "Partitioned" and edge != "NA": edge_reads[int(edge)] += 1 elif status == "Tied": tied_reads += 1 elif status == "None": unassigned_reads += 1 else: exception_str = "Unknown status {0} in partitioning file {1}" raise Exception(exception_str.format(status, partitioning)) for edge_id in sorted(edges): f.write("{0}{1}{2:13}\t{3}/{4} = {5:.4f}\n".format( "Total Edge ", edge_id, " Reads:", edge_reads[edge_id], total_reads, edge_reads[edge_id] / float(total_reads))) f.write("{0:26}\t{1}/{2} = {3:.4f}\n".format("Total Tied Reads:", tied_reads, total_reads, tied_reads / float(total_reads))) f.write("{0:26}\t{1}/{2} = {3:.4f}\n".format("Total Unassigned Reads:", unassigned_reads, total_reads, unassigned_reads / float(total_reads))) f.write("\n") def init_int_stats(rep, repeat_edges, zero_it, position_path, partitioning, all_reads_file, template_len, cov, int_stats_file): #Count edge reads side_reads = {} total_reads = 0 all_side_reads = 0 internal_reads = 0 for side in sorted(repeat_edges[rep]): part_list = _read_partitioning_file(partitioning.format(zero_it, side)) total_reads = len(part_list) partitioning_outputs = _get_partitioning_info(part_list, repeat_edges[rep][side]) side_reads[side], _, _ = partitioning_outputs all_side_reads += sum(side_reads[side].values()) internal_reads = total_reads - all_side_reads all_reads_n50 = _n50(all_reads_file) #Prepare header for iterative integrated stats #in/out Iter,Mean in/out/gap Len,Confirmed/Rejected Pos,Bridging Reads header_labels = [] for side in sorted(repeat_edges[rep]): header_labels.extend(["{0} Iter".format(side)]) header_labels.extend(["in Len", "Gap Len", "out Len"]) header_labels.extend(["Confirmed", "Rejected"]) side_edges = [] for side in sorted(repeat_edges[rep]): side_edges.append([]) for edge in sorted(repeat_edges[rep][side]): side_edges[-1].append("{0}{1}".format(side,edge)) for edge_pair in sorted(product(*side_edges)): 
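        #product(*side_edges) crosses every in-edge with every out-edge; e.g.
        #with assumed in-edges [1, 2] and out-edges [3, 4] the bridging-read
        #columns become "in1|out3", "in1|out4", "in2|out3", "in2|out4".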
header_labels.extend(["{0}".format("|".join(edge_pair))]) spaced_header = ["{:8}".format(x) for x in header_labels] #Write to file with open(int_stats_file, 'w') as f: f.write("{0:16}\t{1}\n".format("Repeat:", rep)) f.write("{0:16}\t{1}\n".format("Template Length:", template_len)) f.write("{0:16}\t{1:.2f}\n".format("Avg Coverage:", cov)) f.write("{0:16}\t{1}\n".format("# All Reads:", total_reads)) f.write("{0:16}\t{1}\n\n".format("All Reads N50:", all_reads_n50)) edge_headers = ["Side", " Edge", "# Reads"] spaced_edge_header = ["{:5}".format(h) for h in edge_headers] f.write("\t".join(spaced_edge_header)) f.write("\n") for side in sorted(repeat_edges[rep]): for edge_id in sorted(repeat_edges[rep][side]): edge_values = [side, edge_id, side_reads[side][edge_id]] spaced_values = ["{:6}".format(h) for h in edge_values] f.write("\t".join(spaced_values)) f.write("\n") f.write("{0:12}\t {1}\n".format("Internal", internal_reads)) f.write("\n\n") f.write("\t".join(spaced_header)) f.write("\n") def update_int_stats(rep, repeat_edges, side_it, cons_align_path, template, template_len, confirmed_pos_path, int_confirmed_path, partitioning, int_stats_file): CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"] stats_out = [] #Add side iters for side in sorted(repeat_edges[rep]): stats_out.extend([str(side_it[side])]) #Find median in, out, and gap lengths medians = {s:0 for s in repeat_edges[rep]} for side in sorted(repeat_edges[rep]): trg_limits = [] for edge_id in sorted(repeat_edges[rep][side]): curr_cons_path = cons_align_path.format(side_it[side], side, edge_id) if os.path.isfile(curr_cons_path): cons_align = _read_alignment(curr_cons_path, template, CONS_ALN_RATE) if cons_align and cons_align[0]: if side == "in": trg_limits.append(cons_align[0][0].trg_end) elif side == "out": trg_limits.append(template_len - cons_align[0][0].trg_start) if trg_limits: medians[side] = _get_median(trg_limits) gap_len = template_len - (medians["in"] + medians["out"]) stats_out.extend([str(medians["in"]), str(gap_len), str(medians["out"])]) #Add confirmed and rejected reads in_confirmed_path = confirmed_pos_path.format(side_it["in"], "in") out_confirmed_path = confirmed_pos_path.format(side_it["out"], "out") types = ["total", "sub", "del", "ins"] int_confirmed = {t:[] for t in types} int_rejected = {t:[] for t in types} pos = {t:[] for t in types} if side_it["in"] > 0 and side_it["out"] > 0: all_in_pos = _read_confirmed_positions(in_confirmed_path) all_out_pos = _read_confirmed_positions(out_confirmed_path) confirmed_pos_outputs = _integrate_confirmed_pos(all_in_pos, all_out_pos) int_confirmed, int_rejected, pos = confirmed_pos_outputs elif side_it["in"] > 0: all_in_pos = _read_confirmed_positions(in_confirmed_path) int_confirmed, int_rejected, pos = all_in_pos elif side_it["out"] > 0: all_out_pos = _read_confirmed_positions(out_confirmed_path) int_confirmed, int_rejected, pos = all_out_pos _write_confirmed_positions(int_confirmed, int_rejected, pos, int_confirmed_path.format(side_it["in"], side_it["out"])) stats_out.extend([str(len(int_confirmed["total"])), str(len(int_rejected["total"]))]) #Get bridging reads for each pair of in/out edges side_headers_dict = {} all_headers = set() for side in sorted(repeat_edges[rep]): side_headers_dict[side] = {} part_list = _read_partitioning_file(partitioning.format(side_it[side], side)) for _, status, edge, _, _, header in part_list: all_headers.add(header) if status == "Partitioned" and edge != "NA": side_headers_dict[side][header] = (side, int(edge)) bridging_reads = {} side_edges 
= [] for side in sorted(repeat_edges[rep]): side_edges.append([]) for edge in sorted(repeat_edges[rep][side]): side_edges[-1].append((side, edge)) for edge_pair in sorted(product(*side_edges)): bridging_reads[edge_pair] = 0 for header in all_headers: if (header in side_headers_dict["in"] and header in side_headers_dict["out"]): in_edge = side_headers_dict["in"][header] out_edge = side_headers_dict["out"][header] bridging_reads[(in_edge, out_edge)] += 1 for edge_pair in sorted(bridging_reads): #stats_out.extend(["{0}".format(edge_pair)]) stats_out.extend([str(bridging_reads[edge_pair])]) spaced_header = ["{:8}".format(x) for x in stats_out] #Write to file with open(int_stats_file, "a") as f: f.write("\t".join(spaced_header)) f.write("\n") def finalize_int_stats(rep, repeat_edges, side_it, cons_align_path, template, template_len, cons_vs_cons_path, consensuses, int_confirmed_path, partitioning, int_stats_file, resolved_seq_file): CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"] MIN_BRIDGE_COUNT = trestle_config.vals["min_bridge_count"] MIN_BRIDGE_FACTOR = trestle_config.vals["min_bridge_factor"] #Resolved repeat seqs to be returned, NOT written resolved_repeats = {} summ_vals = [] with open(int_stats_file, "a") as f: f.write("\n\n") for side in sorted(repeat_edges[rep]): f.write("{0}'{1}'{2:8}\t{3}\n" .format("Final ", side, " Iter:", side_it[side])) f.write("\n\n") #Overall confirmed and rejected positions types = ["total", "sub", "del", "ins"] int_confirmed = {t:[] for t in types} int_rejected = {t:[] for t in types} pos = {t:[] for t in types} if side_it["in"] > 0 or side_it["out"] > 0: int_confirmed, int_rejected, pos = _read_confirmed_positions( int_confirmed_path.format(side_it["in"], side_it["out"])) remainings = {} for typ in types: remainings[typ] = len(pos[typ]) - (len(int_confirmed[typ]) + len(int_rejected[typ])) type_strs = ["Total", "Sub", "Del", "Ins"] for typ, typ_str in zip(types, type_strs): confirmed_frac = 0.0 rejected_frac = 0.0 remaining_frac = 0.0 if len(pos[typ]) != 0: confirmed_frac = len(int_confirmed[typ]) / float(len(pos[typ])) rejected_frac = len(int_rejected[typ]) / float(len(pos[typ])) remaining_frac = remainings[typ] / float(len(pos[typ])) f.write("{0:26}\t{1}/{2} = {3:.3f}\n".format( "Confirmed {0} Positions:".format(typ_str), len(int_confirmed[typ]), len(pos[typ]), confirmed_frac)) f.write("{0:26}\t{1}/{2} = {3:.3f}\n".format( "Rejected {0} Positions:".format(typ_str), len(int_rejected[typ]), len(pos[typ]), rejected_frac)) f.write("{0:26}\t{1}/{2} = {3:.3f}\n".format( "Remaining {0} Positions:".format(typ_str), remainings[typ], len(pos[typ]), remaining_frac)) f.write("\n") f.write("\n") #Basic stats for confirmed positions av_div = 0.0 if template_len != 0: av_div = len(int_confirmed["total"]) / float(template_len) position_gaps = [0 for _ in range(len(int_confirmed["total"]) + 1)] curr_pos = 0 for i, p in enumerate(int_confirmed["total"]): position_gaps[i] = p - curr_pos curr_pos = p position_gaps[-1] = template_len - curr_pos mean_position_gap = _mean(position_gaps) max_position_gap = max(position_gaps) f.write("{0:26}\t{1}\n".format("Template Length:", template_len)) f.write("{0:26}\t{1}\n".format("# Confirmed Positions:", len(int_confirmed["total"]))) f.write("{0:26}\t{1:.4f}\n".format("Confirmed Pos Avg Divergence:", av_div)) f.write("{0:26}\t{1:.2f}\n".format("Mean Confirmed Pos Gap:", mean_position_gap)) f.write("{0:26}\t{1}\n".format("Max Confirmed Pos Gap:", max_position_gap)) f.write("\n\n") summ_vals.extend([len(int_confirmed["total"]), 
max_position_gap]) #Write bridging reads side_headers_dict = {} all_headers = set() for side in sorted(repeat_edges[rep]): side_headers_dict[side] = {} part_list = _read_partitioning_file(partitioning.format( side_it[side], side)) for _, status, edge, _, _, header in part_list: all_headers.add(header) if status == "Partitioned" and edge != "NA": side_headers_dict[side][header] = (side, int(edge)) bridging_reads = {} side_edges = [] for side in sorted(repeat_edges[rep]): side_edges.append([]) for edge in sorted(repeat_edges[rep][side]): side_edges[-1].append((side, edge)) for edge_pair in sorted(product(*side_edges)): bridging_reads[edge_pair] = 0 for header in all_headers: if (header in side_headers_dict["in"] and header in side_headers_dict["out"]): in_edge = side_headers_dict["in"][header] out_edge = side_headers_dict["out"][header] bridging_reads[(in_edge, out_edge)] += 1 for edge_pair in sorted(bridging_reads): pair_label = "|".join(["{0}{1}".format(x[0], x[1]) for x in edge_pair]) f.write("{0}{1:21}\t{2}\n".format(pair_label, " Bridging Reads:", bridging_reads[edge_pair])) f.write("\n\n") #Write combos which are sets of bridging reads all_combos = _get_combos(side_edges[0], side_edges[1]) combo_support = [0 for _ in all_combos] for i, combo in enumerate(all_combos): for edge_pair in combo: if edge_pair in bridging_reads: combo_support[i] += bridging_reads[edge_pair] for i, combo in enumerate(all_combos): f.write("{0} {1}\n".format("Combo", i)) coms = ["|".join(["".join([str(z) for z in x]) for x in y]) for y in combo] combo_edges = " + ".join(coms) f.write("{0:12}\t{1}\n".format("Resolution:", combo_edges)) f.write("{0:12}\t{1}\n\n".format("Support:", combo_support[i])) #Bridging conditions bridged = False bridged_edges = None combo_inds = list(zip(combo_support, list(range(len(combo_support))))) sorted_combos = sorted(combo_inds, reverse=True) if (len(sorted_combos) > 1 and sorted_combos[0][0] >= MIN_BRIDGE_COUNT and sorted_combos[0][0] >= sorted_combos[1][0] * MIN_BRIDGE_FACTOR): bridged = True bridged_edges = all_combos[sorted_combos[0][1]] best_combo = sorted_combos[0][1] best_support = sorted_combos[0][0] best_against = 0 second_combo = -1 second_support = 0 if len(sorted_combos) > 1: for support, _ in sorted_combos[1:]: best_against += support second_combo = sorted_combos[1][1] second_support = sorted_combos[1][0] if bridged: f.write("BRIDGED\n") f.write("Bridging Combo: {0}\n".format(best_combo)) br_ct_str = "{0} (min_bridge_count)".format(MIN_BRIDGE_COUNT) br_diff_str = "{0} * {1} (Combo {2} * min_bridge_factor)".format( second_support, MIN_BRIDGE_FACTOR, second_combo) f.write("Support = {0}\t> {1}\n{2:12}\t> {3}\n".format( best_support, br_ct_str, "", br_diff_str)) f.write("Resolution:\n") for edge_pair in sorted(bridged_edges): f.write("{0[0]} {0[1]:2} {1:3} {2[0]} {2[1]}\n" .format(edge_pair[0], "->", edge_pair[1])) f.write("\n\n") else: f.write("UNBRIDGED\n") f.write("Best combo {0}\n".format(best_combo)) f.write("{0:20}\t{1}\n".format("min_bridge_count", MIN_BRIDGE_COUNT)) f.write("{0:20}\t{1}\n\n\n".format("min_bridge_factor", MIN_BRIDGE_FACTOR)) summ_vals.extend([bridged, best_support, best_against]) #If not bridged, find in/gap/out lengths and divergence rates if not bridged: #Write median in, out, and gap lengths side_lens = {s:0 for s in repeat_edges[rep]} for side in sorted(repeat_edges[rep]): trg_limits = [] for edge_id in sorted(repeat_edges[rep][side]): curr_cons_path = cons_align_path.format(side_it[side], side, edge_id) if os.path.isfile(curr_cons_path): 
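                    #(Sketch.) Each in-edge contributes its alignment's trg_end
                    #and each out-edge contributes template_len - trg_start;
                    #the per-side medians then give
                    #  gap_len = template_len - (median_in + median_out)
                    #which is negative when the two sides overlap.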
cons_align = _read_alignment(curr_cons_path, template, CONS_ALN_RATE) if cons_align and cons_align[0]: if side == "in": trg_limits.append(cons_align[0][0].trg_end) elif side == "out": trg_limits.append(template_len - cons_align[0][0].trg_start) if trg_limits: side_lens[side] = _get_median(trg_limits) gap_len = template_len - (side_lens["in"] + side_lens["out"]) f.write("{0:30}\t{1}\n".format("Median in Sequence Length:", side_lens["in"])) f.write("{0:30}\t{1}\n".format("Median out Sequence Length:", side_lens["out"])) f.write("{0:30}\t{1}\n\n".format("Median Gap/Overlap Length:", gap_len)) #Write mean in and out divergence rates div_rates = {s:[] for s in repeat_edges[rep]} for side in sorted(repeat_edges[rep]): side_pairs = sorted(combinations(repeat_edges[rep][side], 2)) for edge_one, edge_two in side_pairs: cons_cons_file = cons_vs_cons_path.format( side_it[side], side, edge_one, side_it[side], side, edge_two) if (os.path.isfile(cons_cons_file) and os.path.isfile(consensuses[(side_it[side], side, edge_two)])): cons_vs_cons = _read_alignment(cons_cons_file, consensuses[(side_it[side], side, edge_two)], CONS_ALN_RATE) if cons_vs_cons and cons_vs_cons[0]: edge_rate = _calculate_divergence( cons_vs_cons[0][0].qry_seq, cons_vs_cons[0][0].trg_seq) div_rates[side].append(edge_rate) mean_in_div = 0.0 if div_rates["in"]: mean_in_div = _mean(div_rates["in"]) mean_out_div = 0.0 if div_rates["out"]: mean_out_div = _mean(div_rates["out"]) weighted_mean_div = 0.0 if side_lens["in"] + side_lens["out"] != 0: weighted_mean_div = ((mean_in_div*side_lens["in"] + mean_out_div*side_lens["out"]) / float(side_lens["in"] + side_lens["out"])) f.write("{0:30}\t{1}\n".format("Mean in Divergence Rate:", mean_in_div)) f.write("{0:30}\t{1}\n".format("Mean out Divergence Rate:", mean_out_div)) f.write("{0:30}\t{1}\n\n".format("Weighted Mean Divergence Rate:", weighted_mean_div)) res_str = "No resolution so no resolved file for repeat {0}\n\n" f.write(res_str.format(rep)) #for i, edge in enumerate(sorted(repeat_edges[rep]["in"])): #header = "Repeat_{0}_unbridged_copy_{1}".format(rep, i) #resolved_repeats[header] = "" #seq_dict = {header:""} #fp.write_fasta_dict(seq_dict, resolved_seq_file.format(i)) summ_vals.extend(["*", "*"]) #If bridged, find overlap and construct repeat copy sequences else: #Find end of repeat as min/max of in/out cons_vs_cons alignments edge_limits = {} for side in sorted(repeat_edges[rep]): side_pairs = sorted(combinations(repeat_edges[rep][side], 2)) for edge_one, edge_two in side_pairs: cons_cons_file = cons_vs_cons_path.format( side_it[side], side, edge_one, side_it[side], side, edge_two) if (os.path.isfile(cons_cons_file) and os.path.isfile(consensuses[(side_it[side], side, edge_two)])): cons_vs_cons = _read_alignment(cons_cons_file, consensuses[(side_it[side], side, edge_two)], CONS_ALN_RATE) if cons_vs_cons and cons_vs_cons[0]: #collapse multiple consensus alignments coll_cons = _collapse_cons_aln(cons_vs_cons) one_start = coll_cons.qry_start one_end = coll_cons.qry_end two_start = coll_cons.trg_start two_end = coll_cons.trg_end if side == "in": if (side, edge_one) not in edge_limits: edge_limits[(side, edge_one)] = one_start elif one_start < edge_limits[(side, edge_one)]: edge_limits[(side, edge_one)] = one_start if (side, edge_two) not in edge_limits: edge_limits[(side, edge_two)] = two_start elif two_start < edge_limits[(side, edge_two)]: edge_limits[(side, edge_two)] = two_start elif side == "out": if (side, edge_one) not in edge_limits: edge_limits[(side, edge_one)] = one_end elif 
one_end > edge_limits[(side, edge_one)]: edge_limits[(side, edge_one)] = one_end if (side, edge_two) not in edge_limits: edge_limits[(side, edge_two)] = two_end elif two_end > edge_limits[(side, edge_two)]: edge_limits[(side, edge_two)] = two_end #For each edge_pair, find starting and ending indices of #in, out, and template sequences to construct sequences summ_resolution = [] resolved_sequences = [] for i, edge_pair in enumerate(sorted(bridged_edges)): f.write("Repeat Copy {0}\n".format(i)) f.write("{0[0]} {0[1]:2} {1:3} {2[0]} {2[1]}\n".format( edge_pair[0], "->", edge_pair[1])) in_start = None out_end = None out_align = None in_align = None for side, edge_id in edge_pair: if side == "in" and (side, edge_id) in edge_limits: in_start = edge_limits[(side, edge_id)] elif side == "out" and (side, edge_id) in edge_limits: out_end = edge_limits[(side, edge_id)] if os.path.isfile(cons_align_path.format(side_it[side], side, edge_id)): cons_align = _read_alignment( cons_align_path.format(side_it[side], side, edge_id), template, CONS_ALN_RATE) if cons_align and cons_align[0]: #collapse multiple consensus alignments coll_cons_align = _collapse_cons_aln(cons_align) if side == "in": in_align = coll_cons_align elif side == "out": out_align = coll_cons_align if not in_align: in_start = 0 in_end = 0 temp_start = 0 #if in_start is None: # in_start = 0 else: in_start = in_align.qry_start in_end = in_align.qry_end temp_start = in_align.trg_end #if in_start is None: # in_start = in_align.qry_start #f.write("CHECK: in qry {0} - {1} of {2}\n".format(in_align.qry_start, # in_align.qry_end, in_align.qry_len)) #f.write("CHECK: in trg {0} - {1} of {2}\n".format(in_align.trg_start, # in_align.trg_end, in_align.trg_len)) if not out_align: temp_end = 0 out_start = 0 out_end = 0 #if out_end is None: # out_end = 0 out_qry_seq = "" out_trg_seq = "" out_trg_end = 0 out_qry_end = 0 else: temp_end = out_align.trg_start out_start = out_align.qry_start out_end = out_align.qry_end #if out_end is None: # out_end = out_align.qry_end out_qry_seq = out_align.qry_seq out_trg_seq = out_align.trg_seq out_trg_end = out_align.trg_end out_qry_end = out_align.qry_end #f.write("CHECK: out qry {0} - {1} of {2}\n".format(out_align.qry_start, # out_align.qry_end, out_align.qry_len)) #f.write("CHECK: out trg {0} - {1} of {2}\n".format(out_align.trg_start, # out_align.trg_end, out_align.trg_len)) f.write("Alignment Indices:\n") f.write("{0:10}\t{1:5} - {2:5}\n".format("in", in_start, in_end)) #f.write("{0:10}\t{1:5} - {2:5}\n".format("Template", #temp_start, #temp_end)) f.write("{0:10}\t{1:5} - {2:5}\n".format("out", out_start, out_end)) #Report gap/overlap length gap_len = temp_end - temp_start if gap_len >= 0: f.write("{0}\t{1}\n".format("Gap between edges:", gap_len)) else: f.write("{0}\t{1}\n\n".format("Overlap between edges:", -gap_len)) #in sequence used to represent overlapping segment #print check of overlapping segment new_temp_end = temp_start new_out_start = None _, out_aln_qry = _index_mapping(out_qry_seq) out_trg_aln, _ = _index_mapping(out_trg_seq) in_edge = edge_pair[0][1] out_edge = edge_pair[1][1] if temp_start >= out_trg_end: #f.write("CHECK, unhelpful case, temp_start {0}\n".format(temp_start)) new_out_start = out_qry_end else: #f.write("CHECK: temp_start {0}, len(out_trg_aln) {1}\n".format(temp_start, len(out_trg_aln))) temp_trg_start = temp_start - temp_end if temp_trg_start < len(out_trg_aln): out_aln_ind = out_trg_aln[temp_trg_start] #f.write("CHECK: out_aln_ind {0}, len(out_aln_qry) {1}\n".format(out_aln_ind, 
len(out_aln_qry))) if out_aln_ind < len(out_aln_qry): new_out_start = (out_start + out_aln_qry[out_aln_ind]) #f.write("CHECK: new_out_start {0}\n".format(new_out_start)) #_check_overlap( # consensuses[(side_it["in"], "in", in_edge)], # template, # consensuses[(side_it["out"], "out", out_edge)], # -gap_len, in_start, in_end, temp_start, temp_end, # out_start, out_end, # new_out_start, in_align.qry_seq, in_align.trg_seq, # out_align.qry_seq, out_align.trg_seq, out_trg_aln, # out_aln_trg, out_qry_aln, out_aln_qry, # out_align.trg_end, out_align.qry_end, # in_align, out_align) temp_end = new_temp_end if new_out_start: out_start = new_out_start f.write("Adjusted Alignment Indices:\n") f.write("{0:10}\t{1:5} - {2:5}\n".format("in", in_start, in_end)) if temp_start != new_temp_end: f.write("{0:10}\t{1:5} - {2:5}\n".format("Template", temp_start, new_temp_end)) f.write("{0:10}\t{1:5} - {2:5}\n\n\n".format("out", new_out_start, out_end)) in_edge = edge_pair[0][1] out_edge = edge_pair[1][1] #header = "_".join(["Repeat_{0}".format(rep), # "bridged_copy_{0}".format(i), # "in_{0}_{1}_{2}".format(in_edge, # in_start, # in_end), # "template_{0}_{1}".format(temp_start, # temp_end), # "out_{0}_{1}_{2}".format(out_edge, # out_start, # out_end)]) header = "repeat_{0}_path_{1}_{2}".format(rep, in_edge, out_edge) copy_seq = "" if side_it["in"] > 0 and side_it["out"] > 0: copy_seq = _construct_repeat_copy( consensuses[(side_it["in"], "in", in_edge)], template, consensuses[(side_it["out"], "out", out_edge)], in_start, in_end, temp_start, temp_end, out_start, out_end) resolved_repeats[header] = copy_seq if copy_seq: seq_dict = {header:copy_seq} fp.write_fasta_dict(seq_dict, resolved_seq_file.format(rep, i)) #in_str = "".join(["in", str(in_edge)]) #out_str = "".join(["out", str(out_edge)]) #summ_resolution.append("|".join([in_str, out_str])) summ_resolution.append("{0},{1}".format(in_edge, out_edge)) resolved_sequences.append(header) #summ_vals.extend(["+".join(summ_resolution)]) summ_vals.append(":".join(summ_resolution)) summ_vals.append(":".join(resolved_sequences)) return bridged, resolved_repeats, summ_vals def int_stats_postscript(rep, repeat_edges, integrated_stats, resolved_rep_path, res_vs_res): CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"] divs = [] with open(integrated_stats, "a") as f: res_inds = list(range(len(repeat_edges[rep]["in"]))) f.write("Resolved Repeat Sequence Alignments\n") for res_one, res_two in sorted(combinations(res_inds, 2)): qry_start = 0 qry_end = 0 qry_len = 0 trg_start = 0 trg_end = 0 trg_len = 0 qry_seq = "" trg_seq = "" if (os.path.isfile(res_vs_res.format(rep, res_one, res_two)) and os.path.isfile(resolved_rep_path.format(rep, res_two))): res_align = _read_alignment(res_vs_res.format(rep, res_one, res_two), resolved_rep_path.format(rep, res_two), CONS_ALN_RATE) if res_align and res_align[0]: qry_start = res_align[0][0].qry_start qry_end = res_align[0][0].qry_end qry_len = res_align[0][0].qry_len trg_start = res_align[0][0].trg_start trg_end = res_align[0][0].trg_end trg_len = res_align[0][0].trg_len qry_seq = res_align[0][0].qry_seq trg_seq = res_align[0][0].trg_seq f.write("Copy {0}|Copy {1}\n".format(res_one, res_two)) f.write("{0}{1}{2:16}\t{3:5}-{4:5} of {5:5}\n".format( "Copy ", res_one, ":", qry_start, qry_end, qry_len)) f.write("{0}{1}{2:16}\t{3:5}-{4:5} of {5:5}\n".format( "Copy ", res_two, ":", trg_start, trg_end, trg_len)) div_rate = _calculate_divergence(qry_seq, trg_seq) divs.append(div_rate) f.write("{0:26}\t{1:.4f}\n".format("Divergence Rate:", div_rate)) f.write("\n") return 
_mean(divs) def _get_partitioning_info(part_list, edges): edge_reads = {edge:0 for edge in edges} tied_reads = 0 unassigned_reads = 0 for _, status, edge, _, _, _ in part_list: if status == "Partitioned" and edge != "NA": edge_reads[int(edge)] += 1 elif status == "Tied": tied_reads += 1 elif status == "None": unassigned_reads += 1 else: exception_str = "Unknown status {0} in partitioning file" raise Exception(exception_str.format(status)) return edge_reads, tied_reads, unassigned_reads def _calculate_divergence(qry_seq, trg_seq): if not qry_seq or not trg_seq: return 0.0 curr_del = 0 curr_ins = 0 match_count = 0 mis_count = 0 del_count = 0 ins_count = 0 for q, t in zip(qry_seq, trg_seq): if q == t: match_count += 1 if curr_del != 0: del_count += 1 curr_del = 0 if curr_ins != 0: ins_count += 1 curr_ins = 0 elif q == "-" and t != "-": curr_del += 1 if curr_ins != 0: ins_count += 1 curr_ins = 0 elif q != "-" and t == "-": curr_ins += 1 if curr_del != 0: del_count += 1 curr_del = 0 elif q != t: mis_count += 1 if curr_del != 0: del_count += 1 curr_del = 0 if curr_ins != 0: ins_count += 1 curr_ins = 0 else: raise Exception("No alignment conditions fit, {0} {1}".format(q, t)) if curr_del != 0: del_count += 1 curr_del = 0 if curr_ins != 0: ins_count += 1 curr_ins = 0 indel_sim_rate = 0.0 total = match_count + mis_count + del_count + ins_count if total != 0: indel_sim_rate = match_count / float(total) return 1 - indel_sim_rate def _n50(reads_file): reads_dict = fp.read_sequence_dict(reads_file) read_lengths = sorted([len(x) for x in reads_dict.values()], reverse=True) summed_len = 0 n50 = 0 for l in read_lengths: summed_len += l if summed_len >= sum(read_lengths) // 2: n50 = l break return n50 def _get_median(lst): if not lst: raise ValueError("_get_median() arg is an empty sequence") sorted_list = sorted(lst) if len(lst) % 2 == 1: return sorted_list[len(lst) // 2] else: mid1 = sorted_list[(len(lst) // 2) - 1] mid2 = sorted_list[(len(lst) // 2)] return (mid1 + mid2) // 2 def _integrate_confirmed_pos(all_in_pos, all_out_pos): in_conf, in_rej, in_pos = all_in_pos out_conf, out_rej, _ = all_out_pos integrated_confirmed = {"total":[], "sub":[], "ins":[], "del":[]} integrated_rejected = {"total":[], "sub":[], "ins":[], "del":[]} for pos in sorted(in_pos["total"]): for pos_type in in_conf: if pos in in_conf[pos_type] or pos in out_conf[pos_type]: integrated_confirmed[pos_type].append(pos) elif pos in in_rej[pos_type] or pos in out_rej[pos_type]: integrated_rejected[pos_type].append(pos) return integrated_confirmed, integrated_rejected, in_pos def _get_combos(in_list, out_list): all_combos = [] for combo in _combo_helper(in_list, out_list): all_combos.append(combo) return all_combos def _combo_helper(in_list, out_list): if not in_list or not out_list: yield [] return else: in1 = in_list[0] for j in range(len(out_list)): combo = (in1, out_list[j]) for rest in _combo_helper(in_list[1:], out_list[:j] + out_list[j + 1:]): yield [combo] + rest def _get_aln_end(aln_start, aln_seq): return aln_start+len(aln_seq.replace("-","")) """ def _check_overlap(in_file, temp_file, out_file, overlap, in_start, in_end, temp_start, temp_end, out_start, out_end, new_out_start, in_qry, in_trg, out_qry, out_trg, out_trg_aln, out_aln_trg, out_qry_aln, out_aln_qry, out_trg_end, out_qry_end, in_align, out_align): in_dict = fp.read_sequence_dict(in_file) in_seq = in_dict.values()[0] temp_dict = fp.read_sequence_dict(temp_file) temp_seq = temp_dict.values()[0] out_dict = fp.read_sequence_dict(out_file) out_seq = out_dict.values()[0] 
for i in range(len(out_qry)/50-1, len(out_qry)/50+1): aln_ind_st = i*50 aln_ind_end = (i+1)*50 if aln_ind_end > len(out_qry): aln_ind_end = len(out_qry) print 'ALN inds', aln_ind_st, aln_ind_end qry_ind_st = out_aln_qry[aln_ind_st] if aln_ind_end < len(out_aln_qry): qry_ind_end = out_aln_qry[aln_ind_end] else: qry_ind_end = out_aln_qry[-1] print 'QRY inds', qry_ind_st, qry_ind_end trg_ind_st = out_aln_trg[aln_ind_st] if aln_ind_end < len(out_aln_trg): trg_ind_end = out_aln_trg[aln_ind_end] else: trg_ind_end = out_aln_trg[-1] print 'TRG inds', trg_ind_st, trg_ind_end print "TRG ALN", out_trg_aln[trg_ind_st:trg_ind_end] print "ALN TRG", out_aln_trg[aln_ind_st:aln_ind_end] print "QRY ALN", out_qry_aln[qry_ind_st:qry_ind_end] print "ALN QRY", out_aln_qry[aln_ind_st:aln_ind_end] print "QRY SEQ", out_qry[aln_ind_st:aln_ind_end] print "TRG SEQ", out_trg[aln_ind_st:aln_ind_end] print print 'In end, in template end',in_end,temp_start print 'AR In qry end',in_qry[-10:] print 'AR In trg end',in_trg[-10:] print 'Out old start, old end, new start, out template start', out_start, print out_end, new_out_start, temp_end print "Out_trg_end", out_trg_end print "Out_qry_end", out_qry_end print "In align qry inds", in_align.qry_start, in_align.qry_end, print in_align.qry_len print "In align trg inds", in_align.trg_start, in_align.trg_end, print in_align.trg_len print "Out align qry inds", out_align.qry_start, out_align.qry_end, print out_align.qry_len print "Out align trg inds", out_align.trg_start, out_align.trg_end, print out_align.trg_len print print "Overlap:\t{0}".format(overlap) print "In seq(-30 to end):\t{0}".format(in_seq[in_end-30:in_end]) temp_end_seq = temp_seq[temp_start-30:temp_start] print "Template seq(-30 to end):\t{0}".format(temp_end_seq) #print "Out seq:\t{0}".format(out_seq[out_start:out_end]) #print "AR In seq:\t{0}".format(in_seq[in_start-10:in_end+10]) #print "AR Template seq:\t{0}".format(temp_seq[temp_end:temp_start+10]) #print "AR Out seq:\t{0}".format(out_seq[out_start:out_end+10]) pre_new_out = out_seq[new_out_start-30:new_out_start] post_new_out = out_seq[new_out_start:new_out_start+30] print "New out seq(-30 to new start):\t{0}".format(pre_new_out) print "New out seq(new_start to +30):\t{0}".format(post_new_out) print """ def _construct_repeat_copy(in_file, temp_file, out_file, in_start, in_end, temp_start, temp_end, out_start, out_end): if (not os.path.isfile(in_file) or not os.path.isfile(temp_file) or not os.path.isfile(out_file)): return "" in_dict = fp.read_sequence_dict(in_file) temp_dict = fp.read_sequence_dict(temp_file) out_dict = fp.read_sequence_dict(out_file) if not in_dict or not temp_dict or not out_dict: return "" in_seq = list(in_dict.values())[0] temp_seq = list(temp_dict.values())[0] out_seq = list(out_dict.values())[0] seq = ''.join([in_seq[in_start:in_end], temp_seq[temp_start:temp_end], out_seq[out_start:out_end]]) return seq def init_summary(summary_file): with open(summary_file, "w") as f: summ_header_labels = ["Repeat_ID", "Path", "Template", "Cov", "#Conf_Pos", "Max_Pos_Gap", "Bridged?", "Support", "Against", "Avg_Div", "Resolution", "Sequences"] #spaced_header = map("{:13}".format, summ_header_labels) f.write(" ".join(["{:<13}".format(str(x)) for x in summ_header_labels]) + "\n") def update_summary(summ_items, summary_file): (rep_id, graph_path, template_len, avg_cov, summ_vals, avg_div, both_resolved_present) = summ_items (confirmed_pos, max_pos_gap, bridged, support, against, resolution, sequences) = tuple(summ_vals) avg_cov = "{:.4f}".format(avg_cov) 
avg_div = "{:.4f}".format(avg_div) graph_path = ",".join([str(p) for p in graph_path]) bridged = bridged and both_resolved_present summ_out = [rep_id, graph_path, template_len, avg_cov, confirmed_pos, max_pos_gap, bridged, support, against, avg_div, resolution, sequences] with open(summary_file, "a") as f: f.write(" ".join(["{:<13}".format(str(x)) for x in summ_out]) + "\n") def remove_unneeded_files(repeat_edges, rep, side_labels, side_it, orient_dir, template, extended, pol_temp_dir, pol_ext_dir, pre_edge_reads, pre_partitioning, pre_read_align, partitioning, cons_align, cut_cons_align, read_align, confirmed_pos_path, edge_reads, cut_cons, polishing_dir, cons_vs_cons, int_confirmed_path, repeat_reads, frequency_path, alignment_file, num_pol_iters, iter_pairs): add_dir_name = "additional_output" add_dir = os.path.join(orient_dir, add_dir_name) if not os.path.isdir(add_dir): os.mkdir(add_dir) pol_name = "polished_{0}.fasta".format(num_pol_iters) pol_template = "polished_template.fasta" pol_ext = "polished_extended.{0}.{1}.fasta" pol_temp_file = os.path.join(pol_temp_dir, pol_name) if os.path.exists(pol_temp_file): os.rename(pol_temp_file, os.path.join(add_dir, pol_template)) for side in side_labels: for edge_id in repeat_edges[rep][side]: pol_ext_file = os.path.join(pol_ext_dir.format(side, edge_id), pol_name) if os.path.exists(pol_ext_file): os.rename(pol_ext_file, os.path.join(add_dir, pol_ext.format(side, edge_id))) files_to_remove = [template] dirs_to_remove = [pol_temp_dir] files_to_move = [repeat_reads, frequency_path, alignment_file] if os.path.exists(pol_temp_dir): for fil in os.listdir(pol_temp_dir): files_to_remove.append(os.path.join(pol_temp_dir, fil)) for side in side_labels: for edge_id in repeat_edges[rep][side]: files_to_remove.append(extended.format(side, edge_id)) curr_pol_ext_dir = pol_ext_dir.format(side, edge_id) dirs_to_remove.append(curr_pol_ext_dir) if os.path.exists(curr_pol_ext_dir): for fil in os.listdir(curr_pol_ext_dir): files_to_remove.append(os.path.join(curr_pol_ext_dir, fil)) files_to_remove.append(pre_edge_reads.format(side, edge_id)) files_to_remove.append(pre_read_align.format(side, edge_id)) for it in range(1, side_it[side] + 1): files_to_remove.append(cons_align.format(it, side, edge_id)) files_to_remove.append(read_align.format(it, side, edge_id)) files_to_remove.append(edge_reads.format(it, side, edge_id)) pol_cons = polishing_dir.format(it, side, edge_id) dirs_to_remove.append(pol_cons) if os.path.exists(pol_cons): for fil in os.listdir(pol_cons): files_to_remove.append(os.path.join(pol_cons, fil)) for it in range(1, side_it[side]): files_to_remove.append(cut_cons_align.format(it, side, edge_id)) files_to_remove.append(cut_cons.format(it, side, edge_id)) it = side_it[side] files_to_move.append(cut_cons_align.format(it, side, edge_id)) files_to_move.append(cut_cons.format(it, side, edge_id)) edge_pairs = sorted(combinations(repeat_edges[rep][side], 2)) for edge_one, edge_two in edge_pairs: for it in range(1, side_it[side]): cons_cons_file = cons_vs_cons.format(it, side, edge_one, it, side, edge_two) files_to_remove.append(cons_cons_file) it = side_it[side] cons_cons_file = cons_vs_cons.format(it, side, edge_one, it, side, edge_two) files_to_move.append(cons_cons_file) files_to_remove.append(pre_partitioning.format(side)) for it in range(1, side_it[side]): files_to_remove.append(partitioning.format(it, side)) files_to_remove.append(confirmed_pos_path.format(it, side)) for it in [0, side_it[side]]: files_to_move.append(partitioning.format(it, side)) it 
= side_it[side] files_to_move.append(confirmed_pos_path.format(it, side)) last_conf_pos = int_confirmed_path.format(side_it[side_labels[0]], side_it[side_labels[1]]) for it1, it2 in iter_pairs: curr_conf_pos = int_confirmed_path.format(it1, it2) if curr_conf_pos != last_conf_pos: files_to_remove.append(curr_conf_pos) else: files_to_move.append(curr_conf_pos) for f in files_to_remove: if os.path.exists(f): os.remove(f) for d in dirs_to_remove: if os.path.exists(d): os.rmdir(d) for f in files_to_move: if os.path.exists(f): split_path = os.path.split(f) new_file = os.path.join(split_path[0], add_dir_name, split_path[1]) os.rename(f, new_file) def _mean(lst): if not lst: return 0 return sum(lst) / len(lst)<|fim▁end|>
if num_bases == overlap: return i + 1 return len(aln)
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#-*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386<|fim▁hole|>__version__ = '0.9a4.dev1' # pragma: nocover<|fim▁end|>
<|file_name|>ObservadorDoControleRemoto.java<|end_file_name|><|fim▁begin|>package controle.gui_jogo;

import controle.configuradores_gui.ConfiguradorVisualizadorDeCartas;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.List;
import javax.imageio.IIOImage;
import javax.imageio.ImageIO;
import modelo.jogo.Jogada;
import modelo.jogo.partida.InformacaoDoTurno;
import modelo.jogo.servidor.controleremoto.ControleRemoto;
import modelo.util.Observador;
import visao.GUIJogo;
import visao.GUIPortal;
import visao.janelas.FormVisualizadorDeCartas;

public class ObservadorDoControleRemoto implements Observador {

    private GUIJogo _gj;
    private ControleRemoto _ctr;
    private GUIPortal gui;

    public ObservadorDoControleRemoto(GUIJogo _gj, ControleRemoto _ctr, GUIPortal gui) {
        this._gj = _gj;
        this._ctr = _ctr;
        this.gui = gui;
    }

    @Override
    public void notificar(Object fonte, Object msg) {
        if ("iniciar_turno".equals(msg)) {
            _gj.habilitarMontarJogada(true);
            return;
        }
        if ("jogada_realizada".equals(msg)) {
            _gj.habilitarMontarJogada(false);
            return;
        }
        if ("atualizar_pontuacao".equals(msg)) {
            List<InformacaoDoTurno> info = _ctr.getListaInformacaoTurno();
            _gj.atualizarPlacar(info);
            return;
        }
        if ("fim_do_jogo".equals(msg)) {
            _ctr.remover(this);
            _gj.mostrasMensagem("Fim do jogo seu brucutu");
            try {<|fim▁hole|>
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                _gj.mostrasMensagem(e.getMessage());
            }
            _gj.fechar();
            return;
        }
    }
}<|fim▁end|>
}); function reportChange(event) {
<|file_name|>test.go<|end_file_name|><|fim▁begin|>package main import ( "os" "os/exec" ) func runTestsCommand() { goTest := []string{"go", "test"} goTest = append(goTest, flags.Args()[1:]...) runCmd(goTest, func(c *exec.Cmd) { c.Env = appendToPathList(os.Environ(), "GOPATH", appengineDir) })<|fim▁hole|><|fim▁end|>
}
<|file_name|>ratings-counter.py<|end_file_name|><|fim▁begin|>from pyspark import SparkConf, SparkContext<|fim▁hole|>conf = SparkConf().setMaster("local").setAppName("RatingsHistogram") sc = SparkContext(conf=conf) lines = sc.textFile("file:///SparkCourse/ml-100k/u.data") ratings = lines.map(lambda x: x.split()[2]) result = ratings.countByValue() sortedResults = collections.OrderedDict(sorted(result.items())) for key, value in sortedResults.items(): print("%s %i" % (key, value))<|fim▁end|>
import collections
<|file_name|>apps.py<|end_file_name|><|fim▁begin|>from django.utils.translation import ugettext_lazy as _ app_info = { 'name': 'comments', 'author': 'Katrid', 'website': 'http://katrid.com', 'short_description': 'Enterprise Social Network',<|fim▁hole|> 'dependencies': ['keops.modules.contact'], 'category': _('Communication'), 'version': '0.2', }<|fim▁end|>
'description': _('Comments, Discussions, Mailing List, News, Document Followers'),
<|file_name|>GradientSkia.cpp<|end_file_name|><|fim▁begin|>/* * Copyright (c) 2008, Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "config.h" #include "Gradient.h" #include "CSSParser.h" #include "GraphicsContext.h" #include "SkColorShader.h" #include "SkGradientShader.h" #include "SkiaUtils.h" namespace WebCore { void Gradient::platformDestroy() { SkSafeUnref(m_gradient); m_gradient = 0; } static inline U8CPU F2B(float x) { return static_cast<int>(x * 255); } static SkColor makeSkColor(float a, float r, float g, float b) { return SkColorSetARGB(F2B(a), F2B(r), F2B(g), F2B(b)); } // Determine the total number of stops needed, including pseudo-stops at the // ends as necessary. static size_t totalStopsNeeded(const Gradient::ColorStop* stopData, size_t count) { // N.B.: The tests in this function should kept in sync with the ones in // fillStops(), or badness happens. const Gradient::ColorStop* stop = stopData; size_t countUsed = count; if (count < 1 || stop->stop > 0.0) countUsed++; stop += count - 1; if (count < 1 || stop->stop < 1.0) countUsed++; return countUsed; } // Collect sorted stop position and color information into the pos and colors // buffers, ensuring stops at both 0.0 and 1.0. The buffers must be large // enough to hold information for all stops, including the new endpoints if // stops at 0.0 and 1.0 aren't already included. static void fillStops(const Gradient::ColorStop* stopData, size_t count, SkScalar* pos, SkColor* colors) { <|fim▁hole|> const Gradient::ColorStop* stop = stopData; size_t start = 0; if (count < 1) { // A gradient with no stops must be transparent black. pos[0] = WebCoreFloatToSkScalar(0.0); colors[0] = makeSkColor(0.0, 0.0, 0.0, 0.0); start = 1; } else if (stop->stop > 0.0) { // Copy the first stop to 0.0. The first stop position may have a slight // rounding error, but we don't care in this float comparison, since // 0.0 comes through cleanly and people aren't likely to want a gradient // with a stop at (0 + epsilon). 
pos[0] = WebCoreFloatToSkScalar(0.0); colors[0] = makeSkColor(stop->alpha, stop->red, stop->green, stop->blue); start = 1; } for (size_t i = start; i < start + count; i++) { pos[i] = WebCoreFloatToSkScalar(stop->stop); colors[i] = makeSkColor(stop->alpha, stop->red, stop->green, stop->blue); ++stop; } // Copy the last stop to 1.0 if needed. See comment above about this float // comparison. if (count < 1 || (--stop)->stop < 1.0) { pos[start + count] = WebCoreFloatToSkScalar(1.0); colors[start + count] = colors[start + count - 1]; } } SkShader* Gradient::platformGradient() { if (m_gradient) return m_gradient; sortStopsIfNecessary(); ASSERT(m_stopsSorted); size_t countUsed = totalStopsNeeded(m_stops.data(), m_stops.size()); ASSERT(countUsed >= 2); ASSERT(countUsed >= m_stops.size()); // FIXME: Why is all this manual pointer math needed?! SkAutoMalloc storage(countUsed * (sizeof(SkColor) + sizeof(SkScalar))); SkColor* colors = (SkColor*)storage.get(); SkScalar* pos = (SkScalar*)(colors + countUsed); fillStops(m_stops.data(), m_stops.size(), pos, colors); SkShader::TileMode tile = SkShader::kClamp_TileMode; switch (m_spreadMethod) { case SpreadMethodReflect: tile = SkShader::kMirror_TileMode; break; case SpreadMethodRepeat: tile = SkShader::kRepeat_TileMode; break; case SpreadMethodPad: tile = SkShader::kClamp_TileMode; break; } if (m_radial) { // Since the two-point radial gradient is slower than the plain radial, // only use it if we have to. if (m_p0 == m_p1 && m_r0 <= 0.0f) { m_gradient = SkGradientShader::CreateRadial(m_p1, m_r1, colors, pos, static_cast<int>(countUsed), tile); } else { // The radii we give to Skia must be positive. If we're given a // negative radius, ask for zero instead. SkScalar radius0 = m_r0 >= 0.0f ? WebCoreFloatToSkScalar(m_r0) : 0; SkScalar radius1 = m_r1 >= 0.0f ? WebCoreFloatToSkScalar(m_r1) : 0; m_gradient = SkGradientShader::CreateTwoPointConical(m_p0, radius0, m_p1, radius1, colors, pos, static_cast<int>(countUsed), tile); } if (aspectRatio() != 1) { // CSS3 elliptical gradients: apply the elliptical scaling at the // gradient center point. m_gradientSpaceTransformation.translate(m_p0.x(), m_p0.y()); m_gradientSpaceTransformation.scale(1, 1 / aspectRatio()); m_gradientSpaceTransformation.translate(-m_p0.x(), -m_p0.y()); ASSERT(m_p0 == m_p1); } } else { SkPoint pts[2] = { m_p0, m_p1 }; m_gradient = SkGradientShader::CreateLinear(pts, colors, pos, static_cast<int>(countUsed), tile); } if (!m_gradient) // use last color, since our "geometry" was degenerate (e.g. radius==0) m_gradient = new SkColorShader(colors[countUsed - 1]); else m_gradient->setLocalMatrix(m_gradientSpaceTransformation); return m_gradient; } void Gradient::fill(GraphicsContext* context, const FloatRect& rect) { context->setFillGradient(this); context->fillRect(rect); } void Gradient::setPlatformGradientSpaceTransform(const AffineTransform& matrix) { if (m_gradient) m_gradient->setLocalMatrix(m_gradientSpaceTransformation); } } // namespace WebCore<|fim▁end|>
<|file_name|>analyze_dxp.py<|end_file_name|><|fim▁begin|>""" Some helper functions to analyze the output of sys.getdxp() (which is only available if Python was built with -DDYNAMIC_EXECUTION_PROFILE). These will tell you which opcodes have been executed most frequently in the current process, and, if Python was also built with -DDXPAIRS, will tell you which instruction _pairs_ were executed most frequently, which may help in choosing new instructions. If Python was built without -DDYNAMIC_EXECUTION_PROFILE, importing this module will raise a RuntimeError. If you're running a script you want to profile, a simple way to get the common pairs is: $ PYTHONPATH=$PYTHONPATH:<python_srcdir>/Tools/scripts \ ./python -i -O the_script.py --args ... > from analyze_dxp import * > s = render_common_pairs() > open('/tmp/some_file', 'w').write(s) """ import copy import opcode import operator import sys import threading if not hasattr(sys, "getdxp"): raise RuntimeError("Can't import analyze_dxp: Python built without" " -DDYNAMIC_EXECUTION_PROFILE.") _profile_lock = threading.RLock() _cumulative_profile = sys.getdxp() # If Python was built with -DDXPAIRS, sys.getdxp() returns a list of # lists of ints. Otherwise it returns just a list of ints. def has_pairs(profile): """Returns True if the Python that produced the argument profile was built with -DDXPAIRS.""" return len(profile) > 0 and isinstance(profile[0], list) def reset_profile(): """Forgets any execution profile that has been gathered so far.""" with _profile_lock: sys.getdxp() # Resets the internal profile global _cumulative_profile _cumulative_profile = sys.getdxp() # 0s out our copy. def merge_profile(): """Reads sys.getdxp() and merges it into this module's cached copy. We need this because sys.getdxp() 0s itself every time it's called.""" with _profile_lock: new_profile = sys.getdxp() if has_pairs(new_profile): for first_inst in range(len(_cumulative_profile)): for second_inst in range(len(_cumulative_profile[first_inst])): _cumulative_profile[first_inst][second_inst] += ( new_profile[first_inst][second_inst]) else: for inst in range(len(_cumulative_profile)): _cumulative_profile[inst] += new_profile[inst] def snapshot_profile(): """Returns the cumulative execution profile until this call.""" with _profile_lock: merge_profile() return copy.deepcopy(_cumulative_profile) def common_instructions(profile): """Returns the most common opcodes in order of descending frequency. The result is a list of tuples of the form (opcode, opname, # of occurrences) """ if has_pairs(profile) and profile: inst_list = profile[-1] else: inst_list = profile result = [(op, opcode.opname[op], count) for op, count in enumerate(inst_list) if count > 0] result.sort(key=operator.itemgetter(2), reverse=True) return result def common_pairs(profile): """Returns the most common opcode pairs in order of descending frequency. The result is a list of tuples of the form ((1st opcode, 2nd opcode), (1st opname, 2nd opname), <|fim▁hole|> """ if not has_pairs(profile): return [] result = [((op1, op2), (opcode.opname[op1], opcode.opname[op2]), count) # Drop the row of single-op profiles with [:-1] for op1, op1profile in enumerate(profile[:-1]) for op2, count in enumerate(op1profile) if count > 0] result.sort(key=operator.itemgetter(2), reverse=True) return result def render_common_pairs(profile=None): """Renders the most common opcode pairs to a string in order of descending frequency. 
The result is a series of lines of the form: # of occurrences: ('1st opname', '2nd opname') """ if profile is None: profile = snapshot_profile() def seq(): for _, ops, count in common_pairs(profile): yield "%s: %s\n" % (count, ops) return ''.join(seq())<|fim▁end|>
# of occurrences of the pair)
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from datetime import datetime, timedelta import logging from urllib import urlencode from django.http import Http404 from django.utils import html from django.utils.safestring import mark_safe import pytz from corehq import Domain from corehq.apps import reports from corehq.apps.app_manager.models import get_app, Form, RemoteApp from corehq.apps.app_manager.util import get_case_properties from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin from corehq.apps.domain.middleware import CCHQPRBACMiddleware from corehq.apps.export.models import FormQuestionSchema from corehq.apps.reports.display import xmlns_to_name from dimagi.ext.couchdbkit import * from corehq.apps.reports.exportfilters import form_matches_users, is_commconnect_form, default_form_filter, \ default_case_filter from corehq.apps.users.models import WebUser, CommCareUser, CouchUser from corehq.util.view_utils import absolute_reverse from couchexport.models import SavedExportSchema, GroupExportConfiguration, FakeSavedExportSchema, SplitColumn from couchexport.transforms import couch_to_excel_datetime, identity from couchexport.util import SerializableFunction import couchforms from dimagi.utils.couch.cache import cache_core from dimagi.utils.couch.database import get_db from dimagi.utils.decorators.memoized import memoized from django.conf import settings from django.core.validators import validate_email from corehq.apps.reports.dispatcher import ProjectReportDispatcher, CustomProjectReportDispatcher import json import calendar from django.utils.translation import ugettext as _ from django.utils.translation import ugettext_noop from dimagi.utils.logging import notify_exception from django_prbac.exceptions import PermissionDenied class HQUserType(object): REGISTERED = 0 DEMO_USER = 1 ADMIN = 2 UNKNOWN = 3 COMMTRACK = 4 human_readable = [settings.COMMCARE_USER_TERM, ugettext_noop("demo_user"), ugettext_noop("admin"), ugettext_noop("Unknown Users"), ugettext_noop("CommCare Supply")] toggle_defaults = (True, False, False, False, False) count = len(human_readable) included_defaults = (True, True, True, True, False) @classmethod def use_defaults(cls): return cls._get_manual_filterset(cls.included_defaults, cls.toggle_defaults) @classmethod def all_but_users(cls): no_users = [True] * cls.count no_users[cls.REGISTERED] = False return cls._get_manual_filterset(cls.included_defaults, no_users) @classmethod def commtrack_defaults(cls): # this is just a convenience method for clairty on commtrack projects return cls.all() @classmethod def all(cls): defaults = (True,) * cls.count return cls._get_manual_filterset(defaults, cls.toggle_defaults) @classmethod def _get_manual_filterset(cls, included, defaults): """ manually construct a filter set. included and defaults should both be arrays of booleans mapping to values in human_readable and whether they should be included and defaulted, respectively. 
""" return [HQUserToggle(i, defaults[i]) for i in range(cls.count) if included[i]] @classmethod def use_filter(cls, ufilter): return [HQUserToggle(i, unicode(i) in ufilter) for i in range(cls.count)] class HQToggle(object): type = None show = False name = None def __init__(self, type, show, name): self.type = type self.name = name self.show = show def __repr__(self): return "%(klass)s[%(type)s:%(show)s:%(name)s]" % dict( klass = self.__class__.__name__, type=self.type, name=self.name, show=self.show ) class HQUserToggle(HQToggle): def __init__(self, type, show): name = _(HQUserType.human_readable[type]) super(HQUserToggle, self).__init__(type, show, name) class TempCommCareUser(CommCareUser): filter_flag = IntegerProperty() def __init__(self, domain, username, uuid): if username == HQUserType.human_readable[HQUserType.DEMO_USER]: filter_flag = HQUserType.DEMO_USER elif username == HQUserType.human_readable[HQUserType.ADMIN]: filter_flag = HQUserType.ADMIN else: filter_flag = HQUserType.UNKNOWN super(TempCommCareUser, self).__init__( domain=domain, username=username, _id=uuid, date_joined=datetime.utcnow(), is_active=False, user_data={}, first_name='', last_name='', filter_flag=filter_flag ) def save(self, **params): raise NotImplementedError @property def userID(self): return self._id @property def username_in_report(self): if self.filter_flag == HQUserType.UNKNOWN: final = mark_safe('%s <strong>[unregistered]</strong>' % html.escape(self.username)) elif self.filter_flag == HQUserType.DEMO_USER: final = mark_safe('<strong>%s</strong>' % html.escape(self.username)) else: final = mark_safe('<strong>%s</strong> (%s)' % tuple(map(html.escape, [self.username, self.user_id]))) return final @property def raw_username(self): return self.username class Meta: app_label = 'reports' DATE_RANGE_CHOICES = ['last7', 'last30', 'lastn', 'lastmonth', 'since', 'range', ''] class ReportConfig(CachedCouchDocumentMixin, Document): domain = StringProperty() # the prefix of the report dispatcher class for this report, used to # get route name for url reversing, and report names report_type = StringProperty() report_slug = StringProperty() subreport_slug = StringProperty(default=None) name = StringProperty() description = StringProperty() owner_id = StringProperty() filters = DictProperty() date_range = StringProperty(choices=DATE_RANGE_CHOICES) days = IntegerProperty(default=None) start_date = DateProperty(default=None) end_date = DateProperty(default=None) datespan_slug = StringProperty(default=None) def delete(self, *args, **kwargs): notifications = self.view('reportconfig/notifications_by_config', reduce=False, include_docs=True, key=self._id).all() for n in notifications: n.config_ids.remove(self._id) if n.config_ids: n.save() else: n.delete() return super(ReportConfig, self).delete(*args, **kwargs) @classmethod def by_domain_and_owner(cls, domain, owner_id, report_slug=None, stale=True, skip=None, limit=None): if stale: #kwargs['stale'] = settings.COUCH_STALE_QUERY pass if report_slug is not None: key = ["name slug", domain, owner_id, report_slug] else: key = ["name", domain, owner_id] db = cls.get_db() kwargs = {} if skip is not None: kwargs['skip'] = skip if limit is not None: kwargs['limit'] = limit result = cache_core.cached_view( db, "reportconfig/configs_by_domain", reduce=False, include_docs=True, startkey=key, endkey=key + [{}], wrapper=cls.wrap, **kwargs ) return result @classmethod def default(self): return { 'name': '', 'description': '', #'date_range': 'last7', 'days': None, 'start_date': None, 
'end_date': None, 'filters': {} } def to_complete_json(self): result = super(ReportConfig, self).to_json() result.update({ 'url': self.url, 'report_name': self.report_name, 'date_description': self.date_description, 'datespan_filters': self.datespan_filters, 'has_ucr_datespan': self.has_ucr_datespan, }) return result @property @memoized def _dispatcher(self): from corehq.apps.userreports.reports.view import ConfigurableReport dispatchers = [ ProjectReportDispatcher, CustomProjectReportDispatcher, ConfigurableReport, ] for dispatcher in dispatchers: if dispatcher.prefix == self.report_type: return dispatcher() raise Exception("Unknown dispatcher: %s" % self.report_type) def get_date_range(self): """Duplicated in reports.config.js""" date_range = self.date_range # allow old report email notifications to represent themselves as a # report config by leaving the default date range up to the report # dispatcher if not date_range: return {} import datetime from dateutil.relativedelta import relativedelta today = datetime.date.today() if date_range == 'since': start_date = self.start_date end_date = today elif date_range == 'range': start_date = self.start_date end_date = self.end_date elif date_range == 'lastmonth': end_date = today start_date = today - relativedelta(months=1) + timedelta(days=1) # add one day to handle inclusiveness else: end_date = today if date_range == 'last7': days = 7 elif date_range == 'last30': days = 30 elif date_range == 'lastn': days = self.days else: raise Exception("Invalid date range") start_date = today - datetime.timedelta(days=days) if start_date is None or end_date is None: # this is due to bad validation. see: http://manage.dimagi.com/default.asp?110906 logging.error('scheduled report %s is in a bad state (no startdate or enddate)' % self._id) return {} dates = { 'startdate': start_date.isoformat(), 'enddate': end_date.isoformat(), } if self.is_configurable_report: filter_slug = self.datespan_slug if filter_slug: return { '%s-start' % filter_slug: start_date.isoformat(), '%s-end' % filter_slug: end_date.isoformat(), filter_slug: '%(startdate)s to %(enddate)s' % dates, } return dates @property @memoized def query_string(self): params = {} if self._id != 'dummy': params['config_id'] = self._id if not self.is_configurable_report: params.update(self.filters) params.update(self.get_date_range()) return urlencode(params, True) @property @memoized def view_kwargs(self): kwargs = {'domain': self.domain, 'report_slug': self.report_slug} if self.subreport_slug: kwargs['subreport_slug'] = self.subreport_slug return kwargs @property @memoized def url(self): try: from django.core.urlresolvers import reverse from corehq.apps.userreports.reports.view import ConfigurableReport if self.is_configurable_report: url_base = reverse(ConfigurableReport.slug, args=[self.domain, self.subreport_slug]) else: url_base = reverse(self._dispatcher.name(), kwargs=self.view_kwargs) return url_base + '?' + self.query_string except Exception: return "#" @property @memoized def report(self): """ Returns None if no report is found for that report slug, which happens when a report is no longer available. All callers should handle this case. 
""" return self._dispatcher.get_report( self.domain, self.report_slug, self.subreport_slug ) @property def report_name(self): try: if self.report is None: return _("Deleted Report") else: return _(self.report.name) except Exception: return _("Unsupported Report") @property def full_name(self): if self.name: return "%s (%s)" % (self.name, self.report_name) else: return self.report_name @property def date_description(self): if self.date_range == 'lastmonth': return "Last Month" elif self.days and not self.start_date: day = 'day' if self.days == 1 else 'days' return "Last %d %s" % (self.days, day) elif self.end_date: return "From %s to %s" % (self.start_date, self.end_date) elif self.start_date: return "Since %s" % self.start_date else: return '' @property @memoized def owner(self): try: return WebUser.get_by_user_id(self.owner_id) except CouchUser.AccountTypeError: return CommCareUser.get_by_user_id(self.owner_id) def get_report_content(self, lang, attach_excel=False): """ Get the report's HTML content as rendered by the static view format. """ try: if self.report is None: return _("The report used to create this scheduled report is no" " longer available on CommCare HQ. Please delete this" " scheduled report and create a new one using an available" " report."), None except Exception: pass from django.http import HttpRequest, QueryDict request = HttpRequest() request.couch_user = self.owner request.user = self.owner.get_django_user() request.domain = self.domain request.couch_user.current_domain = self.domain request.couch_user.language = lang request.GET = QueryDict( self.query_string + '&filterSet=true' + ('&' + urlencode(self.filters, True) + '&' + urlencode(self.get_date_range(), True) if self.is_configurable_report else '') ) # Make sure the request gets processed by PRBAC Middleware CCHQPRBACMiddleware.apply_prbac(request) try: if self.is_configurable_report: response = self._dispatcher.dispatch( request, self.subreport_slug, render_as='email', **self.view_kwargs ) else: response = self._dispatcher.dispatch( request, render_as='email', permissions_check=self._dispatcher.permissions_check, **self.view_kwargs ) if attach_excel is True: if self.is_configurable_report: file_obj = self._dispatcher.dispatch( request, self.subreport_slug, render_as='excel', **self.view_kwargs ) else: file_obj = self._dispatcher.dispatch( request, render_as='excel', permissions_check=self._dispatcher.permissions_check, **self.view_kwargs ) else: file_obj = None return json.loads(response.content)['report'], file_obj except PermissionDenied: return _( "We are sorry, but your saved report '%(config_name)s' " "is no longer accessible because your subscription does " "not allow Custom Reporting. Please talk to your Project " "Administrator about enabling Custom Reports. If you " "want CommCare HQ to stop sending this message, please " "visit %(saved_reports_url)s to remove this " "Emailed Report." ) % { 'config_name': self.name, 'saved_reports_url': absolute_reverse('saved_reports', args=[request.domain]), }, None except Http404: return _("We are sorry, but your saved report '%(config_name)s' " "can not be generated since you do not have the correct permissions. 
" "Please talk to your Project Administrator about getting permissions for this" "report.") % {'config_name': self.name}, None except Exception: notify_exception(None, "Error generating report: {}".format(self.report_slug), details={ 'domain': self.domain, 'user': self.owner.username, 'report': self.report_slug, 'report config': self.get_id }) return _("An error occurred while generating this report."), None @property def is_configurable_report(self): from corehq.apps.userreports.reports.view import ConfigurableReport return isinstance(self._dispatcher, ConfigurableReport) @property @memoized def languages(self): if self.is_configurable_report: return self.report.spec.get_languages() return set() @property @memoized def configurable_report(self): from corehq.apps.userreports.reports.view import ConfigurableReport return ConfigurableReport.get_report( self.domain, self.report_slug, self.subreport_slug ) @property def datespan_filters(self): return (self.configurable_report.datespan_filters if self.is_configurable_report else []) @property def has_ucr_datespan(self): return self.is_configurable_report and self.datespan_filters class UnsupportedScheduledReportError(Exception): pass class ReportNotification(CachedCouchDocumentMixin, Document): domain = StringProperty() owner_id = StringProperty() recipient_emails = StringListProperty() config_ids = StringListProperty() send_to_owner = BooleanProperty() attach_excel = BooleanProperty() # language is only used if some of the config_ids refer to UCRs. language = StringProperty() hour = IntegerProperty(default=8) minute = IntegerProperty(default=0) day = IntegerProperty(default=1) interval = StringProperty(choices=["daily", "weekly", "monthly"]) @property def is_editable(self): try: self.report_slug return False except AttributeError: return True @classmethod def by_domain_and_owner(cls, domain, owner_id, stale=True, **kwargs): if stale: kwargs['stale'] = settings.COUCH_STALE_QUERY key = [domain, owner_id] db = cls.get_db() result = cache_core.cached_view(db, "reportconfig/user_notifications", reduce=False, include_docs=True, startkey=key, endkey=key + [{}], wrapper=cls.wrap, **kwargs) return result @property def all_recipient_emails(self): # handle old documents if not self.owner_id: return [self.owner.get_email()] emails = [] if self.send_to_owner: if self.owner.is_web_user(): emails.append(self.owner.username) else: email = self.owner.get_email() try: validate_email(email) emails.append(email) except Exception: pass emails.extend(self.recipient_emails) return emails @property @memoized def owner(self): id = self.owner_id try: return WebUser.get_by_user_id(id) except CouchUser.AccountTypeError: return CommCareUser.get_by_user_id(id) @property @memoized def configs(self): """ Access the notification's associated configs as a list, transparently returning an appropriate dummy for old notifications which have `report_slug` instead of `config_ids`. 
""" if self.config_ids: configs = ReportConfig.view('_all_docs', keys=self.config_ids, include_docs=True).all() configs = [c for c in configs if not hasattr(c, 'deleted')] elif self.report_slug == 'admin_domains': raise UnsupportedScheduledReportError("admin_domains is no longer " "supported as a schedulable report for the time being") else: # create a new ReportConfig object, useful for its methods and # calculated properties, but don't save it class ReadonlyReportConfig(ReportConfig): def save(self, *args, **kwargs): pass config = ReadonlyReportConfig() object.__setattr__(config, '_id', 'dummy') config.report_type = ProjectReportDispatcher.prefix config.report_slug = self.report_slug config.domain = self.domain config.owner_id = self.owner_id configs = [config] return configs @property def day_name(self): if self.interval == 'weekly': return calendar.day_name[self.day] return { "daily": _("Every day"), "monthly": _("Day %s of every month" % self.day), }[self.interval] @classmethod def day_choices(cls): """Tuples for day of week number and human-readable day of week""" return tuple([(val, calendar.day_name[val]) for val in range(7)]) @classmethod def hour_choices(cls): """Tuples for hour number and human-readable hour""" return tuple([(val, "%s:00" % val) for val in range(24)]) def send(self): from dimagi.utils.django.email import send_HTML_email from corehq.apps.reports.views import get_scheduled_report_response # Scenario: user has been removed from the domain that they # have scheduled reports for. Delete this scheduled report if not self.owner.is_member_of(self.domain): self.delete() return if self.all_recipient_emails: title = "Scheduled report from CommCare HQ" if hasattr(self, "attach_excel"): attach_excel = self.attach_excel else: attach_excel = False body, excel_files = get_scheduled_report_response(self.owner, self.domain, self._id, attach_excel=attach_excel) for email in self.all_recipient_emails: send_HTML_email(title, email, body.content, email_from=settings.DEFAULT_FROM_EMAIL, file_attachments=excel_files) class AppNotFound(Exception): pass class HQExportSchema(SavedExportSchema): doc_type = 'SavedExportSchema' domain = StringProperty() transform_dates = BooleanProperty(default=True) @property def global_transform_function(self): if self.transform_dates: return couch_to_excel_datetime else: return identity @classmethod def wrap(cls, data): if 'transform_dates' not in data: data['transform_dates'] = False self = super(HQExportSchema, cls).wrap(data) if not self.domain: self.domain = self.index[0] return self class FormExportSchema(HQExportSchema): doc_type = 'SavedExportSchema' app_id = StringProperty() include_errors = BooleanProperty(default=False) split_multiselects = BooleanProperty(default=False) def update_schema(self): super(FormExportSchema, self).update_schema() if self.split_multiselects: self.update_question_schema() for column in [column for table in self.tables for column in table.columns]: if isinstance(column, SplitColumn): question = self.question_schema.question_schema.get(column.index) column.options = question.options column.ignore_extras = True def update_question_schema(self): schema = self.question_schema schema.update_schema() @property def question_schema(self): return FormQuestionSchema.get_or_create(self.domain, self.app_id, self.xmlns) @property @memoized def app(self): if self.app_id: try: return get_app(self.domain, self.app_id, latest=True) except Http404: logging.error('App %s in domain %s not found for export %s' % ( self.app_id, self.domain, 
self.get_id )) raise AppNotFound() else: return None @classmethod def wrap(cls, data): self = super(FormExportSchema, cls).wrap(data) if self.filter_function == 'couchforms.filters.instances': # grandfather in old custom exports self.include_errors = False self.filter_function = None return self @property def filter(self): user_ids = set(CouchUser.ids_by_domain(self.domain)) user_ids.update(CouchUser.ids_by_domain(self.domain, is_active=False)) user_ids.add('demo_user') def _top_level_filter(form): # careful, closures used return form_matches_users(form, user_ids) or is_commconnect_form(form) f = SerializableFunction(_top_level_filter) if self.app_id is not None: f.add(reports.util.app_export_filter, app_id=self.app_id) if not self.include_errors: f.add(couchforms.filters.instances) actual = SerializableFunction(default_form_filter, filter=f) return actual @property def domain(self): return self.index[0] @property def xmlns(self): return self.index[1] @property def formname(self): return xmlns_to_name(self.domain, self.xmlns, app_id=self.app_id) @property @memoized def question_order(self): try: if not self.app: return [] except AppNotFound: if settings.DEBUG: return [] raise else: questions = self.app.get_questions(self.xmlns) order = [] for question in questions: if not question['value']: # question probably belongs to a broken form continue index_parts = question['value'].split('/') assert index_parts[0] == '' index_parts[1] = 'form' index = '.'.join(index_parts[1:]) order.append(index) return order def get_default_order(self): return {'#': self.question_order} def uses_cases(self): if not self.app or isinstance(self.app, RemoteApp): return False form = self.app.get_form_by_xmlns(self.xmlns) if form and isinstance(form, Form): return bool(form.active_actions()) return False class FormDeidExportSchema(FormExportSchema):<|fim▁hole|> @classmethod def get_case(cls, doc, case_id): pass class CaseExportSchema(HQExportSchema): doc_type = 'SavedExportSchema' @property def filter(self): return SerializableFunction(default_case_filter) @property def domain(self): return self.index[0] @property def domain_obj(self): return Domain.get_by_name(self.domain) @property def case_type(self): return self.index[1] @property def applications(self): return self.domain_obj.full_applications(include_builds=False) @property def case_properties(self): props = set([]) for app in self.applications: prop_map = get_case_properties(app, [self.case_type], defaults=("name",)) props |= set(prop_map[self.case_type]) return props class FakeFormExportSchema(FakeSavedExportSchema): def remap_tables(self, tables): # kill the weird confusing stuff, and rename the main table to something sane tables = _apply_removal(tables, ('#|#export_tag|#', '#|location_|#', '#|history|#')) return _apply_mapping(tables, { '#': 'Forms', }) def _apply_mapping(export_tables, mapping_dict): def _clean(tabledata): def _clean_tablename(tablename): return mapping_dict.get(tablename, tablename) return (_clean_tablename(tabledata[0]), tabledata[1]) return map(_clean, export_tables) def _apply_removal(export_tables, removal_list): return [tabledata for tabledata in export_tables if not tabledata[0] in removal_list] class HQGroupExportConfiguration(CachedCouchDocumentMixin, GroupExportConfiguration): """ HQ's version of a group export, tagged with a domain """ domain = StringProperty() def get_custom_exports(self): def _rewrap(export): # custom wrap if relevant try: return { 'form': FormExportSchema, 'case': CaseExportSchema, 
}[export.type].wrap(export._doc) except KeyError: return export for custom in list(self.custom_export_ids): custom_export = self._get_custom(custom) if custom_export: yield _rewrap(custom_export) def exports_of_type(self, type): return self._saved_exports_from_configs([ config for config, schema in self.all_exports if schema.type == type ]) @property @memoized def form_exports(self): return self.exports_of_type('form') @property @memoized def case_exports(self): return self.exports_of_type('case') @classmethod def by_domain(cls, domain): return cache_core.cached_view(cls.get_db(), "groupexport/by_domain", key=domain, reduce=False, include_docs=True, wrapper=cls.wrap, ) @classmethod def get_for_domain(cls, domain): """ For when we only expect there to be one of these per domain, which right now is always. """ groups = cls.by_domain(domain) if groups: if len(groups) > 1: logging.error("Domain %s has more than one group export config! This is weird." % domain) return groups[0] return HQGroupExportConfiguration(domain=domain) @classmethod def add_custom_export(cls, domain, export_id): group = cls.get_for_domain(domain) if export_id not in group.custom_export_ids: group.custom_export_ids.append(export_id) group.save() return group @classmethod def remove_custom_export(cls, domain, export_id): group = cls.get_for_domain(domain) updated = False while export_id in group.custom_export_ids: group.custom_export_ids.remove(export_id) updated = True if updated: group.save() return group<|fim▁end|>
@property def transform(self): return SerializableFunction()
<|file_name|>sync.rs<|end_file_name|><|fim▁begin|>use std::io; use std::net::SocketAddr; use ldap::Ldap; use search::{Scope, DerefAliases, SearchEntry};<|fim▁hole|> use tokio_core::reactor::{Core, Handle}; pub struct LdapSync { inner: Ldap, core: Core, } impl LdapSync { pub fn connect(addr: &SocketAddr) -> Result<LdapSync, io::Error> { // TODO better error handling let mut core = Core::new().unwrap(); let handle = core.handle(); let ldapfut = Ldap::connect(addr, &handle); let ldap = try!(core.run(ldapfut)); Ok(LdapSync { inner: ldap, core: core }) } pub fn connect_ssl(addr: &str) -> Result<LdapSync, io::Error> { // TODO better error handling let mut core = Core::new().unwrap(); let handle = core.handle(); let ldapfut = Ldap::connect_ssl(addr, &handle); let ldap = try!(core.run(ldapfut)); Ok(LdapSync { inner: ldap, core: core }) } pub fn simple_bind(&mut self, dn: String, pw: String) -> io::Result<bool> { self.core.run(self.inner.simple_bind(dn, pw)) } pub fn search(&mut self, base: String, scope: Scope, deref: DerefAliases, typesonly: bool, filter: String, attrs: Vec<String>) -> io::Result<Vec<SearchEntry>> { self.core.run(self.inner.search(base, scope, deref, typesonly, filter, attrs)) } }<|fim▁end|>
<|file_name|>FindOverriders.java<|end_file_name|><|fim▁begin|>package edu.cmu.hcii.whyline.analysis; import java.util.SortedSet; import java.util.TreeSet; import edu.cmu.hcii.whyline.bytecode.MethodInfo; import edu.cmu.hcii.whyline.source.JavaSourceFile; import edu.cmu.hcii.whyline.source.Line; import edu.cmu.hcii.whyline.source.Token; import edu.cmu.hcii.whyline.ui.WhylineUI; /** * @author Andrew J. Ko * */ public class FindOverriders implements SearchResultsInterface { private final MethodInfo method; private final WhylineUI whylineUI; private final SortedSet<Token> overriders = new TreeSet<Token>(); public FindOverriders(WhylineUI whylineUI, MethodInfo method) { this.whylineUI = whylineUI; this.method = method; for(MethodInfo m : method.getOverriders()) { JavaSourceFile source = m.getClassfile().getSourceFile(); if(source != null) { Line line = source.getTokenForMethodName(m).getLine(); overriders.addAll(line.getTokensAfterFirstNonWhitespaceToken()); } } } <|fim▁hole|> public SortedSet<Token> getResults() { return overriders; } public boolean isDone() { return true; } }<|fim▁end|>
public String getResultsDescription() { return "overriders of " + method.getInternalName(); } public String getCurrentStatus() { return "Done."; }
<|file_name|>formatPhoneNumberDefaultMetadata.test.js<|end_file_name|><|fim▁begin|>import formatPhoneNumber, { formatPhoneNumberIntl } from './formatPhoneNumberDefaultMetadata' describe('formatPhoneNumberDefaultMetadata', () => { it('should format phone numbers', () => {<|fim▁hole|> formatPhoneNumber('+12133734253', 'NATIONAL').should.equal('(213) 373-4253') formatPhoneNumber('+12133734253', 'INTERNATIONAL').should.equal('+1 213 373 4253') formatPhoneNumberIntl('+12133734253').should.equal('+1 213 373 4253') }) })<|fim▁end|>
<|file_name|>data-mapping.js<|end_file_name|><|fim▁begin|>var DataMapping = require("montage-data/logic/service/data-mapping").DataMapping, ObjectDescriptor = require("montage-data/logic/model/object-descriptor").ObjectDescriptor; describe("A DataMapping", function() { function ClassA(a, b, c, d) { this.a = a; this.b = b; this.c = c; this.d = d; } function ClassB(a, b, c, d) { this.a = a; this.b = b; this.c = c; this.d = d; } it("can be created", function () { expect(new DataMapping()).toBeDefined(); }); it("copies raw data properties by default", function () { var object = {x: 42}, random = Math.random(), data = new ClassA(1, 2, object, random),<|fim▁hole|> }); });<|fim▁end|>
mapped = new ClassB(); new DataMapping().mapFromRawData(mapped, data); expect(mapped).toEqual(new ClassB(1, 2, object, random));
<|file_name|>api_boilerplate.py<|end_file_name|><|fim▁begin|>""" This module is responsible for doing all the authentication. Adapted from the Google API Documentation. """ from __future__ import print_function import os import httplib2 import apiclient import oauth2client try: import argparse flags = argparse.ArgumentParser( parents=[oauth2client.tools.argparser]).parse_args() except ImportError: flags = None SCOPES = 'https://www.googleapis.com/auth/drive' CLIENT_SECRET_FILE = 'client_secret.json' # Enter your project name here!! APPLICATION_NAME = 'API Project' def get_credentials(): """Gets valid user credentials from storage. If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow is completed to obtain the new credentials. Returns: Credentials, the obtained credential.<|fim▁hole|> credential_dir = os.path.join(home_dir, '.gdrive-credentials-cache') if not os.path.exists(credential_dir): os.makedirs(credential_dir) credential_path = os.path.join(credential_dir, 'gdrive-credentials.json') store = oauth2client.file.Storage(credential_path) credentials = store.get() if not credentials or credentials.invalid: flow = oauth2client.client.flow_from_clientsecrets( CLIENT_SECRET_FILE, SCOPES) flow.user_agent = APPLICATION_NAME if flags: credentials = oauth2client.tools.run_flow(flow, store, flags) else: # Needed only for compatibility with Python 2.6 credentials = oauth2client.tools.run(flow, store) print('Storing credentials to ' + credential_path) return credentials credentials = get_credentials() http = credentials.authorize(httplib2.Http()) file_service = apiclient.discovery.build('drive', 'v3', http=http).files()<|fim▁end|>
""" home_dir = os.path.expanduser('~')
<|file_name|>dispersion.py<|end_file_name|><|fim▁begin|># Natural Language Toolkit: Dispersion Plots # # Copyright (C) 2001-2015 NLTK Project # Author: Steven Bird <[email protected]> # URL: <http://nltk.org/> # For license information, see LICENSE.TXT """ A utility for displaying lexical dispersion. """ def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot"): """ Generate a lexical dispersion plot. :param text: The source text :type text: list(str) or enum(str)<|fim▁hole|> :param words: The target words :type words: list of str :param ignore_case: flag to set if case should be ignored when searching text :type ignore_case: bool """ try: from matplotlib import pylab except ImportError: raise ValueError('The plot function requires matplotlib to be installed.' 'See http://matplotlib.org/') text = list(text) words.reverse() if ignore_case: words_to_comp = list(map(str.lower, words)) text_to_comp = list(map(str.lower, text)) else: words_to_comp = words text_to_comp = text points = [(x,y) for x in range(len(text_to_comp)) for y in range(len(words_to_comp)) if text_to_comp[x] == words_to_comp[y]] if points: x, y = list(zip(*points)) else: x = y = () pylab.plot(x, y, "b|", scalex=.1) pylab.yticks(list(range(len(words))), words, color="b") pylab.ylim(-1, len(words)) pylab.title(title) pylab.xlabel("Word Offset") pylab.show() if __name__ == '__main__': import nltk.compat from nltk.corpus import gutenberg words = ['Elinor', 'Marianne', 'Edward', 'Willoughby'] dispersion_plot(gutenberg.words('austen-sense.txt'), words)<|fim▁end|>
<|file_name|>BooleanType.ts<|end_file_name|><|fim▁begin|>import {Equalitable} from '../helpers/Equalitable'; import {Cloneable} from '../helpers/Cloneable'; import {Comparable} from '../helpers/Comparable'; <|fim▁hole|>/** * Utility class for boolean type * * TODO: Add boolean operators like and, or, xor? * TODO: Change DEFAULT_VALUE, TRUE and FALSE types from boolean to BooleanType? */ export class BooleanType implements Equalitable, Cloneable, Comparable<boolean | BooleanType> { /** * Type for boolean * * @type {string} */ public static readonly TYPE: string = 'boolean'; /** * Default value * * @type {boolean} */ public static readonly DEFAULT_VALUE: boolean = false; /** * True value * * @type {boolean} */ public static readonly TRUE: boolean = true; /** * False value * * @type {boolean} */ public static readonly FALSE: boolean = false; /** * Value * * @type {boolean} */ private value: boolean; /** * Constructor method * * @param {boolean} value * @constructor */ public constructor(value: boolean) { this.value = value; } /** * Get value * * @return {boolean} */ public getValue(): boolean { return this.value; } /** * Set value * * @param {boolean} value * @return {BooleanType} */ public setValue(value: boolean): BooleanType { this.value = value; return this; } public clone(): BooleanType { return new BooleanType(this.value); } public equals(other: boolean | BooleanType): boolean { return this.value === (BooleanType.is(other) ? other : (<BooleanType> other).getValue()); } public compareTo(other: boolean | BooleanType): number { other = BooleanType.valueOf(other).getValue(); if(this.value && !other) { return 1; } if(!this.value && other) { return -1; } return 0; } /** * Transform value to string * * @return {string} */ public toString(): string { return this.value.toString(); } /** * Get if value is boolean * * @param {*} value * @return {boolean} */ public static is(value: any): boolean { return typeof value === this.TYPE; } /** * Parse boolean * * @param {*} value * @return {boolean} */ public static parse(value: any): boolean { if(value instanceof BooleanType) { return value.getValue(); } if(value === true || value === 'true') { return true; } //TODO: Use types to check this if(typeof value === 'number' || (!isNaN(value) && typeof value === 'string')) { return value != 0 && !isNaN(<any> value); } return BooleanType.DEFAULT_VALUE; } /** * Get boolean value of some value * * @param {*} value * @return {BooleanType} */ public static valueOf(value: any): BooleanType { return new BooleanType(BooleanType.parse(value)); } /** * Get string representation of value * * @param {boolean | BooleanType} value * @return {string} * TODO: This is realy necessary? */ public static toString(value: boolean | BooleanType): string { return value.toString(); } }<|fim▁end|>
<|file_name|>factories.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import unicode_literals import factory from udata.factories import ModelFactory from .models import Dataset, Resource, Checksum, CommunityResource, License from udata.core.organization.factories import OrganizationFactory from udata.core.spatial.factories import SpatialCoverageFactory class DatasetFactory(ModelFactory): class Meta: model = Dataset title = factory.Faker('sentence') description = factory.Faker('text') frequency = 'unknown' class Params: geo = factory.Trait( spatial=factory.SubFactory(SpatialCoverageFactory) ) visible = factory.Trait( resources=factory.LazyAttribute(lambda o: [ResourceFactory()]) ) org = factory.Trait( organization=factory.SubFactory(OrganizationFactory), ) class VisibleDatasetFactory(DatasetFactory): @factory.lazy_attribute def resources(self): return [ResourceFactory()] class ChecksumFactory(ModelFactory): class Meta:<|fim▁hole|> type = 'sha1' value = factory.Faker('sha1') class BaseResourceFactory(ModelFactory): title = factory.Faker('sentence') description = factory.Faker('text') filetype = 'file' url = factory.Faker('url') checksum = factory.SubFactory(ChecksumFactory) mime = factory.Faker('mime_type', category='text') filesize = factory.Faker('pyint') class CommunityResourceFactory(BaseResourceFactory): class Meta: model = CommunityResource class ResourceFactory(BaseResourceFactory): class Meta: model = Resource class LicenseFactory(ModelFactory): class Meta: model = License id = factory.Faker('unique_string') title = factory.Faker('sentence') url = factory.Faker('uri')<|fim▁end|>
model = Checksum
<|file_name|>UProveToken.java<|end_file_name|><|fim▁begin|>//********************************************************* // // Copyright (c) Microsoft. All rights reserved. // This code is licensed under the Apache License Version 2.0. // THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF // ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY // IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR // PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. // //********************************************************* package com.microsoft.uprove; import java.util.Arrays; /** * Specifies a U-Prove token. */ public class UProveToken { private byte[] issuerParametersUID; private byte[] publicKey; private byte[] tokenInformation; private byte[] proverInformation; private byte[] sigmaZ; private byte[] sigmaC; private byte[] sigmaR; private boolean isDeviceProtected = false; /** * Constructs a new U-Prove token. */ public UProveToken() { super(); } /** * Constructs a new U-Prove token. * @param issuerParametersUID an issuer parameters UID. * @param publicKey a public key. * @param tokenInformation a token information value. * @param proverInformation a prover information value. * @param sigmaZ a sigmaZ value. * @param sigmaC a sigmaC value. * @param sigmaR a sigmaR value. * @param isDeviceProtected indicates if the token is Device-protected. */ public UProveToken(byte[] issuerParametersUID, byte[] publicKey, byte[] tokenInformation, byte[] proverInformation, byte[] sigmaZ, byte[] sigmaC, byte[] sigmaR, boolean isDeviceProtected) { super(); this.issuerParametersUID = issuerParametersUID; this.publicKey = publicKey; this.tokenInformation = tokenInformation; this.proverInformation = proverInformation; this.sigmaZ = sigmaZ; this.sigmaC = sigmaC; this.sigmaR = sigmaR; this.isDeviceProtected = isDeviceProtected; } /** * Gets the issuer parameters UID value. * @return the issuerParameters UID value. */ public byte[] getIssuerParametersUID() { return issuerParametersUID; } /** * Sets the issuer parameters UID value. * @param issuerParametersUID the issuerParameters UID value to set. */ public void setIssuerParametersUID(byte[] issuerParametersUID) { this.issuerParametersUID = issuerParametersUID; } /** * Gets the public key value. * @return the publicKey value. */ public byte[] getPublicKey() { return publicKey; } /** * Sets the public key value. * @param publicKey the public key value to set. */ public void setPublicKey(byte[] publicKey) { this.publicKey = publicKey; } /** * Gets the token information value. * @return the token information value. */ public byte[] getTokenInformation() { return tokenInformation; } /** * Sets the token information value. * @param tokenInformation the token information value to set. */ public void setTokenInformation(byte[] tokenInformation) { this.tokenInformation = tokenInformation; } /** * Gets the prover information value. * @return the prover information value. */ public byte[] getProverInformation() { return proverInformation; } /** * Sets the prover information value. * @param proverInformation the prover information value to set. */ <|fim▁hole|> this.proverInformation = proverInformation; } /** * Gets the sigmaZ value. * @return the sigmaZ value. */ public byte[] getSigmaZ() { return sigmaZ; } /** * Sets the sigmaZ value. * @param sigmaZ the sigmaZ value to set. */ public void setSigmaZ(byte[] sigmaZ) { this.sigmaZ = sigmaZ; } /** * Gets the sigmaC value. * @return the sigmaC value. */ public byte[] getSigmaC() { return sigmaC; } /** * Sets the sigmaC value. 
* @param sigmaC the sigmaC value to set. */ public void setSigmaC(byte[] sigmaC) { this.sigmaC = sigmaC; } /** * Gets the sigmaR value. * @return the sigmaR value. */ public byte[] getSigmaR() { return sigmaR; } /** * Sets the sigmaR value. * @param sigmaR the sigmaR value to set. */ public void setSigmaR(byte[] sigmaR) { this.sigmaR = sigmaR; } /** * Returns true if the token is Device-protected, false otherwise. * @return the Device-protected boolean. */ boolean isDeviceProtected() { return isDeviceProtected; } /** * Sets the boolean indicating if the token is Device-protected. * @param isDeviceProtected true if the token is Device-protected. */ void setIsDeviceProtected(boolean isDeviceProtected) { this.isDeviceProtected = isDeviceProtected; } /** * Indicates whether some other object is "equal to" this one. * @param o the reference object with which to compare. * @return <code>true</code> if this object is the same as the * <code>o</code> argument; <code>false</code> otherwise. */ public boolean equals(final Object o) { if (o == this) { return true; } if (!(o instanceof UProveToken)) { return false; } UProveToken upt = (UProveToken) o; return Arrays.equals(this.issuerParametersUID, upt.issuerParametersUID) && Arrays.equals(this.publicKey, upt.publicKey) && Arrays.equals(this.tokenInformation, upt.tokenInformation) && Arrays.equals(this.proverInformation, upt.proverInformation) && Arrays.equals(this.sigmaZ, upt.sigmaZ) && Arrays.equals(this.sigmaC, upt.sigmaC) && Arrays.equals(this.sigmaR, upt.sigmaR) && this.isDeviceProtected == upt.isDeviceProtected; } /** * Returns a hash code value for the object. * @return a hash code value for the object. */ public int hashCode() { int result = 237; result = 201 * result + Arrays.hashCode(this.issuerParametersUID); result = 201 * result + Arrays.hashCode(this.publicKey); result = 201 * result + Arrays.hashCode(this.tokenInformation); result = 201 * result + Arrays.hashCode(this.proverInformation); result = 201 * result + Arrays.hashCode(this.sigmaZ); result = 201 * result + Arrays.hashCode(this.sigmaC); result = 201 * result + Arrays.hashCode(this.sigmaR); result = result + (this.isDeviceProtected ? 201 : 0); return result; } }<|fim▁end|>
public void setProverInformation(byte[] proverInformation) {
<|file_name|>prng4.js.uncompressed.js<|end_file_name|><|fim▁begin|>// AMD-ID "dojox/math/random/prng4" define("dojox/math/random/prng4", ["dojo", "dojox"], function(dojo, dojox) { <|fim▁hole|>// All Rights Reserved. // See "LICENSE-BigInteger" for details. // prng4.js - uses Arcfour as a PRNG function Arcfour() { this.i = 0; this.j = 0; this.S = new Array(256); } dojo.extend(Arcfour, { init: function(key){ // summary: // Initialize arcfour context // key: int[] // an array of ints, each from [0..255] var i, j, t, S = this.S, len = key.length; for(i = 0; i < 256; ++i){ S[i] = i; } j = 0; for(i = 0; i < 256; ++i){ j = (j + S[i] + key[i % len]) & 255; t = S[i]; S[i] = S[j]; S[j] = t; } this.i = 0; this.j = 0; }, next: function(){ var t, i, j, S = this.S; this.i = i = (this.i + 1) & 255; this.j = j = (this.j + S[i]) & 255; t = S[i]; S[i] = S[j]; S[j] = t; return S[(t + S[i]) & 255]; } }); dojox.math.random.prng4 = function(){ return new Arcfour(); }; // Pool size must be a multiple of 4 and greater than 32. // An array of bytes the size of the pool will be passed to init() dojox.math.random.prng4.size = 256; return dojox.math.random.prng4; });<|fim▁end|>
dojo.getObject("math.random.prng4", true, dojox); // Copyright (c) 2005 Tom Wu
<|file_name|>disablenonworkingunits.cpp<|end_file_name|><|fim▁begin|>#include "disablenonworkingunits.h" #include "wololo/datPatch.h" namespace wololo { void disableNonWorkingUnitsPatch(genie::DatFile *aocDat, std::map<int, std::string> *langReplacement) {<|fim▁hole|> for (size_t civIndex = 0; civIndex < aocDat->Civs.size(); civIndex++) { aocDat->Civs[civIndex].Units[1119].HideInEditor = 1; aocDat->Civs[civIndex].Units[1145].HideInEditor = 1; aocDat->Civs[civIndex].Units[1147].HideInEditor = 1; aocDat->Civs[civIndex].Units[1221].HideInEditor = 1; aocDat->Civs[civIndex].Units[1401].HideInEditor = 1; for (size_t unitIndex = 1224; unitIndex <= 1390; unitIndex++) { aocDat->Civs[civIndex].Units[unitIndex].HideInEditor = 1; } } } DatPatch disableNonWorkingUnits = { &disableNonWorkingUnitsPatch, "Hide units in the scenario editor" }; }<|fim▁end|>
/* * Disabling units that are not supposed to show in the scenario editor */
<|file_name|>courses.py<|end_file_name|><|fim▁begin|># pylint: disable=missing-docstring # pylint: disable=redefined-outer-name # pylint: disable=unused-argument from lettuce import step, world from common import * ############### ACTIONS #################### @step('There are no courses$') def no_courses(step):<|fim▁hole|> @step('I click the New Course button$') def i_click_new_course(step): world.css_click('.new-course-button') @step('I fill in the new course information$') def i_fill_in_a_new_course_information(step): fill_in_course_info() @step('I create a course with "([^"]*)", "([^"]*)", "([^"]*)", and "([^"]*)"') def i_create_course(step, name, org, number, run): fill_in_course_info(name=name, org=org, num=number, run=run) @step('I create a new course$') def i_create_a_course(step): create_a_course() @step('I click the course link in Studio Home$') def i_click_the_course_link_in_studio_home(step): # pylint: disable=invalid-name course_css = 'a.course-link' world.css_click(course_css) @step('I see an error about the length of the org/course/run tuple') def i_see_error_about_length(step): assert world.css_has_text( '#course_creation_error', 'The combined length of the organization, course number, ' 'and course run fields cannot be more than 65 characters.' ) ############ ASSERTIONS ################### @step('the Courseware page has loaded in Studio$') def courseware_page_has_loaded_in_studio(step): course_title_css = 'span.course-title' assert world.is_css_present(course_title_css) @step('I see the course listed in Studio Home$') def i_see_the_course_in_studio_home(step): course_css = 'h3.class-title' assert world.css_has_text(course_css, world.scenario_dict['COURSE'].display_name) @step('I am on the "([^"]*)" tab$') def i_am_on_tab(step, tab_name): header_css = 'div.inner-wrapper h1' assert world.css_has_text(header_css, tab_name) @step('I see a link for adding a new section$') def i_see_new_section_link(step): link_css = '.outline .button-new' assert world.css_has_text(link_css, 'New Section')<|fim▁end|>
world.clear_courses() create_studio_user()
<|file_name|>elementexe.cpp<|end_file_name|><|fim▁begin|>/* * Script element for command execution. * Copyright (C) 2009-2010 Petr Kubanek <[email protected]> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "elementexe.h" #include "rts2script/execcli.h" #include "rts2script/script.h" using namespace rts2script; using namespace rts2image; ConnExecute::ConnExecute (Execute *_masterElement, rts2core::Block *_master, const char *_exec):ConnExe (_master, _exec, true) { masterElement = _masterElement; exposure_started = 0; keep_next_image = false; waitTargetMove = false; } ConnExecute::~ConnExecute () { if (masterElement != NULL) { if (masterElement->getClient () != NULL) masterElement->getClient ()->postEvent (new rts2core::Event (EVENT_COMMAND_OK)); masterElement->deleteExecConn (); } for (std::list <Image *>::iterator iter = images.begin (); iter != images.end (); iter++) { logStream (MESSAGE_WARNING) << "removing image " << (*iter)->getAbsoluteFileName () << ", you probably don't want this - please make sure images are processed in script" << sendLog; (*iter)->deleteImage (); deleteImage (*iter); } } void ConnExecute::postEvent (rts2core::Event *event) { switch (event->getType ()) { case EVENT_MOVE_OK: if (waitTargetMove) writeToProcess ("0"); if (masterElement && masterElement->getConnection ()) logStream (MESSAGE_DEBUG) << masterElement->getConnection ()->getName () << " elementexe get EVENT_MOVE_OK" << sendLog; break; case EVENT_MOVE_FAILED: if (waitTargetMove) { writeToProcess ("! 
move failed"); writeToProcess ("ERR"); } if (masterElement && masterElement->getConnection ()) logStream (MESSAGE_DEBUG) << masterElement->getConnection ()->getName () << " elementexe get EVENT_MOVE_FAILED" << sendLog; break; } ConnExe::postEvent (event); } void ConnExecute::notActive () { // waiting for image - this will not be returned switch (exposure_started) { case 1: writeToProcess ("& exposure interrupted"); exposure_started = -5; break; case 2: writeToProcess ("& readout interruped"); exposure_started = -6; break; } ConnExe::notActive (); } void ConnExecute::processCommand (char *cmd) { char *imagename; char *expandPath; char *device; char *value; char *operat; char *operand; char *comm; if (!strcasecmp (cmd, "exposure")) { if (!checkActive (true)) return; if (masterElement == NULL || masterElement->getConnection () == NULL || masterElement->getClient () == NULL) return; masterElement->getConnection ()->queCommand (new rts2core::CommandExposure (getMaster (), (rts2core::DevClientCamera *) masterElement->getClient (), BOP_EXPOSURE)); exposure_started = 1; } else if (!strcasecmp (cmd, "exposure_wfn") || !strcasecmp (cmd, "exposure_overwrite")) { if (!checkActive (true)) return; if (paramNextString (&imagename)) return; if (masterElement == NULL || masterElement->getConnection () == NULL || masterElement->getClient () == NULL) return; ((rts2script::DevClientCameraExec *) masterElement->getClient ())->setExpandPath (imagename); ((rts2script::DevClientCameraExec *) masterElement->getClient ())->setOverwrite (!strcasecmp (cmd, "exposure_overwrite")); masterElement->getConnection ()->queCommand (new rts2core::CommandExposure (getMaster (), (rts2core::DevClientCamera *) masterElement->getClient (), BOP_EXPOSURE)); keep_next_image = true; exposure_started = 1; } else if (!strcasecmp (cmd, "progress")) { double start,end; if (paramNextDouble (&start) || paramNextDouble (&end) || !paramEnd ()) return; if (masterElement == NULL || masterElement->getClient () == NULL) return; ((DevClientCameraExec *) masterElement->getClient ())->scriptProgress (start, end); } else if (!strcasecmp (cmd, "radec")) { if (!checkActive ()) return; struct ln_equ_posn radec; if (paramNextHMS (&radec.ra) || paramNextDMS (&radec.dec) || !paramEnd ()) return; master->postEvent (new rts2core::Event (EVENT_CHANGE_TARGET, (void *) &radec)); } else if (!strcasecmp (cmd, "newobs")) { if (!checkActive ()) return; struct ln_equ_posn radec; if (paramNextHMS (&radec.ra) || paramNextDMS (&radec.dec) || !paramEnd ()) return; master->postEvent (new rts2core::Event (EVENT_NEW_TARGET, (void *) &radec)); } else if (!strcasecmp (cmd, "altaz")) { if (!checkActive (false)) return; struct ln_hrz_posn hrz; if (paramNextDMS (&hrz.alt) || paramNextDMS (&hrz.az) || !paramEnd ()) return; master->postEvent (new rts2core::Event (EVENT_CHANGE_TARGET_ALTAZ, (void *) &hrz)); } else if (!strcasecmp (cmd, "newaltaz")) { if (!checkActive (false)) return; struct ln_hrz_posn hrz; if (paramNextDMS (&hrz.alt) || paramNextDMS (&hrz.az) || !paramEnd ()) return; master->postEvent (new rts2core::Event (EVENT_NEW_TARGET_ALTAZ, (void *) &hrz)); } else if (!strcmp (cmd, "dark")) { if (paramNextString (&imagename)) return; std::list <Image *>::iterator iter = findImage (imagename); if (iter != images.end ()) { (*iter)->toDark (); writeToProcess ((*iter)->getAbsoluteFileName ()); if (masterElement != NULL && masterElement->getClient () != NULL) ((DevClientCameraExec *) masterElement->getClient ())->queImage (*iter); deleteImage (*iter); images.erase (iter); } else 
{ logStream (MESSAGE_ERROR) << "cannot move " << imagename << " to dark path, image was probably already handled (renamed,..)" << sendLog; } } else if (!strcmp (cmd, "flat")) {<|fim▁hole|> return; std::list <Image *>::iterator iter = findImage (imagename); if (iter != images.end ()) { (*iter)->toFlat (); writeToProcess ((*iter)->getAbsoluteFileName ()); if (masterElement != NULL && masterElement->getClient () != NULL) ((DevClientCameraExec *) masterElement->getClient ())->queImage (*iter); deleteImage (*iter); images.erase (iter); } else { logStream (MESSAGE_ERROR) << "cannot move " << imagename << " to flat path, image was probably already handled (renamed,..)" << sendLog; } } else if (!strcmp (cmd, "archive")) { if (paramNextString (&imagename)) return; std::list <Image *>::iterator iter = findImage (imagename); if (iter != images.end ()) { (*iter)->toArchive (); writeToProcess ((*iter)->getAbsoluteFileName ()); deleteImage (*iter); images.erase (iter); } else { logStream (MESSAGE_ERROR) << "cannot move " << imagename << " to archive path, image was probably already handled (renamed,..)" << sendLog; } } else if (!strcmp (cmd, "trash")) { if (paramNextString (&imagename)) return; std::list <Image *>::iterator iter = findImage (imagename); if (iter != images.end ()) { (*iter)->toTrash (); writeToProcess ((*iter)->getAbsoluteFileName ()); deleteImage (*iter); images.erase (iter); } else { logStream (MESSAGE_ERROR) << "cannot move " << imagename << " to trash path, image was probably already handled (renamed,..)" << sendLog; } } else if (!strcmp (cmd, "rename")) { if (paramNextString (&imagename) || paramNextString (&expandPath)) return; try { std::list <Image *>::iterator iter = findImage (imagename); if (iter != images.end ()) { (*iter)->renameImageExpand (expandPath); writeToProcess ((*iter)->getAbsoluteFileName ()); deleteImage (*iter); images.erase (iter); } else { logStream (MESSAGE_ERROR) << "cannot rename " << imagename << ", image was probably already handled (renamed,..)" << sendLog; } } catch (rts2core::Error &er) { writeToProcess ((std::string ("E failed ") + er.what ()).c_str ()); } } else if (!strcmp (cmd, "move")) { if (paramNextString (&imagename) || paramNextString (&expandPath)) return; std::list <Image *>::iterator iter = findImage (imagename); if (iter != images.end ()) { (*iter)->renameImageExpand (expandPath); writeToProcess ((*iter)->getAbsoluteFileName ()); (*iter)->deleteFromDB (); deleteImage (*iter); images.erase (iter); } else { logStream (MESSAGE_ERROR) << "cannot move " << imagename << ", image was probably already handled (renamed,..)" << sendLog; } } else if (!strcmp (cmd, "copy")) { if (paramNextString (&imagename) || paramNextString (&expandPath)) return; std::list <Image *>::iterator iter = findImage (imagename); if (iter != images.end ()) { (*iter)->copyImageExpand (expandPath); writeToProcess ((*iter)->getAbsoluteFileName ()); } else { logStream (MESSAGE_ERROR) << "cannot copy " << imagename << ", image was probably already handled (renamed,..)" << sendLog; } } else if (!strcmp (cmd, "delete")) { if (paramNextString (&imagename)) return; std::list <Image *>::iterator iter = findImage (imagename); if (iter != images.end ()) { (*iter)->deleteImage (); deleteImage (*iter); images.erase (iter); } } else if (!strcmp (cmd, "process")) { if (paramNextString (&imagename) || masterElement == NULL || masterElement->getClient () == NULL) return; std::list <Image *>::iterator iter = findImage (imagename); if (iter != images.end ()) { ((DevClientCameraExec *) 
masterElement->getClient ())->queImage (*iter); deleteImage (*iter); images.erase (iter); } else { logStream (MESSAGE_ERROR) << "cannot process " << imagename << ", image was probably already handled (renamed,..)" << sendLog; } } else if (!strcmp (cmd, "?")) { if (paramNextString (&value) || masterElement == NULL || masterElement->getConnection () == NULL) return; rts2core::Value *val = masterElement->getConnection()->getValue (value); if (val) { writeToProcess (val->getValue ()); return; } writeToProcess ("ERR"); } else if (!strcmp (cmd, "command")) { if (!checkActive (false)) return; if ((comm = paramNextWholeString ()) == NULL || masterElement == NULL || masterElement->getConnection () == NULL) return; masterElement->getConnection ()->queCommand (new rts2core::Command (getMaster (), comm)); } else if (!strcmp (cmd, "VT")) { if (!checkActive (false)) return; if (paramNextString (&device) || paramNextString (&value) || paramNextString (&operat) || (operand = paramNextWholeString ()) == NULL || masterElement == NULL || masterElement->getClient () == NULL) return; int deviceTypeNum = getDeviceType (device); rts2core::CommandChangeValue cmdch (masterElement->getClient (), std::string (value), *operat, std::string (operand), true); getMaster ()->queueCommandForType (deviceTypeNum, cmdch); } else if (!strcmp (cmd, "value")) { if (paramNextString (&value) || paramNextString (&operat) || (operand = paramNextWholeString ()) == NULL || masterElement == NULL || masterElement->getConnection () == NULL || masterElement->getClient () == NULL) return; masterElement->getConnection ()->queCommand (new rts2core::CommandChangeValue (masterElement->getClient (), std::string (value), *operat, std::string (operand), true)); } else if (!strcmp (cmd, "device_by_type")) { if (paramNextString (&device)) return; rts2core::connections_t::iterator iter = getMaster ()->getConnections ()->begin (); getMaster ()->getOpenConnectionType (getDeviceType (device), iter); if (iter != getMaster ()->getConnections ()->end ()) writeToProcess ((*iter)->getName ()); else writeToProcess ("! cannot find device with given name"); } else if (!strcmp (cmd, "end_script")) { masterElement->requestEndScript (); notActive (); } else if (!strcmp (cmd, "end_target")) { notActive (); master->postEvent (new rts2core::Event (EVENT_STOP_OBSERVATION)); } else if (!strcmp (cmd, "stop_target")) { notActive (); master->postEvent (new rts2core::Event (EVENT_STOP_TARGET)); } else if (!strcmp (cmd, "wait_target_move")) { if (masterElement->getTarget ()) { if (masterElement->getTarget ()->wasMoved ()) { writeToProcess ("0"); } else { waitTargetMove = true; } } else { writeToProcess ("! 
there isn't target to wait for"); writeToProcess ("ERR"); } } else if (!strcmp (cmd, "target_disable")) { if (masterElement->getTarget ()) { masterElement->getTarget ()->setTargetEnabled (false); masterElement->getTarget ()->save (true); } } else if (!strcmp (cmd, "target_tempdisable")) { int ti; if (paramNextInteger (&ti) || masterElement->getTarget () == NULL) return; time_t now; time (&now); now += ti; masterElement->getTarget ()->setNextObservable (&now); masterElement->getTarget ()->save (true); } else if (!strcmp (cmd, "loopcount")) { std::ostringstream os; if (masterElement == NULL || masterElement->getScript () == NULL) os << "-1"; else os << masterElement->getScript ()->getLoopCount (); writeToProcess (os.str ().c_str ()); } else if (!strcmp (cmd, "run_device")) { if (masterElement == NULL || masterElement->getConnection () == NULL) writeToProcess ("& not active"); else writeToProcess (masterElement->getConnection ()->getName ()); } else { ConnExe::processCommand (cmd); } } void ConnExecute::connectionError (int last_data_size) { rts2core::ConnFork::connectionError (last_data_size); // inform master to delete us.. if (masterElement != NULL) { if (masterElement->getClient () != NULL) masterElement->getClient ()->postEvent (new rts2core::Event (EVENT_COMMAND_OK)); if (masterElement) masterElement->deleteExecConn (); } masterElement = NULL; } void ConnExecute::errorReported (int current_state, int old_state) { switch (exposure_started) { case 0: writeToProcess ("! error detected while running the script"); break; case 1: writeToProcess ("exposure_failed"); exposure_started = -1; break; case 2: writeToProcess ("! device failed"); writeToProcess ("ERR"); exposure_started = -2; break; } } void ConnExecute::exposureEnd (bool expectImage) { if (exposure_started == 1) { if (expectImage) { writeToProcess ("exposure_end"); exposure_started = 2; } else { writeToProcess ("exposure_end_noimage"); exposure_started = 0; } } else { logStream (MESSAGE_WARNING) << "script received end-of-exposure without starting it. This probably signal out-of-sync communication between executor and camera" << sendLog; } } void ConnExecute::exposureFailed () { switch (exposure_started) { case 1: writeToProcess ("exposure_failed"); exposure_started = -3; break; case 2: writeToProcess ("! exposure failed"); writeToProcess ("ERR"); exposure_started = -4; break; default: logStream (MESSAGE_WARNING) << "script received failure of exposure without starting one. 
This probably signal out-of-sync communication" << sendLog; } } int ConnExecute::processImage (Image *image) { if (exposure_started == 2) { std::string imgn = image->getAbsoluteFileName (); if (keep_next_image) { keep_next_image = false; } else { images.push_back (image); } image->saveImage (); writeToProcess ((std::string ("image ") + imgn).c_str ()); exposure_started = 0; } else { logStream (MESSAGE_WARNING) << "script executes method to start image processing without trigerring an exposure (" << exposure_started << ")" << sendLog; return -1; } return 1; } bool ConnExecute::knowImage (Image * image) { return (std::find (images.begin (), images.end (), image) != images.end ()); } std::list <Image *>::iterator ConnExecute::findImage (const char *path) { std::list <Image *>::iterator iter; for (iter = images.begin (); iter != images.end (); iter++) { if (!strcmp (path, (*iter)->getAbsoluteFileName ())) return iter; } return iter; } Execute::Execute (Script * _script, rts2core::Block * _master, const char *_exec, Rts2Target *_target): Element (_script) { connExecute = NULL; client = NULL; master = _master; exec = _exec; target = _target; endScript = false; } Execute::~Execute () { if (connExecute) { errno = 0; connExecute->nullMasterElement (); connExecute->endConnection (); deleteExecConn (); } client = NULL; } void Execute::errorReported (int current_state, int old_state) { if (connExecute) { connExecute->errorReported (current_state, old_state); } Element::errorReported (current_state, old_state); } void Execute::exposureEnd (bool expectImage) { if (connExecute) { connExecute->exposureEnd (expectImage); return; } Element::exposureEnd (expectImage); } void Execute::exposureFailed () { if (connExecute) { connExecute->exposureFailed (); return; } Element::exposureFailed (); } void Execute::notActive () { if (connExecute) connExecute->notActive (); } int Execute::processImage (Image *image) { if (connExecute) return connExecute->processImage (image); return Element::processImage (image); } bool Execute::knowImage (Image *image) { if (connExecute) return connExecute->knowImage (image); return Element::knowImage (image); } int Execute::defnextCommand (rts2core::DevClient * _client, rts2core::Command ** new_command, char new_device[DEVICE_NAME_SIZE]) { if (connExecute == NULL) { connExecute = new ConnExecute (this, master, exec); int ret = connExecute->init (); if (ret) { logStream (MESSAGE_ERROR) << "Cannot execute script control command, ending script. Script will not be executed again." << sendLog; return NEXT_COMMAND_STOP_TARGET; } client = _client; client->getMaster ()->addConnection (connExecute); } if (endScript) { connExecute->nullMasterElement (); connExecute = NULL; return NEXT_COMMAND_END_SCRIPT; } if (connExecute->getConnState () == CONN_DELETE) { connExecute->nullMasterElement (); // connExecute will be deleted by rts2core::Block holding connection connExecute = NULL; client = NULL; return NEXT_COMMAND_NEXT; } return NEXT_COMMAND_KEEP; }<|fim▁end|>
if (paramNextString (&imagename))
<|file_name|>gallery-tipsy.js<|end_file_name|><|fim▁begin|>var Lang = Y.Lang, getCN = Y.ClassNameManager.getClassName, //HTML5 Data Attributes DATA_CONTENT = 'data-content', DATA_PLACEMENT = 'data-placement', //Classes TIPSY = 'tipsy', FADE = 'fade', IN = 'in', CLASSES = { fade: getCN(TIPSY, FADE), fadeIn: getCN(TIPSY, IN) }; Y.Tipsy = Y.Base.create("tipsy", Y.Widget, [Y.WidgetPointer, Y.WidgetPosition, Y.WidgetPositionAlign, Y.WidgetStack], { _handles : [], _timer : null, //constructor initializer : function(config) { }, //clean up on destruction destructor : function() { Y.each(this._handles, function(v, k, o) { v.detach(); }); }, renderUI : function () { this.get('boundingBox').addClass(CLASSES.fade).setAttribute('role', 'tooltip'); }, bindUI : function () { var del = this.get('delegate'), selector = this.get('selector'), showOn = this.get('showOn'); //showOn = ['event1', 'event2'] if (Lang.isArray(showOn) || Lang.isString(showOn)) { this._handles.push(del.delegate(showOn, this._handleDelegateStart, selector, this)); } //showOn = { events: ['event1', 'event2'] } else if (Lang.isObject(showOn) && !showOn.node) { this._handles.push(del.delegate(showOn.events, this._handleDelegateStart, selector, this)); } //showOn = { node: '#selector', events: ['event1', 'event2'] } else if (Lang.isObject(showOn) && showOn.node && showOn.events) { this._handles.push(Y.one(showOn.selector).on(showOn.events, this._handleDelegateStart, this)); } else { Y.log('The showOn attribute should contain an array of events, or an object with keys "selector" (string), and "events" (array of events)'); } }, _handleDelegateStart : function (e) { var del = this.get('delegate'), delay = this.get('delay' ), selector = this.get('selector'), hideOn = this.get('hideOn'), node = e.currentTarget; if (Lang.isArray(hideOn) || Lang.isString(hideOn)) { this._handles.push(del.delegate(hideOn, this._handleDelegateEnd, selector, this)); } //hideOn = { events: ['event1', 'event2'] } else if (Lang.isObject(hideOn) && !hideOn.selector) { this._handles.push(del.delegate(hideOn.events, this._handleDelegateEnd, selector, this)); } //hideOn = { node: '#selector', events: ['event1', 'event2'] } else if (Lang.isObject(hideOn) && hideOn.selector && hideOn.events) { this._handles.push(Y.one(hideOn.selector).on(hideOn.events, this._handleDelegateEnd, this)); } else { Y.log('The hideOn attribute should contain an array of events, or an object with keys "selector" (string), and "events" (array of events)'); } if (delay) { this._timer = Y.later(delay*1000, this, 'showTooltip', node); } else { this.showTooltip(node); } }, _handleDelegateEnd: function (e) { this.hideTooltip(); if (this._timer) { this._timer.cancel(); this._timer = null; } }, showTooltip : function (node) { this._setTooltipContent(node); this._alignTooltip(node); this.alignPointer(node); this._showTooltip(); this.get('boundingBox').addClass(CLASSES.fadeIn).setAttribute('aria-hidden', 'false'); node.setAttribute('aria-describedby', this.get('boundingBox').getAttribute('id')); }, _showTooltip: function () { this.set('visible', true); }, hideTooltip : function () { this.get('boundingBox').removeClass(CLASSES.fadeIn).setAttrs({ 'aria-hidden': 'true', //clear out all inline styles 'styles': '' }); this._hideTooltip(); }, _hideTooltip: function () { this.set('visible', false); }, _setTooltipContent: function (node) { var content = (node.hasAttribute(DATA_CONTENT)) ? 
node.getAttribute(DATA_CONTENT) : this.get('content'), contentBox = this.get('contentBox'); contentBox.setHTML(content); }, _alignTooltip : function (node) { var placement = (node.hasAttribute(DATA_PLACEMENT)) ? node.getAttribute(DATA_PLACEMENT) : this.get('placement'); switch (placement) { case "above": this.align(node, ["bc", "tc"]); break; case "left": this.align(node, ["rc", "lc"]); break; case "below": this.align(node, ["tc", "bc"]); break; case "right": this.align(node, ["lc", "rc"]); break; default: break; } } }, { NS : "tipsy", ATTRS : { content : { value : '' }, selector: { value: null }, zIndex: { value: 2 }, delay: { value: 0 }, showOn: { value: ['mouseover', 'touchstart', 'focus'] },<|fim▁hole|> hideOn: { value: ['mouseout', 'touchend', 'blur'] }, delegate: { value: null, setter: function(val) { return Y.one(val) || Y.one("document"); } } } });<|fim▁end|>
<|file_name|>_templateitemname.py<|end_file_name|><|fim▁begin|>import _plotly_utils.basevalidators class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator): def __init__(<|fim▁hole|> **kwargs ): super(TemplateitemnameValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "colorbars"), **kwargs )<|fim▁end|>
self, plotly_name="templateitemname", parent_name="histogram2d.colorbar.tickformatstop",
<|file_name|>errors.go<|end_file_name|><|fim▁begin|>// Copyright 2015 Bowery, Inc. package errors import ( "errors" "fmt" "reflect" "runtime" "strconv" "strings" ) // New creates a new error, this solves issue of name collision with // errors pkg. func New(args ...interface{}) error { return errors.New(strings.TrimRight(fmt.Sprintln(args...), "\n")) } // Newf creates a new error, from an existing error template. func Newf(format string, args ...interface{}) error { return fmt.Errorf(format, args...) } // StackError is an error with stack information. type StackError struct { Err error Trace *Trace } // IsStackError returns the error as a StackError if it's a StackError, nil // otherwise. func IsStackError(err error) *StackError { se, ok := err.(*StackError) if ok { return se } return nil } // NewStackError creates a stack error including the stack. func NewStackError(err error) error { se := &StackError{ Err: err, Trace: &Trace{ Frames: make([]*Frame, 0), Exception: &Exception{Message: err.Error(), Class: errClass(err)}, }, } // Get stack frames excluding the current one. for i := 1; ; i++ { pc, file, line, ok := runtime.Caller(i) if !ok { // Couldn't get another frame, so we're finished. break } f := &Frame{File: file, Line: line, Method: routineName(pc)} se.Trace.Frames = append(se.Trace.Frames, f) } return se } func (se *StackError) Error() string { return se.Err.Error() } // Stack prints the stack trace in a readable format. func (se *StackError) Stack() string { stack := "" for i, frame := range se.Trace.Frames { stack += strconv.Itoa(i+1) + ": File \"" + frame.File + "\" line " stack += strconv.Itoa(frame.Line) + " in " + frame.Method + "\n" } stack += se.Trace.Exception.Class + ": " + se.Trace.Exception.Message return stack } // Trace contains the stack frames, and the exception information. type Trace struct { Frames []*Frame `json:"frames"` Exception *Exception `json:"exception"` } // Exception contains the error message and it's class origin. type Exception struct { Class string `json:"class"` Message string `json:"message"` } // Frame contains line, file and method info for a stack frame. type Frame struct { File string `json:"filename"` Line int `json:"lineno"` Method string `json:"method"` } // errClass retrieves the string representation for the errors type. func errClass(err error) string { class := strings.TrimPrefix(reflect.TypeOf(err).String(), "*") if class == "" { class = "panic" } return class } // routineName returns the routines name for a given program counter. func routineName(pc uintptr) string { fc := runtime.FuncForPC(pc) if fc == nil { return "???" }<|fim▁hole|> return fc.Name() // Includes the package info. }<|fim▁end|>
<|file_name|>gulpfile.babel.js<|end_file_name|><|fim▁begin|>'use strict'; // Proxy URL (optional) const proxyUrl = 'drupal.dev'; // API keys const TINYPNG_KEY = ''; // fonts const fontList = []; // vendors const jsVendorList = []; const cssVendorList = []; // paths to relevant directories const dirs = { src: './src', dest: './dist' }; // paths to file sources const sources = { js: `${dirs.src}/**/*.js`,<|fim▁hole|> coreScss: `${dirs.src}/scss/main.scss`, img: `./img/**/*.{png,jpg}`, font: fontList, jsVendor: jsVendorList, cssVendor: cssVendorList }; // paths to file destinations const dests = { js: `${dirs.dest}/js`, css: `${dirs.dest}/css`, img: `${dirs.dest}/img`, sigFile: `./img/.tinypng-sigs`, font: `${dirs.dest}/fonts`, vendor: `${dirs.dest}/vendors` }; // plugins import gulp from 'gulp'; import browserSync from 'browser-sync'; import gulpLoadPlugins from 'gulp-load-plugins'; // auto-load plugins const $ = gulpLoadPlugins(); /**************************************** Gulp Tasks *****************************************/ // launch browser sync as a standalone local server gulp.task('browser-sync-local', browserSyncLocal()); // browser-sync task for starting the server by proxying a local url gulp.task('browser-sync-proxy', browserSyncProxy()); // copy vendor CSS gulp.task('css-vendors', cssVendors()); // copy fonts gulp.task('fonts', fonts()); // Lint Javascript Task gulp.task('js-lint', javascriptLint()); // Concatenate and Minify Vendor JS gulp.task('js-vendors', javascriptVendors()); // lint sass task gulp.task('sass-lint', sassLint()); // Concatenate & Minify JS gulp.task('scripts', ['js-lint'], scripts()); // compile, prefix, and minify the sass gulp.task('styles', styles()); // compress and combine svg icons gulp.task('svg', svg()); // Unit testing gulp.task('test', test()); // compress png and jpg images via tinypng API gulp.task('tinypng', tinypng()); // Watch Files For Changes gulp.task('watch', watch()); // default task builds src, opens up a standalone server, and watches for changes gulp.task('default', [ 'fonts', 'styles', 'scripts', 'browser-sync-local', 'watch' ]); // local task builds src, opens up a standalone server, and watches for changes gulp.task('local', [ 'fonts', 'styles', 'scripts', 'browser-sync-local', 'watch' ]); // proxy task builds src, opens up a proxy server, and watches for changes gulp.task('proxy', [ 'fonts', 'styles', 'scripts', 'browser-sync-proxy', 'watch' ]); // builds everything gulp.task('build', [ 'fonts', 'styles', 'scripts', 'css-vendors', 'js-vendors' ]); // builds the vendor files gulp.task('vendors', [ 'css-vendors', 'js-vendors' ]); // compresses imagery gulp.task('images', [ 'svg', 'tinypng' ]); /**************************************** Task Logic *****************************************/ function browserSyncLocal () { return () => { browserSync.init({ server: '../../../../' }); }; } function browserSyncProxy () { return () => { browserSync.init({ proxy: proxyUrl }); }; } function cssVendors () { return () => { return gulp.src(sources.cssVendor) .pipe(gulp.dest(dests.vendor)); }; } function fonts () { return () => { gulp.src(sources.font) .pipe(gulp.dest(dests.font)); }; } function javascriptLint () { return () => { return gulp.src(sources.js) .pipe($.jshint({esversion: 6})) .pipe($.jshint.reporter('jshint-stylish')); }; } function javascriptVendors () { return () => { return gulp.src(sources.jsVendor) .pipe($.plumber()) .pipe($.concat('vendors.min.js')) .pipe($.uglify()) .pipe(gulp.dest(dests.vendor)); }; } function sassLint () { 
return () => { return gulp.src(sources.scss) .pipe($.sassLint()) .pipe($.sassLint.format()) .pipe($.sassLint.failOnError()); }; } function scripts () { return () => { return gulp.src(sources.js) .pipe($.plumber()) .pipe($.sourcemaps.init()) .pipe($.concat('main.js')) .pipe($.babel()) .pipe(gulp.dest(dests.js)) .pipe($.rename({suffix: '.min'})) .pipe($.uglify()) .pipe($.sourcemaps.write('.')) .pipe(gulp.dest(dests.js)) .pipe(browserSync.stream()); }; } function styles () { return () => { return gulp.src(sources.coreScss) .pipe($.sourcemaps.init()) .pipe($.sass().on('error', $.sass.logError)) .pipe($.autoprefixer(["> 1%", "last 2 versions"], { cascade: true })) .pipe(gulp.dest(dests.css)) .pipe($.rename({suffix: '.min'})) .pipe($.cleanCss()) .pipe($.sourcemaps.write('.')) .pipe(gulp.dest(dests.css)) .pipe(browserSync.stream()); }; } function svg () { return () => { return gulp.src('./img/icons/*.svg') .pipe($.svgmin()) .pipe($.svgstore()) .pipe(gulp.dest('./img/icons')); }; } function test (done) { return () => { let server = new karma.Server('./karma.conf.js', done); server.start(); }; } function tinypng () { return () => { return gulp.src(sources.img) .pipe($.tinypngCompress({ key: TINYPNG_KEY, sigFile: dests.sigFile })) .pipe(gulp.dest(dests.img)); }; } function watch () { return () => { gulp.watch(sources.js, ['scripts']); gulp.watch(sources.scss, ['styles']); gulp.watch('**/*.php', browserSync.reload); }; }<|fim▁end|>
scss: `${dirs.src}/**/*.scss`,
<|file_name|>clientcache.go<|end_file_name|><|fim▁begin|>/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package util import ( "k8s.io/kubernetes/pkg/api/registered" "k8s.io/kubernetes/pkg/client" "k8s.io/kubernetes/pkg/client/clientcmd" ) func NewClientCache(loader clientcmd.ClientConfig) *clientCache { return &clientCache{ clients: make(map[string]*client.Client), configs: make(map[string]*client.Config), loader: loader, } } // clientCache caches previously loaded clients for reuse, and ensures MatchServerVersion // is invoked only once type clientCache struct { loader clientcmd.ClientConfig clients map[string]*client.Client configs map[string]*client.Config defaultConfig *client.Config defaultClient *client.Client matchVersion bool } // ClientConfigForVersion returns the correct config for a server func (c *clientCache) ClientConfigForVersion(version string) (*client.Config, error) { if c.defaultConfig == nil { config, err := c.loader.ClientConfig() if err != nil { return nil, err } c.defaultConfig = config if c.matchVersion { if err := client.MatchesServerVersion(c.defaultClient, config); err != nil { return nil, err } } }<|fim▁hole|> return config, nil } // TODO: have a better config copy method config := *c.defaultConfig negotiatedVersion, err := client.NegotiateVersion(c.defaultClient, &config, version, registered.RegisteredVersions) if err != nil { return nil, err } config.Version = negotiatedVersion client.SetKubernetesDefaults(&config) c.configs[version] = &config return &config, nil } // ClientForVersion initializes or reuses a client for the specified version, or returns an // error if that is not possible func (c *clientCache) ClientForVersion(version string) (*client.Client, error) { if client, ok := c.clients[version]; ok { return client, nil } config, err := c.ClientConfigForVersion(version) if err != nil { return nil, err } client, err := client.New(config) if err != nil { return nil, err } c.clients[config.Version] = client return client, nil }<|fim▁end|>
if config, ok := c.configs[version]; ok {
<|file_name|>change-password-prompt.component.ts<|end_file_name|><|fim▁begin|>/** * Created by Андрей on 01.07.2017. */ import { Component } from '@angular/core'; import { DialogComponent, DialogService } from 'ng2-bootstrap-modal'; import { FormBuilder, FormControl, Validators } from '@angular/forms'; import { passConfirmValidation } from '../../auth-page/validators/pass-confirm.validator'; import { UserProfileService } from '../../core/user-profile.service'; import { Router } from '@angular/router'; import { MyAuthService } from '../../core/my-auth.service'; export interface PromptModel { title: string; question: string; } @Component({ selector: 'prompt', templateUrl: './change-password-prompt.component.html', styleUrls: ['./change-password-prompt.component.scss'] }) export class ChangePasswordPromptComponent extends DialogComponent<PromptModel, string> { public title: string; public message: string = ''; public serverError; public form = this.fb.group({ /* tslint:disable */ old_password: new FormControl(null, [Validators.required, Validators.minLength(6), Validators.maxLength(25)]), new_password1: new FormControl(null, [Validators.required, Validators.minLength(6), Validators.maxLength(25)]), new_password2: new FormControl(null, [Validators.required]), }, {validator: passConfirmValidation('new_password1', 'new_password2')}); constructor(dialogService: DialogService, private fb: FormBuilder, private userProfile: UserProfileService, private router: Router) { super(dialogService); } /** * Change new password * @param $event. This parameter contains data of the event. * @param value. This parameter contains data of the form for new password. */ public changePassword($event, value) { $event.preventDefault(); this.userProfile.changePassword(value) .subscribe(() => { this.close();<|fim▁hole|> } ); } /** * Close modal window */ public close() { this.dialogService.removeDialog(this); } }<|fim▁end|>
this.router.navigate(['/sign-in']); }, (error) => { this.serverError = JSON.parse(error._body);
<|file_name|>bitcoin_pt_PT.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="pt_PT" version="2.1"> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About Summitcoin</source> <translation type="unfinished"/> </message> <message> <location line="+39"/> <source>&lt;b&gt;Summitcoin&lt;/b&gt; version</source> <translation type="unfinished"/> </message> <message> <location line="+41"/> <source>Copyright © 2009-2014 The Bitcoin developers Copyright © 2012-2014 The NovaCoin developers Copyright © 2014 The Summitcoin developers</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or &lt;a href=&quot;http://www.opensource.org/licenses/mit-license.php&quot;&gt;http://www.opensource.org/licenses/mit-license.php&lt;/a&gt;. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (&lt;a href=&quot;https://www.openssl.org/&quot;&gt;https://www.openssl.org/&lt;/a&gt;) and cryptographic software written by Eric Young (&lt;a href=&quot;mailto:[email protected]&quot;&gt;[email protected]&lt;/a&gt;) and UPnP software written by Thomas Bernard.</source> <translation type="unfinished"/> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <source>Double-click to edit address or label</source> <translation>Clique duas vezes para editar o endereço ou o rótulo</translation> </message> <message> <location line="+24"/> <source>Create a new address</source> <translation>Criar um novo endereço</translation> </message> <message> <location line="+10"/> <source>Copy the currently selected address to the system clipboard</source> <translation>Copie o endereço selecionado para a área de transferência</translation> </message> <message> <location line="-7"/> <source>&amp;New Address</source> <translation type="unfinished"/> </message> <message> <location line="-43"/> <source>These are your Summitcoin addresses for receiving payments. 
You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation type="unfinished"/> </message> <message> <location line="+53"/> <source>&amp;Copy Address</source> <translation>&amp;Copiar Endereço</translation> </message> <message> <location line="+7"/> <source>Show &amp;QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Sign a message to prove you own a Summitcoin address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Delete the currently selected address from the list</source> <translation>Apagar o endereço selecionado da lista</translation> </message> <message> <location line="-10"/> <source>Verify a message to ensure it was signed with a specified Summitcoin address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>&amp;Delete</source> <translation>E&amp;liminar</translation> </message> <message> <location filename="../addressbookpage.cpp" line="+65"/> <source>Copy &amp;Label</source> <translation>Copiar &amp;Rótulo</translation> </message> <message> <location line="+2"/> <source>&amp;Edit</source> <translation>&amp;Editar</translation> </message> <message> <location line="+250"/> <source>Export Address Book Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Ficheiro separado por vírgulas (*.csv)</translation> </message> <message> <location line="+13"/> <source>Error exporting</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"/> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+145"/> <source>Label</source> <translation>Rótulo</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Endereço</translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation>(sem rótulo)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation>Diálogo de Frase-Passe</translation> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation>Escreva a frase de segurança</translation> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation>Nova frase de segurança</translation> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation>Repita a nova frase de segurança</translation> </message> <message> <location line="+33"/> <source>Serves to disable the trivial sendmoney when OS account compromised. 
Provides no real security.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>For staking only</source> <translation type="unfinished"/> </message> <message> <location filename="../askpassphrasedialog.cpp" line="+38"/> <source>Encrypt wallet</source> <translation>Encriptar carteira</translation> </message> <message> <location line="+7"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>A sua frase de segurança é necessária para desbloquear a carteira.</translation> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation>Desbloquear carteira</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>A sua frase de segurança é necessária para desencriptar a carteira.</translation> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation>Desencriptar carteira</translation> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation>Alterar frase de segurança</translation> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation>Escreva a frase de segurança antiga seguida da nova para a carteira.</translation> </message> <message> <location line="+45"/> <source>Confirm wallet encryption</source> <translation>Confirmar encriptação da carteira</translation> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR COINS&lt;/b&gt;!</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation>Tem a certeza que deseja encriptar a carteira?</translation> </message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>IMPORTANTE: Qualquer cópia de segurança anterior da carteira deverá ser substituída com o novo, actualmente encriptado, ficheiro de carteira. Por razões de segurança, cópias de segurança não encriptadas efectuadas anteriormente do ficheiro da carteira tornar-se-ão inúteis assim que começar a usar a nova carteira encriptada.</translation> </message> <message> <location line="+103"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation>Atenção: A tecla Caps Lock está activa!</translation> </message> <message> <location line="-133"/> <location line="+60"/> <source>Wallet encrypted</source> <translation>Carteira encriptada</translation> </message> <message> <location line="-140"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;ten or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation type="unfinished"/> </message> <message> <location line="+82"/> <source>Summitcoin will close now to finish the encryption process. 
Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+44"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation>A encriptação da carteira falhou</translation> </message> <message> <location line="-56"/> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>A encriptação da carteira falhou devido a um erro interno. A carteira não foi encriptada.</translation> </message> <message> <location line="+7"/> <location line="+50"/> <source>The supplied passphrases do not match.</source> <translation>As frases de segurança fornecidas não coincidem.</translation> </message> <message> <location line="-38"/> <source>Wallet unlock failed</source> <translation>O desbloqueio da carteira falhou</translation> </message> <message> <location line="+1"/> <location line="+12"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>A frase de segurança introduzida para a desencriptação da carteira estava incorreta.</translation> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation>A desencriptação da carteira falhou</translation> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation>A frase de segurança da carteira foi alterada com êxito.</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+297"/> <source>Sign &amp;message...</source> <translation>Assinar &amp;mensagem...</translation> </message> <message> <location line="-64"/> <source>Show general overview of wallet</source> <translation>Mostrar visão geral da carteira</translation> </message> <message> <location line="+17"/> <source>&amp;Transactions</source> <translation>&amp;Transações</translation> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation>Navegar pelo histórico de transações</translation> </message> <message> <location line="+5"/> <source>&amp;Address Book</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Edit the list of stored addresses and labels</source> <translation type="unfinished"/> </message> <message> <location line="-18"/> <source>Show the list of addresses for receiving payments</source> <translation type="unfinished"/> </message> <message> <location line="+34"/> <source>E&amp;xit</source> <translation>Fec&amp;har</translation> </message> <message> <location line="+1"/> <source>Quit application</source> <translation>Sair da aplicação</translation> </message> <message> <location line="+4"/> <source>Show information about Summitcoin</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>About &amp;Qt</source> <translation>Sobre &amp;Qt</translation> </message> <message> <location line="+1"/> <source>Show information about Qt</source> <translation>Mostrar informação sobre Qt</translation> </message> <message> <location line="+2"/> <source>&amp;Options...</source> <translation>&amp;Opções...</translation> </message> <message> <location line="+4"/> <source>&amp;Encrypt Wallet...</source> <translation>E&amp;ncriptar Carteira...</translation> </message> <message> <location line="+2"/> 
<source>&amp;Backup Wallet...</source> <translation>&amp;Guardar Carteira...</translation> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation>Mudar &amp;Palavra-passe...</translation> </message> <message> <location line="+9"/> <source>&amp;Export...</source> <translation type="unfinished"/> </message> <message> <location line="-55"/> <source>Send coins to a Summitcoin address</source> <translation type="unfinished"/> </message> <message> <location line="+39"/> <source>Modify configuration options for Summitcoin</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Export the data in the current tab to a file</source> <translation type="unfinished"/> </message> <message> <location line="-13"/> <source>Encrypt or decrypt wallet</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Backup wallet to another location</source> <translation>Faça uma cópia de segurança da carteira para outra localização</translation> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation>Mudar a frase de segurança utilizada na encriptação da carteira</translation> </message> <message> <location line="+10"/> <source>&amp;Debug window</source> <translation>Janela de &amp;depuração</translation> </message> <message> <location line="+1"/> <source>Open debugging and diagnostic console</source> <translation>Abrir consola de diagnóstico e depuração</translation> </message> <message> <location line="-5"/> <source>&amp;Verify message...</source> <translation>&amp;Verificar mensagem...</translation> </message> <message> <location line="-214"/> <location line="+551"/> <source>Summitcoin</source> <translation type="unfinished"/> </message> <message> <location line="-551"/> <source>Wallet</source> <translation>Carteira</translation> </message> <message> <location line="+193"/> <source>&amp;About Summitcoin</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>&amp;Show / Hide</source> <translation>Mo&amp;strar / Ocultar</translation> </message> <message> <location line="+8"/> <source>Unlock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>&amp;Lock Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Lock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>&amp;File</source> <translation>&amp;Ficheiro</translation> </message> <message> <location line="+8"/> <source>&amp;Settings</source> <translation>Con&amp;figurações</translation> </message> <message> <location line="+8"/> <source>&amp;Help</source> <translation>A&amp;juda</translation> </message> <message> <location line="+17"/> <source>Tabs toolbar</source> <translation>Barra de separadores</translation> </message> <message> <location line="+46"/> <location line="+9"/> <source>[testnet]</source> <translation>[rede de testes]</translation> </message> <message> <location line="+0"/> <location line="+58"/> <source>Summitcoin client</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+70"/> <source>%n active connection(s) to Summitcoin network</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+488"/> <source>Staking.&lt;br&gt;Your weight is %1&lt;br&gt;Network weight is 
%2&lt;br&gt;Expected time to earn reward is %3</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Not staking because wallet is locked</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because wallet is offline</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because wallet is syncing</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because you don&apos;t have mature coins</source> <translation type="unfinished"/> </message> <message> <location line="-808"/> <source>&amp;Dashboard</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>&amp;Receive</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>&amp;Send</source> <translation type="unfinished"/> </message> <message> <location line="+49"/> <source>&amp;Unlock Wallet...</source> <translation type="unfinished"/> </message> <message> <location line="+273"/> <source>Up to date</source> <translation>Atualizado</translation> </message> <message> <location line="+43"/> <source>Catching up...</source> <translation>Recuperando...</translation> </message> <message> <location line="+113"/> <source>Confirm transaction fee</source> <translation type="unfinished"/> </message> <message> <location line="+27"/> <source>Sent transaction</source> <translation>Transação enviada</translation> </message> <message> <location line="+1"/> <source>Incoming transaction</source> <translation>Transação recebida</translation> </message> <message> <location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation>Data: %1 Quantia: %2 Tipo: %3 Endereço: %4 </translation> </message> <message> <location line="+100"/> <location line="+15"/> <source>URI handling</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <location line="+15"/> <source>URI can not be parsed! 
This can be caused by an invalid Summitcoin address or malformed URI parameters.</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Wallet is &lt;b&gt;not encrypted&lt;/b&gt;</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>A carteira está &lt;b&gt;encriptada&lt;/b&gt; e atualmente &lt;b&gt;desbloqueada&lt;/b&gt;</translation> </message> <message> <location line="+8"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>A carteira está &lt;b&gt;encriptada&lt;/b&gt; e atualmente &lt;b&gt;bloqueada&lt;/b&gt;</translation> </message> <message> <location line="+24"/> <source>Backup Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+91"/> <source>%n second(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n minute(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="-429"/> <location line="+433"/> <source>%n hour(s)</source> <translation><numerusform>%n hora</numerusform><numerusform>%n horas</numerusform></translation> </message> <message> <location line="-456"/> <source>Processed %1 blocks of transaction history.</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+27"/> <location line="+433"/> <source>%n day(s)</source> <translation><numerusform>%n dia</numerusform><numerusform>%n dias</numerusform></translation> </message> <message numerus="yes"> <location line="-429"/> <location line="+6"/> <source>%n week(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+0"/> <source>%1 and %2</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+0"/> <source>%n year(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+5"/> <source>%1 behind</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Last received block was generated %1 ago.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Transactions after this will not yet be visible.</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Error</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Information</source> <translation type="unfinished"/> </message> <message> <location line="+69"/> <source>This transaction is over the size limit. 
You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source> <translation type="unfinished"/> </message> <message> <location line="+324"/> <source>Not staking</source> <translation type="unfinished"/> </message> <message> <location filename="../bitcoin.cpp" line="+104"/> <source>A fatal error occurred. Summitcoin can no longer continue safely and will quit.</source> <translation type="unfinished"/> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+110"/> <source>Network Alert</source> <translation>Alerta da Rede</translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <location filename="../forms/coincontroldialog.ui" line="+14"/> <source>Coin Control</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Quantity:</source> <translation>Quantidade:</translation> </message> <message> <location line="+32"/> <source>Bytes:</source> <translation>Bytes:</translation> </message> <message> <location line="+48"/> <source>Amount:</source> <translation>Quantia:</translation> </message> <message> <location line="+32"/> <source>Priority:</source> <translation>Prioridade:</translation> </message> <message> <location line="+48"/> <source>Fee:</source> <translation>Taxa:</translation> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation>Saída Baixa:</translation> </message> <message> <location filename="../coincontroldialog.cpp" line="+552"/> <source>no</source> <translation>não</translation> </message> <message> <location filename="../forms/coincontroldialog.ui" line="+51"/> <source>After Fee:</source> <translation>Depois de taxas:</translation> </message> <message> <location line="+35"/> <source>Change:</source> <translation>Troco:</translation> </message> <message> <location line="+69"/> <source>(un)select all</source> <translation>(des)seleccionar todos</translation> </message> <message> <location line="+13"/> <source>Tree mode</source> <translation>Modo de árvore</translation> </message> <message> <location line="+16"/> <source>List mode</source> <translation>Modo lista</translation> </message> <message> <location line="+45"/> <source>Amount</source> <translation>Quantia</translation> </message> <message> <location line="+5"/> <source>Label</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Address</source> <translation>Endereço</translation> </message> <message> <location line="+5"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+5"/> <source>Confirmations</source> <translation>Confirmados</translation> </message> <message> <location line="+3"/> <source>Confirmed</source> <translation>Confirmada</translation> </message> <message> <location line="+5"/> <source>Priority</source> <translation>Prioridade</translation> </message> <message> <location filename="../coincontroldialog.cpp" line="-515"/> <source>Copy address</source> <translation>Copiar endereço</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Copiar rótulo</translation> </message> <message> <location line="+1"/> <location line="+26"/> <source>Copy amount</source> <translation>Copiar quantia</translation> </message> <message> <location line="-25"/> <source>Copy transaction ID</source> <translation>Copiar ID da Transação</translation> </message> 
<message> <location line="+24"/> <source>Copy quantity</source> <translation>Copiar quantidade</translation> </message> <message> <location line="+2"/> <source>Copy fee</source> <translation>Copiar taxa</translation> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation>Copiar valor após taxa</translation> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation>Copiar bytes</translation> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation>Copiar prioridade</translation> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation>Copiar saída baixa</translation> </message> <message> <location line="+1"/> <source>Copy change</source> <translation>Copiar troco</translation> </message> <message> <location line="+317"/> <source>highest</source> <translation>o mais alto</translation> </message> <message> <location line="+1"/> <source>high</source> <translation>alto</translation> </message> <message> <location line="+1"/> <source>medium-high</source> <translation>médio-alto</translation> </message> <message> <location line="+1"/> <source>medium</source> <translation>médio</translation> </message> <message> <location line="+4"/> <source>low-medium</source> <translation>baixo-médio</translation> </message> <message> <location line="+1"/> <source>low</source> <translation>baixo</translation> </message> <message> <location line="+1"/> <source>lowest</source> <translation>o mais baixo</translation> </message> <message> <location line="+155"/> <source>DUST</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>yes</source> <translation>sim</translation> </message> <message> <location line="+10"/> <source>This label turns red, if the transaction size is bigger than 10000 bytes. This means a fee of at least %1 per kb is required. Can vary +/- 1 Byte per input.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Transactions with higher priority get more likely into a block. This label turns red, if the priority is smaller than &quot;medium&quot;. This means a fee of at least %1 per kb is required.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if any recipient receives an amount smaller than %1. This means a fee of at least %2 is required. Amounts below 0.546 times the minimum relay fee are shown as DUST.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source> <translation type="unfinished"/> </message> <message> <location line="+36"/> <location line="+66"/> <source>(no label)</source> <translation>(Sem rótulo)</translation> </message> <message> <location line="-9"/> <source>change from %1 (%2)</source> <translation>troco de %1 (%2)</translation> </message> <message> <location line="+1"/> <source>(change)</source> <translation>(troco)</translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation>Editar Endereço</translation> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation>&amp;Rótulo</translation> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation>E&amp;ndereço</translation> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. This can only be modified for sending addresses.</source> <translation type="unfinished"/> </message> <message> <location filename="../editaddressdialog.cpp" line="+21"/> <source>New receiving address</source> <translation>Novo endereço de entrada</translation> </message> <message> <location line="+4"/> <source>New sending address</source> <translation>Novo endereço de saída</translation> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation>Editar endereço de entrada</translation> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation>Editar endereço de saída</translation> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation>O endereço introduzido &quot;%1&quot; já se encontra no livro de endereços.</translation> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid Summitcoin address.</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation>Impossível desbloquear carteira.</translation> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation>Falha ao gerar nova chave.</translation> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+426"/> <location line="+12"/> <source>Summitcoin-Qt</source> <translation type="unfinished"/> </message> <message> <location line="-12"/> <source>version</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Usage:</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>UI options</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation type="unfinished"/> </message> </context>
<context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation>Opções</translation> </message> <message> <location line="+16"/> <source>&amp;Main</source> <translation>&amp;Principal</translation> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation>Pagar &amp;taxa de transação</translation> </message> <message> <location line="+31"/> <source>Reserved amount does not participate in staking and is therefore spendable at any time.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Reserve</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Automatically start Summitcoin after logging in to the system.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Start Summitcoin on system login</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>&amp;Network</source> <translation>&amp;Rede</translation> </message> <message> <location line="+6"/> <source>Automatically open the Summitcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation>Mapear porta usando &amp;UPnP</translation> </message> <message> <location line="+7"/> <source>Connect to the Summitcoin network through a SOCKS proxy (e.g. when connecting through Tor).</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS proxy:</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Proxy &amp;IP:</source> <translation>&amp;IP do proxy:</translation> </message> <message> <location line="+19"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Port:</source> <translation>&amp;Porta:</translation> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 9050)</source> <translation>Porta do proxy (p.ex. 9050)</translation> </message> <message> <location line="+7"/> <source>SOCKS &amp;Version:</source> <translation>&amp;Versão SOCKS:</translation> </message> <message> <location line="+13"/> <source>SOCKS version of the proxy (e.g. 5)</source> <translation>Versão do proxy SOCKS (p.ex. 5)</translation> </message> <message> <location line="+36"/> <source>&amp;Window</source> <translation>&amp;Janela</translation> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation>Apenas mostrar o ícone da bandeja após minimizar a janela.</translation> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>&amp;Minimizar para a bandeja e não para a barra de ferramentas</translation> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. 
When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation>Minimize ao invés de sair da aplicação quando a janela é fechada. Com esta opção selecionada, a aplicação apenas será encerrada quando escolher Sair da aplicação no menú.</translation> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation>M&amp;inimizar ao fechar</translation> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation>Vis&amp;ualização</translation> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation>&amp;Linguagem da interface de utilizador:</translation> </message> <message> <location line="+13"/> <source>The user interface language can be set here. This setting will take effect after restarting Summitcoin.</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation>&amp;Unidade a usar em quantias:</translation> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Escolha a subdivisão unitária a ser mostrada por defeito na aplicação e ao enviar moedas.</translation> </message> <message> <location line="+9"/> <source>Whether to show coin control features or not.</source> <translation>Escolha para mostrar funcionalidades de controlo &quot;coin&quot; ou não.</translation> </message> <message> <location line="+3"/> <source>Display coin &amp;control features (experts only!)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Whether to select the coin outputs randomly or with minimal coin age.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Minimize weight consumption (experimental)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Use black visual theme (requires restart)</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;OK</source> <translation>&amp;OK</translation> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation>&amp;Cancelar</translation> </message> <message> <location line="+10"/> <source>&amp;Apply</source> <translation type="unfinished"/> </message> <message> <location filename="../optionsdialog.cpp" line="+53"/> <source>default</source> <translation>padrão</translation> </message> <message> <location line="+149"/> <location line="+9"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="-9"/> <location line="+9"/> <source>This setting will take effect after restarting Summitcoin.</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The supplied proxy address is invalid.</source> <translation>O endereço de proxy introduzido é inválido. </translation> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation>Formulário</translation> </message> <message> <location line="+46"/> <location line="+247"/> <source>The displayed information may be out of date. 
Your wallet automatically synchronizes with the Summitcoin network after a connection is established, but this process has not completed yet.</source> <translation type="unfinished"/> </message> <message> <location line="-173"/> <source>Stake:</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Unconfirmed:</source> <translation type="unfinished"/> </message> <message> <location line="-113"/> <source>Wallet</source> <translation>Carteira</translation> </message> <message> <location line="+49"/> <source>Spendable:</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Your current spendable balance</source> <translation>O seu saldo disponível para gastar</translation> </message> <message> <location line="+80"/> <source>Immature:</source> <translation>Imaturo:</translation> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation>O saldo minado ainda não maturou</translation> </message> <message> <location line="+23"/> <source>Total:</source> <translation>Total:</translation> </message> <message> <location line="+16"/> <source>Your current total balance</source> <translation>O seu saldo total actual</translation> </message> <message> <location line="+50"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation>&lt;b&gt;Transações recentes&lt;/b&gt;</translation> </message> <message> <location line="-118"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location line="-32"/> <source>Total of coins that was staked, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location filename="../overviewpage.cpp" line="+116"/> <location line="+1"/> <source>out of sync</source> <translation>fora de sincronia</translation> </message> </context> <context> <name>PaymentServer</name> <message> <location filename="../paymentserver.cpp" line="+107"/> <source>Cannot start Summitcoin: click-to-pay handler</source> <translation type="unfinished"/> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>Request Payment</source> <translation type="unfinished"/> </message> <message> <location line="+56"/> <source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="-44"/> <source>Label:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Message:</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;Save As...</source> <translation type="unfinished"/> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Save QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>PNG Images 
(*.png)</source> <translation type="unfinished"/> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> <translation>Nome do Cliente</translation> </message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> <location line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+53"/> <location line="+23"/> <source>N/A</source> <translation>N/D</translation> </message> <message> <location line="-194"/> <source>Client version</source> <translation>Versão do Cliente</translation> </message> <message> <location line="-45"/> <source>&amp;Information</source> <translation>&amp;Informação</translation> </message> <message> <location line="+68"/> <source>Using OpenSSL version</source> <translation>Usando versão OpenSSL</translation> </message> <message> <location line="+49"/> <source>Startup time</source> <translation>Tempo de início</translation> </message> <message> <location line="+29"/> <source>Network</source> <translation>Rede</translation> </message> <message> <location line="+7"/> <source>Number of connections</source> <translation>Número de ligações</translation> </message> <message> <location line="+23"/> <source>On testnet</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Block chain</source> <translation>Cadeia de blocos</translation> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation>Número actual de blocos</translation> </message> <message> <location line="+197"/> <source>&amp;Network Traffic</source> <translation type="unfinished"/> </message> <message> <location line="+52"/> <source>&amp;Clear</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Totals</source> <translation type="unfinished"/> </message> <message> <location line="+64"/> <source>In:</source> <translation type="unfinished"/> </message> <message> <location line="+80"/> <source>Out:</source> <translation type="unfinished"/> </message> <message> <location line="-383"/> <source>Last block time</source> <translation>Tempo do último bloco</translation> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation>&amp;Abrir</translation> </message> <message> <location line="+16"/> <source>Command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Show the Summitcoin-Qt help message to get a list with possible Summitcoin command-line options.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Show</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation>&amp;Consola</translation> </message> <message> <location line="-237"/> <source>Build date</source> <translation>Data de construção</translation> </message> <message> <location line="-104"/> <source>Summitcoin - Debug window</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Summitcoin Core</source> <translation type="unfinished"/> </message> <message> <location line="+256"/> <source>Debug log file</source> <translation>Ficheiro de registo de depuração</translation> </message> <message> <location line="+7"/> <source>Open the Summitcoin debug log file from the current data directory. 
This can take a few seconds for large log files.</source> <translation type="unfinished"/> </message> <message> <location line="+102"/> <source>Clear console</source> <translation>Limpar consola</translation> </message> <message> <location filename="../rpcconsole.cpp" line="+325"/> <source>Welcome to the Summitcoin RPC console.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation>Use as setas para cima e para baixo para navegar no histórico e &lt;b&gt;Ctrl-L&lt;/b&gt; para limpar o ecrã.</translation> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation>Digite &lt;b&gt;help&lt;/b&gt; para visualizar os comandos disponíveis.</translation> </message> <message> <location line="+127"/> <source>%1 B</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 KB</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 MB</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 GB</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>%1 m</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>%1 h</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 h %2 m</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+181"/> <location line="+5"/> <location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation>Enviar Moedas</translation> </message> <message> <location line="+76"/> <source>Coin Control Features</source> <translation>Funcionalidades de Coin Controlo:</translation> </message> <message> <location line="+20"/> <source>Inputs...</source> <translation>Entradas</translation> </message> <message> <location line="+7"/> <source>automatically selected</source> <translation>Selecção automática</translation> </message> <message> <location line="+19"/> <source>Insufficient funds!</source> <translation>Fundos insuficientes!</translation> </message> <message> <location line="+77"/> <source>Quantity:</source> <translation>Quantidade:</translation> </message> <message> <location line="+22"/> <location line="+35"/> <source>0</source> <translation type="unfinished"/> </message> <message> <location line="-19"/> <source>Bytes:</source> <translation>Bytes:</translation> </message> <message> <location line="+51"/> <source>Amount:</source> <translation>Quantia:</translation> </message> <message> <location line="+22"/> <location line="+86"/> <location line="+86"/> <location line="+32"/> <source>0.00 BC</source> <translation type="unfinished"/> </message> <message> <location line="-191"/> <source>Priority:</source> <translation>Prioridade:</translation> </message> <message> <location line="+19"/> <source>medium</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Fee:</source> <translation>Taxa:</translation> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation>Output Baixo:</translation> </message> <message> <location 
line="+19"/> <source>no</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>After Fee:</source> <translation>Depois de taxas:</translation> </message> <message> <location line="+35"/> <source>Change</source> <translation type="unfinished"/> </message> <message> <location line="+50"/> <source>custom change address</source> <translation type="unfinished"/> </message> <message> <location line="+106"/> <source>Send to multiple recipients at once</source> <translation>Enviar para múltiplos destinatários de uma vez</translation> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation>Adicionar &amp;Destinatário</translation> </message> <message> <location line="+16"/> <source>Remove all transaction fields</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation>&amp;Limpar Tudo</translation> </message> <message> <location line="+24"/> <source>Balance:</source> <translation>Saldo:</translation> </message> <message> <location line="+16"/> <source>123.456 BC</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Confirm the send action</source> <translation>Confirme ação de envio</translation> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation>&amp;Enviar</translation> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-173"/> <source>Enter a Summitcoin address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Copy quantity</source> <translation>Copiar quantidade</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Copiar quantia</translation> </message> <message> <location line="+1"/> <source>Copy fee</source> <translation>Copiar taxa</translation> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation>Copiar valor após taxa</translation> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation>Copiar bytes</translation> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation>Copiar prioridade</translation> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation>Copiar output baixo</translation> </message> <message> <location line="+1"/> <source>Copy change</source> <translation>Copiar troco</translation> </message> <message> <location line="+86"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation>Confirme envio de moedas</translation> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source> and </source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The recipient address is not valid, please recheck.</source> <translation>O endereço de destino não é válido, por favor verifique.</translation> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation>A quantia a pagar deverá ser maior que 0.</translation> </message> <message> <location line="+5"/> <source>The amount exceeds your balance.</source> <translation>A quantia excede o seu saldo.</translation> </message>
<message> <location line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>O total excede o seu saldo quando a taxa de transação de %1 for incluída.</translation> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation>Endereço duplicado encontrado, apenas poderá enviar uma vez para cada endereço por cada operação de envio.</translation> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed!</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="+247"/> <source>WARNING: Invalid Summitcoin address</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>(no label)</source> <translation>(Sem rótulo)</translation> </message> <message> <location line="+4"/> <source>WARNING: unknown change address</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+14"/> <source>Form</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>A&amp;mount:</source> <translation>Qu&amp;antia:</translation> </message> <message> <location line="+13"/> <source>Pay &amp;To:</source> <translation>&amp;Pagar A:</translation> </message> <message> <location line="+34"/> <source>The address to send the payment to (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+60"/> <location filename="../sendcoinsentry.cpp" line="+26"/> <source>Enter a label for this address to add it to your address book</source> <translation>Escreva um rótulo para este endereço para o adicionar ao seu livro de endereços</translation> </message> <message> <location line="-78"/> <source>&amp;Label:</source> <translation>Rótu&amp;lo:</translation> </message> <message> <location line="+28"/> <source>Choose address from address book</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation>Cole endereço da área de transferência</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation type="unfinished"/> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a Summitcoin address (e.g. 
B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation>Assinaturas - Assinar / Verificar uma Mensagem</translation> </message> <message> <location line="+13"/> <location line="+124"/> <source>&amp;Sign Message</source> <translation>A&amp;ssinar Mensagem</translation> </message> <message> <location line="-118"/> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation>Pode assinar mensagens com os seus endereços para provar que são seus. Tenha atenção ao assinar mensagens ambíguas, pois ataques de phishing podem tentar enganá-lo, de modo a assinar a sua identidade para os atacantes. Apenas assine declarações completamente detalhadas com as quais concorde.</translation> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <location line="+203"/> <source>Choose an address from the address book</source> <translation type="unfinished"/> </message> <message> <location line="-193"/> <location line="+203"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="-193"/> <source>Paste address from clipboard</source> <translation>Cole endereço da área de transferência</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation>Escreva aqui a mensagem que deseja assinar</translation> </message> <message> <location line="+24"/> <source>Copy the current signature to the system clipboard</source> <translation>Copiar a assinatura actual para a área de transferência</translation> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this Summitcoin address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all sign message fields</source> <translation>Repôr todos os campos de assinatura de mensagem</translation> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation>Limpar &amp;Tudo</translation> </message> <message> <location line="-87"/> <location line="+70"/> <source>&amp;Verify Message</source> <translation>&amp;Verificar Mensagem</translation> </message> <message> <location line="-64"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation>Introduza o endereço de assinatura, mensagem (assegure-se de copiar quebras de linha, espaços, tabuladores, etc. exactamente) e assinatura abaixo para verificar a mensagem. 
Tenha atenção para não ler mais na assinatura do que o que estiver na mensagem assinada, para evitar ser enganado por um atacante que se encontre entre si e quem assinou a mensagem.</translation> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified Summitcoin address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all verify message fields</source> <translation>Repôr todos os campos de verificação de mensagem</translation> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a Summitcoin address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation>Clique &quot;Assinar mensagem&quot; para gerar a assinatura</translation> </message> <message> <location line="+3"/> <source>Enter Summitcoin signature</source> <translation type="unfinished"/> </message> <message> <location line="+82"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation>O endereço introduzido é inválido. </translation> </message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation>Por favor verifique o endereço e tente de novo.</translation> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation>O endereço introduzido não refere a chave alguma.</translation> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation>O desbloqueio da carteira foi cancelado.</translation> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation>A chave privada para o endereço introduzido não está disponível.</translation> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation>Assinatura de mensagem falhou.</translation> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation>Mensagem assinada.</translation> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation>A assinatura não pôde ser descodificada.</translation> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> <translation>Por favor verifique a assinatura e tente de novo.</translation> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation>A assinatura não condiz com o conteúdo da mensagem.</translation> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation>Verificação da mensagem falhou.</translation> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation>Mensagem verificada.</translation> </message> </context> <context> <name>TrafficGraphWidget</name> <message> <location filename="../trafficgraphwidget.cpp" line="+75"/> <source>KB/s</source> <translation type="unfinished"/> </message> 
</context> <context> <name>TransactionDesc</name> <message> <location filename="../transactiondesc.cpp" line="+25"/> <source>Open until %1</source> <translation>Aberto até %1</translation> </message><|fim▁hole|> <location line="+6"/> <source>conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1/offline</source> <translation>%1/desligado</translation> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation>%1/não confirmada</translation> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation>%1 confirmações</translation> </message> <message> <location line="+17"/> <source>Status</source> <translation>Estado</translation> </message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> <translation><numerusform>, transmitida através de %n nó</numerusform><numerusform>, transmitida através de %n nós</numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+7"/> <source>Source</source> <translation>Origem</translation> </message> <message> <location line="+0"/> <source>Generated</source> <translation>Gerado</translation> </message> <message> <location line="+5"/> <location line="+17"/> <source>From</source> <translation>De</translation> </message> <message> <location line="+1"/> <location line="+22"/> <location line="+58"/> <source>To</source> <translation>Para</translation> </message> <message> <location line="-77"/> <location line="+2"/> <source>own address</source> <translation>endereço próprio</translation> </message> <message> <location line="-2"/> <source>label</source> <translation>rótulo</translation> </message> <message> <location line="+37"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+30"/> <source>Credit</source> <translation>Crédito</translation> </message> <message numerus="yes"> <location line="-102"/> <source>matures in %n more block(s)</source> <translation><numerusform>matura daqui por %n bloco</numerusform><numerusform>matura daqui por %n blocos</numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation>não aceite</translation> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+30"/> <source>Debit</source> <translation>Débito</translation> </message> <message> <location line="-39"/> <source>Transaction fee</source> <translation>Taxa de transação</translation> </message> <message> <location line="+16"/> <source>Net amount</source> <translation>Valor líquido</translation> </message> <message> <location line="+6"/> <source>Message</source> <translation>Mensagem</translation> </message> <message> <location line="+2"/> <source>Comment</source> <translation>Comentário</translation> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation>ID da Transação</translation> </message> <message> <location line="+3"/> <source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. 
This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Debug information</source> <translation>Informação de depuração</translation> </message> <message> <location line="+8"/> <source>Transaction</source> <translation>Transação</translation> </message> <message> <location line="+5"/> <source>Inputs</source> <translation>Entradas</translation> </message> <message> <location line="+21"/> <source>Amount</source> <translation>Quantia</translation> </message> <message> <location line="+1"/> <source>true</source> <translation>verdadeiro</translation> </message> <message> <location line="+0"/> <source>false</source> <translation>falso</translation> </message> <message> <location line="-209"/> <source>, has not been successfully broadcast yet</source> <translation>, ainda não foi transmitida com sucesso</translation> </message> <message numerus="yes"> <location line="-36"/> <source>Open for %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+71"/> <source>unknown</source> <translation>desconhecido</translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation>Detalhes da transação</translation> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation>Esta janela mostra uma descrição detalhada da transação</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+231"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+0"/> <source>Type</source> <translation>Tipo</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Endereço</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation>Quantia</translation> </message> <message> <location line="+52"/> <source>Open until %1</source> <translation>Aberto até %1</translation> </message> <message> <location line="+12"/> <source>Confirmed (%1 confirmations)</source> <translation>Confirmada (%1 confirmações)</translation> </message> <message numerus="yes"> <location line="-15"/> <source>Open for %n more block(s)</source> <translation><numerusform>Aberta por mais %n bloco</numerusform><numerusform>Aberta por mais %n blocos</numerusform></translation> </message> <message> <location line="+6"/> <source>Offline</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Unconfirmed</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Confirming (%1 of %2 recommended confirmations)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Immature (%1 confirmations, will be available after %2)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation>Este bloco não foi recebido por outros nós e provavelmente não será aceite pela rede!</translation> </message> 
<message> <location line="+3"/> <source>Generated but not accepted</source> <translation>Gerado mas não aceite</translation> </message> <message> <location line="+42"/> <source>Received with</source> <translation>Recebido com</translation> </message> <message> <location line="+2"/> <source>Received from</source> <translation>Recebido de</translation> </message> <message> <location line="+3"/> <source>Sent to</source> <translation>Enviado para</translation> </message> <message> <location line="+2"/> <source>Payment to yourself</source> <translation>Pagamento ao próprio</translation> </message> <message> <location line="+2"/> <source>Mined</source> <translation>Minadas</translation> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation>(n/d)</translation> </message> <message> <location line="+194"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation>Estado da transação. Pairar por cima deste campo para mostrar o número de confirmações.</translation> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation>Data e hora a que esta transação foi recebida.</translation> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation>Tipo de transação.</translation> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation>Endereço de destino da transação.</translation> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation>Quantia retirada ou adicionada ao saldo.</translation> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+54"/> <location line="+17"/> <source>All</source> <translation>Todas</translation> </message> <message> <location line="-16"/> <source>Today</source> <translation>Hoje</translation> </message> <message> <location line="+1"/> <source>This week</source> <translation>Esta semana</translation> </message> <message> <location line="+1"/> <source>This month</source> <translation>Este mês</translation> </message> <message> <location line="+1"/> <source>Last month</source> <translation>Mês passado</translation> </message> <message> <location line="+1"/> <source>This year</source> <translation>Este ano</translation> </message> <message> <location line="+1"/> <source>Range...</source> <translation>Período...</translation> </message> <message> <location line="+12"/> <source>Received with</source> <translation>Recebida com</translation> </message> <message> <location line="+2"/> <source>Sent to</source> <translation>Enviada para</translation> </message> <message> <location line="+2"/> <source>To yourself</source> <translation>Para si</translation> </message> <message> <location line="+1"/> <source>Mined</source> <translation>Minadas</translation> </message> <message> <location line="+1"/> <source>Other</source> <translation>Outras</translation> </message> <message> <location line="+7"/> <source>Enter address or label to search</source> <translation>Escreva endereço ou rótulo a procurar</translation> </message> <message> <location line="+7"/> <source>Min amount</source> <translation>Quantia mínima</translation> </message> <message> <location line="+34"/> <source>Copy address</source> <translation>Copiar endereço</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Copiar rótulo</translation> 
</message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Copiar quantia</translation> </message> <message> <location line="+1"/> <source>Copy transaction ID</source> <translation>Copiar ID da Transação</translation> </message> <message> <location line="+1"/> <source>Edit label</source> <translation>Editar rótulo</translation> </message> <message> <location line="+1"/> <source>Show transaction details</source> <translation>Mostrar detalhes da transação</translation> </message> <message> <location line="+138"/> <source>Export Transaction Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Ficheiro separado por vírgula (*.csv)</translation> </message> <message> <location line="+8"/> <source>Confirmed</source> <translation>Confirmada</translation> </message> <message> <location line="+1"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+1"/> <source>Type</source> <translation>Tipo</translation> </message> <message> <location line="+1"/> <source>Label</source> <translation>Rótulo</translation> </message> <message> <location line="+1"/> <source>Address</source> <translation>Endereço</translation> </message> <message> <location line="+1"/> <source>Amount</source> <translation>Quantia</translation> </message> <message> <location line="+1"/> <source>ID</source> <translation>ID</translation> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"/> </message> <message> <location line="+100"/> <source>Range:</source> <translation>Período:</translation> </message> <message> <location line="+8"/> <source>to</source> <translation>até</translation> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+208"/> <source>Sending...</source> <translation type="unfinished"/> </message> </context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+173"/> <source>Summitcoin version</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Usage:</source> <translation>Utilização:</translation> </message> <message> <location line="+1"/> <source>Send command to -server or Summitcoind</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>List commands</source> <translation>Listar comandos</translation> </message> <message> <location line="+1"/> <source>Get help for a command</source> <translation>Obter ajuda para um comando</translation> </message> <message> <location line="-147"/> <source>Options:</source> <translation>Opções:</translation> </message> <message> <location line="+2"/> <source>Specify configuration file (default: Summitcoin.conf)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Specify pid file (default: Summitcoind.pid)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Specify wallet file (within data directory)</source> <translation>Especifique ficheiro de carteira (dentro da pasta de dados)</translation> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation>Especificar pasta de dados</translation> </message> <message> <location line="-25"/> <source>%s, you must 
set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=Summitcoinrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;Summitcoin Alert&quot; [email protected] </source> <translation type="unfinished"/> </message> <message> <location line="+27"/> <source>Set database cache size in megabytes (default: 25)</source> <translation>Definir o tamanho da cache de base de dados em megabytes (por defeito: 25)</translation> </message> <message> <location line="+1"/> <source>Set database disk log size in megabytes (default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Listen for connections on &lt;port&gt; (default: 15714 or testnet: 25714)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation>Manter no máximo &lt;n&gt; ligações a outros nós da rede (por defeito: 125)</translation> </message> <message> <location line="+3"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation>Ligar a um nó para recuperar endereços de pares, e desligar</translation> </message> <message> <location line="+1"/> <source>Specify your own public address</source> <translation>Especifique o seu endereço público</translation> </message> <message> <location line="+4"/> <source>Bind to given address. Use [host]:port notation for IPv6</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Always query for peer addresses via DNS lookup (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation>Tolerância para desligar nós mal-formados (por defeito: 100)</translation> </message> <message> <location line="+1"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation>Número de segundos a impedir que nós mal-formados se liguem de novo (por defeito: 86400)</translation> </message> <message> <location line="-37"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation>Ocorreu um erro ao definir a porta %u do serviço RPC a escutar em IPv4: %s</translation> </message> <message> <location line="+65"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 15715 or testnet: 25715)</source> <translation type="unfinished"/> </message> <message> <location line="-17"/> <source>Accept command line and JSON-RPC commands</source> <translation>Aceitar comandos da consola e JSON-RPC</translation> </message> <message> <location line="+1"/> <source>Run in the background as a daemon and accept commands</source> <translation>Correr o processo como um daemon e aceitar comandos</translation> </message> <message> <location line="+1"/> <source>Use the test network</source> <translation>Utilizar a rede de testes - testnet</translation> </message> <message> <location 
line="-24"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation>Aceitar ligações externas (padrão: 1 sem -proxy ou -connect)</translation> </message> <message> <location line="-29"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation>Ocorreu um erro ao definir a porta %u do serviço RPC a escutar em IPv6, a usar IPv4: %s</translation> </message> <message> <location line="+96"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source> <translation type="unfinished"/> </message> <message> <location line="+12"/> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation>Atenção: -paytxfee está definida com um valor muito alto! Esta é a taxa que irá pagar se enviar uma transação.</translation> </message> <message> <location line="-103"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong Summitcoin will not work properly.</source> <translation type="unfinished"/> </message> <message> <location line="+132"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation>Atenção: erro ao ler wallet.dat! Todas as chaves foram lidas correctamente, mas dados de transação ou do livro de endereços podem estar em falta ou incorrectos.</translation> </message> <message> <location line="-18"/> <source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation>Atenção: wallet.dat corrupto, dados recuperados! wallet.dat original salvo como wallet.{timestamp}.bak em %s; se o seu saldo ou transações estiverem incorrectos deverá recuperar de uma cópia de segurança.</translation> </message> <message> <location line="-31"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation>Tentar recuperar chaves privadas de um wallet.dat corrupto</translation> </message> <message> <location line="+5"/> <source>Block creation options:</source> <translation>Opções de criação de bloco:</translation> </message> <message> <location line="-69"/> <source>Connect only to the specified node(s)</source> <translation>Apenas ligar ao(s) nó(s) especificado(s)</translation> </message> <message> <location line="+4"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation>Descobrir endereço IP próprio (padrão: 1 ao escutar e sem -externalip)</translation> </message> <message> <location line="+101"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation>Falhou a escutar em qualquer porta. 
Use -listen=0 se quer isto.</translation> </message> <message> <location line="-91"/> <source>Sync checkpoints policy (default: strict)</source> <translation type="unfinished"/> </message> <message> <location line="+89"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Invalid amount for -reservebalance=&lt;amount&gt;</source> <translation type="unfinished"/> </message> <message> <location line="-88"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation>Armazenamento intermédio de recepção por ligação, &lt;n&gt;*1000 bytes (por defeito: 5000)</translation> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation>Armazenamento intermédio de envio por ligação, &lt;n&gt;*1000 bytes (por defeito: 1000)</translation> </message> <message> <location line="-17"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation>Apenas ligar a nós na rede &lt;net&gt; (IPv4, IPv6 ou Tor)</translation> </message> <message> <location line="+31"/> <source>Prepend debug output with timestamp</source> <translation type="unfinished"/> </message> <message> <location line="+41"/> <source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source> <translation>Opções SSL: (ver a Wiki Bitcoin para instruções de configuração SSL)</translation> </message> <message> <location line="-81"/> <source>Select the version of socks proxy to use (4-5, default: 5)</source> <translation type="unfinished"/> </message> <message> <location line="+42"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation>Enviar informação de rastreio/depuração para a consola e não para o ficheiro debug.log</translation> </message> <message> <location line="+5"/> <source>Send trace/debug info to debugger</source> <translation type="unfinished"/> </message> <message> <location line="+30"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Set minimum block size in bytes (default: 0)</source> <translation>Definir tamanho minímo de um bloco em bytes (por defeito: 0)</translation> </message> <message> <location line="-35"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation>Encolher ficheiro debug.log ao iniciar o cliente (por defeito: 1 sem -debug definido)</translation> </message> <message> <location line="-43"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation>Especificar tempo de espera da ligação em millisegundos (por defeito: 5000)</translation> </message> <message> <location line="+116"/> <source>Unable to sign checkpoint, wrong checkpointkey? 
</source> <translation type="unfinished"/> </message> <message> <location line="-86"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation>Usar UPnP para mapear a porta de escuta (padrão: 0)</translation> </message> <message> <location line="-1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation>Usar UPnP para mapear a porta de escuta (padrão: 1 ao escutar)</translation> </message> <message> <location line="-26"/> <source>Use proxy to reach tor hidden services (default: same as -proxy)</source> <translation type="unfinished"/> </message> <message> <location line="+47"/> <source>Username for JSON-RPC connections</source> <translation>Nome de utilizador para ligações JSON-RPC</translation> </message> <message> <location line="+51"/> <source>Verifying database integrity...</source> <translation type="unfinished"/> </message> <message> <location line="+44"/> <source>Error: Wallet locked, unable to create transaction!</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Error: Transaction creation failed!</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Information</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>WARNING: syncronized checkpoint violation detected, but skipped!</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation>Atenção: Esta versão está obsoleta, é necessário actualizar!</translation> </message> <message> <location line="-54"/> <source>wallet.dat corrupt, salvage failed</source> <translation>wallet.dat corrupta, recuperação falhou</translation> </message> <message> <location line="-56"/> <source>Password for JSON-RPC connections</source> <translation>Palavra-passe para ligações JSON-RPC</translation> </message> <message> <location line="-32"/> <source>Sync time with other nodes. Disable if time on your system is precise e.g. 
syncing with NTP (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Output debugging information (default: 0, supplying &lt;category&gt; is optional)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>If &lt;category&gt; is not supplied, output all debugging information.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>&lt;category&gt; can be:</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly. This is intended for regression testing tools and app development.</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation>Permitir ligações JSON-RPC do endereço IP especificado</translation> </message> <message> <location line="+1"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation>Enviar comandos para o nó a correr em &lt;ip&gt; (por defeito: 127.0.0.1)</translation> </message> <message> <location line="+1"/> <source>Wait for RPC server to start</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set the number of threads to service RPC calls (default: 4)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation>Executar comando quando mudar o melhor bloco (no comando, %s é substituído pela hash do bloco)</translation> </message> <message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation>Executar comando quando uma das transações na carteira mudar (no comando, %s é substituído pelo ID da Transação)</translation> </message> <message> <location line="+3"/> <source>Require a confirmations for change (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Upgrade wallet to latest format</source> <translation>Atualize a carteira para o formato mais recente</translation> </message> <message> <location line="+1"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation>Definir o tamanho da memória de chaves para &lt;n&gt; (por defeito: 100)</translation> </message> <message> <location line="+1"/> <source>Rescan the block chain for missing wallet transactions</source> <translation>Reexaminar a cadeia de blocos para transações em falta na carteira</translation> </message> <message> <location line="+3"/> <source>How thorough the block verification is (0-6, default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Imports blocks from external blk000?.dat file</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation>Usar OpenSSL (https) para ligações 
JSON-RPC</translation> </message> <message> <location line="+1"/> <source>Server certificate file (default: server.cert)</source> <translation>Ficheiro de certificado do servidor (por defeito: server.cert)</translation> </message> <message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation>Chave privada do servidor (por defeito: server.pem)</translation> </message> <message> <location line="+10"/> <source>Initialization sanity check failed. Summitcoin is shutting down.</source> <translation type="unfinished"/> </message> <message> <location line="+50"/> <source>Error: Wallet unlocked for staking only, unable to create transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Error: Disk space is low!</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source> <translation type="unfinished"/> </message> <message> <location line="-174"/> <source>This help message</source> <translation>Esta mensagem de ajuda</translation> </message> <message> <location line="+104"/> <source>Wallet %s resides outside data directory %s.</source> <translation type="unfinished"/> </message> <message> <location line="+37"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation>Incapaz de vincular a %s neste computador (vínculo retornou erro %d, %s)</translation> </message> <message> <location line="-133"/> <source>Connect through socks proxy</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation>Permitir procuras DNS para -addnode, -seednode e -connect</translation> </message> <message> <location line="+126"/> <source>Loading addresses...</source> <translation>Carregar endereços...</translation> </message> <message> <location line="-12"/> <source>Error loading blkindex.dat</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation>Erro ao carregar wallet.dat: Carteira danificada</translation> </message> <message> <location line="+4"/> <source>Error loading wallet.dat: Wallet requires newer version of Summitcoin</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Wallet needed to be rewritten: restart Summitcoin to complete</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Error loading wallet.dat</source> <translation>Erro ao carregar wallet.dat</translation> </message> <message> <location line="-16"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation>Endereço -proxy inválido: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation>Rede desconhecida especificada em -onlynet: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown -socks proxy version requested: %i</source> <translation>Versão desconhecida de proxy -socks requisitada: %i</translation> </message> <message> <location 
line="+4"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation>Não conseguiu resolver endereço -bind: &apos;%s&apos;</translation> </message> <message> <location line="+2"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation>Não conseguiu resolver endereço -externalip: &apos;%s&apos;</translation> </message> <message> <location line="-23"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Quantia inválida para -paytxfee=&lt;amount&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+60"/> <source>Sending...</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Invalid amount</source> <translation>Quantia inválida</translation> </message> <message> <location line="+1"/> <source>Insufficient funds</source> <translation>Fundos insuficientes</translation> </message> <message> <location line="-40"/> <source>Loading block index...</source> <translation>Carregar índice de blocos...</translation> </message> <message> <location line="-110"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation>Adicione um nó ao qual se ligar e tentar manter a ligação aberta</translation> </message> <message> <location line="+125"/> <source>Unable to bind to %s on this computer. Summitcoin is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="-101"/> <source>Fee per KB to add to transactions you send</source> <translation type="unfinished"/> </message> <message> <location line="+34"/> <source>Minimize weight consumption (experimental) (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>How many blocks to check at startup (default: 500, 0 = all)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Keep at most &lt;n&gt; unconnectable blocks in memory (default: %u)</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Warning: Deprecated argument -debugnet ignored, use -debug=net</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Invalid amount for -mininput=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Cannot obtain a lock on data directory %s. 
Summitcoin is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Error initializing wallet database environment %s!</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Loading wallet...</source> <translation>Carregar carteira...</translation> </message> <message> <location line="+8"/> <source>Cannot downgrade wallet</source> <translation>Impossível mudar a carteira para uma versão anterior</translation> </message> <message> <location line="+1"/> <source>Cannot write default address</source> <translation>Impossível escrever endereço por defeito</translation> </message> <message> <location line="+1"/> <source>Rescanning...</source> <translation>Reexaminando...</translation> </message> <message> <location line="+2"/> <source>Done loading</source> <translation>Carregamento completo</translation> </message> <message> <location line="-161"/> <source>To use the %s option</source> <translation>Para usar a opção %s</translation> </message> <message> <location line="+188"/> <source>Error</source> <translation>Erro</translation> </message> <message> <location line="-18"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation>Deverá definir rpcpassword=&lt;password&gt; no ficheiro de configuração: %s Se o ficheiro não existir, crie-o com permissões de leitura apenas para o dono.</translation> </message> </context> </TS><|fim▁end|>
<message>
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# coding: utf-8

from __future__ import unicode_literals

__license__ = 'Public Domain'

import codecs
import io
import os
import random
import sys

from .options import (
    parseOpts,
)
from .compat import (
    compat_expanduser,
    compat_getpass,
    compat_shlex_split,
    workaround_optparse_bug9161,
)
from .utils import (
    DateRange,
    decodeOption,
    DEFAULT_OUTTMPL,
    DownloadError,
    match_filter_func,
    MaxDownloadsReached,
    preferredencoding,
    read_batch_urls,
    SameFileError,
    setproctitle,
    std_headers,
    write_string,
    render_table,
)
from .update import update_self
from .downloader import (
    FileDownloader,
)
from .extractor import gen_extractors, list_extractors
from .extractor.adobepass import MSO_INFO
from .YoutubeDL import YoutubeDL


def _real_main(argv=None):
    # Compatibility fixes for Windows
    if sys.platform == 'win32':
        # https://github.com/rg3/youtube-dl/issues/820
        codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)

    workaround_optparse_bug9161()

    setproctitle('youtube-dl')

    parser, opts, args = parseOpts(argv)

    # Set user agent
    if opts.user_agent is not None:
        std_headers['User-Agent'] = opts.user_agent

    # Set referer
    if opts.referer is not None:
        std_headers['Referer'] = opts.referer

    # Custom HTTP headers
    if opts.headers is not None:
        for h in opts.headers:
            if ':' not in h:
                parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
            key, value = h.split(':', 1)
            if opts.verbose:
                write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
            std_headers[key] = value

    # Dump user agent
    if opts.dump_user_agent:
        write_string(std_headers['User-Agent'] + '\n', out=sys.stdout)
        sys.exit(0)

    # Batch file verification
    batch_urls = []
    if opts.batchfile is not None:
        try:
            if opts.batchfile == '-':
                batchfd = sys.stdin
            else:
                batchfd = io.open(
                    compat_expanduser(opts.batchfile),
                    'r', encoding='utf-8', errors='ignore')
            batch_urls = read_batch_urls(batchfd)
            if opts.verbose:
                write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
        except IOError:
            sys.exit('ERROR: batch file could not be read')
    all_urls = batch_urls + args
    all_urls = [url.strip() for url in all_urls]
    _enc = preferredencoding()
    all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls]

    if opts.list_extractors:
        for ie in list_extractors(opts.age_limit):
            write_string(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else '') + '\n', out=sys.stdout)
            matchedUrls = [url for url in all_urls if ie.suitable(url)]
            for mu in matchedUrls:
                write_string(' ' + mu + '\n', out=sys.stdout)
        sys.exit(0)
    if opts.list_extractor_descriptions:
        for ie in list_extractors(opts.age_limit):
            if not ie._WORKING:
                continue
            desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
            if desc is False:
                continue
            if hasattr(ie, 'SEARCH_KEY'):
                _SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny', 'burping cow')
                _COUNTS = ('', '5', '10', 'all')
                desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
            write_string(desc + '\n', out=sys.stdout)
        sys.exit(0)
    if opts.ap_list_mso:
        table = [[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()]
        write_string('Supported TV Providers:\n' + render_table(['mso', 'mso name'], table) + '\n', out=sys.stdout)
        sys.exit(0)

    # Conflicting, missing and erroneous options
    if opts.usenetrc and (opts.username is not None or opts.password is not None):
        parser.error('using .netrc conflicts with giving username/password')
    if opts.password is not None and opts.username is None:
        parser.error('account username missing\n')
    if opts.ap_password is not None and opts.ap_username is None:
        parser.error('TV Provider account username missing\n')
    if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
        parser.error('using output template conflicts with using title, video ID or auto number')
    if opts.usetitle and opts.useid:
        parser.error('using title conflicts with using video ID')<|fim▁hole|>
    if opts.ap_username is not None and opts.ap_password is None:
        opts.ap_password = compat_getpass('Type TV provider account password and press [Return]: ')

    if opts.ratelimit is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
        if numeric_limit is None:
            parser.error('invalid rate limit specified')
        opts.ratelimit = numeric_limit
    if opts.min_filesize is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
        if numeric_limit is None:
            parser.error('invalid min_filesize specified')
        opts.min_filesize = numeric_limit
    if opts.max_filesize is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
        if numeric_limit is None:
            parser.error('invalid max_filesize specified')
        opts.max_filesize = numeric_limit
    if opts.sleep_interval is not None:
        if opts.sleep_interval < 0:
            parser.error('sleep interval must be positive or 0')
    if opts.max_sleep_interval is not None:
        if opts.max_sleep_interval < 0:
            parser.error('max sleep interval must be positive or 0')
        if opts.max_sleep_interval < opts.sleep_interval:
            parser.error('max sleep interval must be greater than or equal to min sleep interval')
    else:
        opts.max_sleep_interval = opts.sleep_interval
    if opts.ap_mso and opts.ap_mso not in MSO_INFO:
        parser.error('Unsupported TV Provider, use --ap-list-mso to get a list of supported TV Providers')

    def parse_retries(retries):
        if retries in ('inf', 'infinite'):
            parsed_retries = float('inf')
        else:
            try:
                parsed_retries = int(retries)
            except (TypeError, ValueError):
                parser.error('invalid retry count specified')
        return parsed_retries
    if opts.retries is not None:
        opts.retries = parse_retries(opts.retries)
    if opts.fragment_retries is not None:
        opts.fragment_retries = parse_retries(opts.fragment_retries)
    if opts.buffersize is not None:
        numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
        if numeric_buffersize is None:
            parser.error('invalid buffer size specified')
        opts.buffersize = numeric_buffersize
    if opts.playliststart <= 0:
        raise ValueError('Playlist start must be positive')
    if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
        raise ValueError('Playlist end must be greater than playlist start')
    if opts.extractaudio:
        if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
            parser.error('invalid audio format specified')
    if opts.audioquality:
        opts.audioquality = opts.audioquality.strip('k').strip('K')
        if not opts.audioquality.isdigit():
            parser.error('invalid audio quality specified')
    if opts.recodevideo is not None:
        if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']:
            parser.error('invalid video recode format specified')
    if opts.convertsubtitles is not None:
        if opts.convertsubtitles not in ['srt', 'vtt', 'ass']:
            parser.error('invalid subtitle format specified')

    if opts.date is not None:
        date = DateRange.day(opts.date)
    else:
        date = DateRange(opts.dateafter, opts.datebefore)

    # Do not download videos when there are audio-only formats
    if opts.extractaudio and not opts.keepvideo and opts.format is None:
        opts.format = 'bestaudio/best'

    # --all-sub automatically sets --write-sub if --write-auto-sub is not given
    # this was the old behaviour if only --all-sub was given.
    if opts.allsubtitles and not opts.writeautomaticsub:
        opts.writesubtitles = True

    outtmpl = ((opts.outtmpl is not None and opts.outtmpl) or
               (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') or
               (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') or
               (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') or
               (opts.usetitle and '%(title)s-%(id)s.%(ext)s') or
               (opts.useid and '%(id)s.%(ext)s') or
               (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') or
               DEFAULT_OUTTMPL)
    if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
        parser.error('Cannot download a video and extract audio into the same'
                     ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
                     ' template'.format(outtmpl))

    any_getting = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
    any_printing = opts.print_json
    download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive

    # PostProcessors
    postprocessors = []
    # Add the metadata pp first, the other pps will copy it
    if opts.metafromtitle:
        postprocessors.append({
            'key': 'MetadataFromTitle',
            'titleformat': opts.metafromtitle
        })
    if opts.addmetadata:
        postprocessors.append({'key': 'FFmpegMetadata'})
    if opts.extractaudio:
        postprocessors.append({
            'key': 'FFmpegExtractAudio',
            'preferredcodec': opts.audioformat,
            'preferredquality': opts.audioquality,
            'nopostoverwrites': opts.nopostoverwrites,
        })
    if opts.recodevideo:
        postprocessors.append({
            'key': 'FFmpegVideoConvertor',
            'preferedformat': opts.recodevideo,
        })
    if opts.convertsubtitles:
        postprocessors.append({
            'key': 'FFmpegSubtitlesConvertor',
            'format': opts.convertsubtitles,
        })
    if opts.embedsubtitles:
        postprocessors.append({
            'key': 'FFmpegEmbedSubtitle',
        })
    if opts.embedthumbnail:
        already_have_thumbnail = opts.writethumbnail or opts.write_all_thumbnails
        postprocessors.append({
            'key': 'EmbedThumbnail',
            'already_have_thumbnail': already_have_thumbnail
        })
        if not already_have_thumbnail:
            opts.writethumbnail = True
    # XAttrMetadataPP should be run after post-processors that may change file
    # contents
    if opts.xattrs:
        postprocessors.append({'key': 'XAttrMetadata'})
    # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
    # So if the user is able to remove the file before your postprocessor runs it might cause a few problems.
    if opts.exec_cmd:
        postprocessors.append({
            'key': 'ExecAfterDownload',
            'exec_cmd': opts.exec_cmd,
        })
    external_downloader_args = None
    if opts.external_downloader_args:
        external_downloader_args = compat_shlex_split(opts.external_downloader_args)
    postprocessor_args = None
    if opts.postprocessor_args:
        postprocessor_args = compat_shlex_split(opts.postprocessor_args)
    match_filter = (
        None if opts.match_filter is None
        else match_filter_func(opts.match_filter))

    ydl_opts = {
        'usenetrc': opts.usenetrc,
        'username': opts.username,
        'password': opts.password,
        'twofactor': opts.twofactor,
        'videopassword': opts.videopassword,
        'ap_mso': opts.ap_mso,
        'ap_username': opts.ap_username,
        'ap_password': opts.ap_password,
        'quiet': (opts.quiet or any_getting or any_printing),
        'no_warnings': opts.no_warnings,
        'forceurl': opts.geturl,
        'forcetitle': opts.gettitle,
        'forceid': opts.getid,
        'forcethumbnail': opts.getthumbnail,
        'forcedescription': opts.getdescription,
        'forceduration': opts.getduration,
        'forcefilename': opts.getfilename,
        'forceformat': opts.getformat,
        'forcejson': opts.dumpjson or opts.print_json,
        'dump_single_json': opts.dump_single_json,
        'simulate': opts.simulate or any_getting,
        'skip_download': opts.skip_download,
        'format': opts.format,
        'listformats': opts.listformats,
        'outtmpl': outtmpl,
        'autonumber_size': opts.autonumber_size,
        'restrictfilenames': opts.restrictfilenames,
        'ignoreerrors': opts.ignoreerrors,
        'force_generic_extractor': opts.force_generic_extractor,
        'ratelimit': opts.ratelimit,
        'nooverwrites': opts.nooverwrites,
        'retries': opts.retries,
        'fragment_retries': opts.fragment_retries,
        'skip_unavailable_fragments': opts.skip_unavailable_fragments,
        'buffersize': opts.buffersize,
        'noresizebuffer': opts.noresizebuffer,
        'continuedl': opts.continue_dl,
        'noprogress': opts.noprogress,
        'progress_with_newline': opts.progress_with_newline,
        'playliststart': opts.playliststart,
        'playlistend': opts.playlistend,
        'playlistreverse': opts.playlist_reverse,
        'noplaylist': opts.noplaylist,
        'logtostderr': opts.outtmpl == '-',
        'consoletitle': opts.consoletitle,
        'nopart': opts.nopart,
        'updatetime': opts.updatetime,
        'writedescription': opts.writedescription,
        'writeannotations': opts.writeannotations,
        'writeinfojson': opts.writeinfojson,
        'writethumbnail': opts.writethumbnail,
        'write_all_thumbnails': opts.write_all_thumbnails,
        'writesubtitles': opts.writesubtitles,
        'writeautomaticsub': opts.writeautomaticsub,
        'allsubtitles': opts.allsubtitles,
        'listsubtitles': opts.listsubtitles,
        'subtitlesformat': opts.subtitlesformat,
        'subtitleslangs': opts.subtitleslangs,
        'matchtitle': decodeOption(opts.matchtitle),
        'rejecttitle': decodeOption(opts.rejecttitle),
        'max_downloads': opts.max_downloads,
        'prefer_free_formats': opts.prefer_free_formats,
        'verbose': opts.verbose,
        'dump_intermediate_pages': opts.dump_intermediate_pages,
        'write_pages': opts.write_pages,
        'test': opts.test,
        'keepvideo': opts.keepvideo,
        'min_filesize': opts.min_filesize,
        'max_filesize': opts.max_filesize,
        'min_views': opts.min_views,
        'max_views': opts.max_views,
        'daterange': date,
        'cachedir': opts.cachedir,
        'youtube_print_sig_code': opts.youtube_print_sig_code,
        'age_limit': opts.age_limit,
        'download_archive': download_archive_fn,
        'cookiefile': opts.cookiefile,
        'nocheckcertificate': opts.no_check_certificate,
        'prefer_insecure': opts.prefer_insecure,
        'proxy': opts.proxy,
        'socket_timeout': opts.socket_timeout,
        'bidi_workaround': opts.bidi_workaround,
        'debug_printtraffic': opts.debug_printtraffic,
        'prefer_ffmpeg': opts.prefer_ffmpeg,
        'include_ads': opts.include_ads,
        'default_search': opts.default_search,
        'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
        'encoding': opts.encoding,
        'extract_flat': opts.extract_flat,
        'mark_watched': opts.mark_watched,
        'merge_output_format': opts.merge_output_format,
        'postprocessors': postprocessors,
        'fixup': opts.fixup,
        'source_address': opts.source_address,
        'call_home': opts.call_home,
        'sleep_interval': opts.sleep_interval,
        'max_sleep_interval': opts.max_sleep_interval,
        'external_downloader': opts.external_downloader,
        'list_thumbnails': opts.list_thumbnails,
        'playlist_items': opts.playlist_items,
        'xattr_set_filesize': opts.xattr_set_filesize,
        'match_filter': match_filter,
        'no_color': opts.no_color,
        'ffmpeg_location': opts.ffmpeg_location,
        'hls_prefer_native': opts.hls_prefer_native,
        'hls_use_mpegts': opts.hls_use_mpegts,
        'external_downloader_args': external_downloader_args,
        'postprocessor_args': postprocessor_args,
        'cn_verification_proxy': opts.cn_verification_proxy,
        'geo_verification_proxy': opts.geo_verification_proxy,
    }

    with YoutubeDL(ydl_opts) as ydl:
        # Update version
        if opts.update_self:
            update_self(ydl.to_screen, opts.verbose, ydl._opener)

        # Remove cache dir
        if opts.rm_cachedir:
            ydl.cache.remove()

        # Maybe do nothing
        if (len(all_urls) < 1) and (opts.load_info_filename is None):
            if opts.update_self or opts.rm_cachedir:
                sys.exit()

            ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
            parser.error(
                'You must provide at least one URL.\n'
                'Type youtube-dl --help to see a list of all options.')

        try:
            if opts.load_info_filename is not None:
                retcode = ydl.download_with_info_file(compat_expanduser(opts.load_info_filename))
            else:
                retcode = ydl.download(all_urls)
        except MaxDownloadsReached:
            ydl.to_screen('--max-download limit reached, aborting.')
            retcode = 101

    sys.exit(retcode)


def main(argv=None):
    try:
        _real_main(argv)
    except DownloadError:
        sys.exit(1)
    except SameFileError:
        sys.exit('ERROR: fixed output name but more than one file to download')
    except KeyboardInterrupt:
        sys.exit('\nERROR: Interrupted by user')


__all__ = ['main', 'YoutubeDL', 'gen_extractors', 'list_extractors']<|fim▁end|>
    if opts.username is not None and opts.password is None:
        opts.password = compat_getpass('Type account password and press [Return]: ')
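
The module above funnels every CLI flag into a single options dict and hands it to YoutubeDL as a context manager. A minimal sketch of that same pattern, using only names that appear in the file; the option values and the URL are illustrative, not defaults taken from `_real_main`:

# Hypothetical standalone use of the API surface _real_main drives.
from youtube_dl import YoutubeDL

ydl_opts = {
    'format': 'bestaudio/best',             # the fallback _real_main picks for audio-only runs
    'outtmpl': '%(title)s-%(id)s.%(ext)s',  # one of the templates _real_main can select
    'ignoreerrors': True,
}
with YoutubeDL(ydl_opts) as ydl:
    # download() returns a retcode, which is how _real_main obtains its exit status
    retcode = ydl.download(['https://example.invalid/some-video'])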
<|file_name|>bridges.py<|end_file_name|><|fim▁begin|>import threading

import upnp
import nupnp


class DiscoveryThread(threading.Thread):
    def __init__(self, bridges):
        super(DiscoveryThread, self).__init__()
        self.bridges = bridges
<|fim▁hole|>
    def run(self):
        self.upnp_thread.start()
        self.nupnp_thread.start()
        self.upnp_thread.join()
        self.nupnp_thread.join()


def discover():
    bridges = set()
    discovery_thread = DiscoveryThread(bridges)
    discovery_thread.start()
    discovery_thread.join()
    return bridges<|fim▁end|>
        self.upnp_thread = upnp.UPnPDiscoveryThread(self.bridges)
        self.nupnp_thread = nupnp.NUPnPDiscoveryThread(self.bridges)
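
A short usage sketch for the discovery module above, assuming the `upnp.UPnPDiscoveryThread` and `nupnp.NUPnPDiscoveryThread` helpers add any bridges they find to the shared set, as the completion implies:

# discover() blocks until both helper threads have joined, then returns the
# union of bridges found over UPnP and N-UPnP.
import bridges

found = bridges.discover()
for bridge in found:
    print(bridge)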
<|file_name|>type_coercion.rs<|end_file_name|><|fim▁begin|>// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Type coercion rules for functions with multiple valid signatures
//!
//! Coercion is performed automatically by DataFusion when the types
//! of arguments passed to a function do not exactly match the types
//! required by that function. In this case, DataFusion will attempt to
//! *coerce* the arguments to types accepted by the function by
//! inserting CAST operations.
//!
//! CAST operations added by coercion are lossless and never discard
//! information. For example coercion from i32 -> i64 might be
//! performed because all valid i32 values can be represented using an
//! i64. However, i64 -> i32 is never performed as there are i64
//! values which can not be represented by i32 values.

use std::sync::Arc;

use arrow::datatypes::{DataType, Schema};

use super::{functions::Signature, PhysicalExpr};
use crate::error::{DataFusionError, Result};
use crate::physical_plan::expressions::cast;

/// Returns `expressions` coerced to types compatible with
/// `signature`, if possible.
///
/// See the module level documentation for more detail on coercion.
pub fn coerce(
    expressions: &Vec<Arc<dyn PhysicalExpr>>,
    schema: &Schema,
    signature: &Signature,
) -> Result<Vec<Arc<dyn PhysicalExpr>>> {
    let current_types = expressions
        .iter()
        .map(|e| e.data_type(schema))
        .collect::<Result<Vec<_>>>()?;

    let new_types = data_types(&current_types, signature)?;

    expressions
        .iter()
        .enumerate()
        .map(|(i, expr)| cast(expr.clone(), &schema, new_types[i].clone()))
        .collect::<Result<Vec<_>>>()
}

/// Returns the data types that each argument must be coerced to match
/// `signature`.
///
/// See the module level documentation for more detail on coercion.
pub fn data_types(
    current_types: &Vec<DataType>,
    signature: &Signature,
) -> Result<Vec<DataType>> {
    let valid_types = match signature {
        Signature::Variadic(valid_types) => valid_types
            .iter()
            .map(|valid_type| current_types.iter().map(|_| valid_type.clone()).collect())
            .collect(),
        Signature::Uniform(number, valid_types) => valid_types
            .iter()
            .map(|valid_type| (0..*number).map(|_| valid_type.clone()).collect())
            .collect(),
        Signature::VariadicEqual => {
            // one entry with the same len as current_types, whose type is `current_types[0]`.
            vec![current_types
                .iter()
                .map(|_| current_types[0].clone())
                .collect()]
        }
        Signature::Exact(valid_types) => vec![valid_types.clone()],
        Signature::Any(number) => {
            if current_types.len() != *number {
                return Err(DataFusionError::Plan(format!(
                    "The function expected {} arguments but received {}",
                    number,
                    current_types.len()
                )));
            }
            vec![(0..*number).map(|i| current_types[i].clone()).collect()]
        }
    };

    if valid_types.contains(current_types) {
        return Ok(current_types.clone());
    }

    for valid_types in valid_types {
        if let Some(types) = maybe_data_types(&valid_types, &current_types) {
            return Ok(types);
        }
    }

    // none possible -> Error
    Err(DataFusionError::Plan(format!(
        "Coercion from {:?} to the signature {:?} failed.",
        current_types, signature
    )))
}

/// Try to coerce current_types into valid_types.
fn maybe_data_types(
    valid_types: &Vec<DataType>,
    current_types: &Vec<DataType>,
) -> Option<Vec<DataType>> {
    if valid_types.len() != current_types.len() {
        return None;
    }

    let mut new_type = Vec::with_capacity(valid_types.len());
    for (i, valid_type) in valid_types.iter().enumerate() {
        let current_type = &current_types[i];

        if current_type == valid_type {
            new_type.push(current_type.clone())
        } else {
            // attempt to coerce
            if can_coerce_from(valid_type, &current_type) {
                new_type.push(valid_type.clone())
            } else {
                // not possible
                return None;
            }
        }
    }
    Some(new_type)
}

/// Return true if a value of type `type_from` can be coerced
/// (losslessly converted) into a value of `type_to`
///
/// See the module level documentation for more detail on coercion.
pub fn can_coerce_from(type_into: &DataType, type_from: &DataType) -> bool {
    use self::DataType::*;
    match type_into {
        Int8 => match type_from {
            Int8 => true,
            _ => false,
        },
        Int16 => match type_from {
            Int8 | Int16 | UInt8 => true,
            _ => false,
        },
        Int32 => match type_from {
            Int8 | Int16 | Int32 | UInt8 | UInt16 => true,
            _ => false,
        },
        Int64 => match type_from {
            Int8 | Int16 | Int32 | Int64 | UInt8 | UInt16 | UInt32 => true,
            _ => false,
        },
        UInt8 => match type_from {
            UInt8 => true,
            _ => false,
        },
        UInt16 => match type_from {
            UInt8 | UInt16 => true,
            _ => false,
        },
        UInt32 => match type_from {
            UInt8 | UInt16 | UInt32 => true,
            _ => false,
        },
        UInt64 => match type_from {
            UInt8 | UInt16 | UInt32 | UInt64 => true,
            _ => false,
        },
        Float32 => match type_from {
            Int8 | Int16 | Int32 | Int64 => true,
            UInt8 | UInt16 | UInt32 | UInt64 => true,
            Float32 => true,
            _ => false,
        },
        Float64 => match type_from {
            Int8 | Int16 | Int32 | Int64 => true,
            UInt8 | UInt16 | UInt32 | UInt64 => true,
            Float32 | Float64 => true,
            _ => false,
        },
        Utf8 => true,
        _ => false,
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::physical_plan::expressions::col;
    use arrow::datatypes::{DataType, Field, Schema};

    #[test]
    fn test_maybe_data_types() -> Result<()> {
        // this vec contains: arg1, arg2, expected result
        let cases = vec![
            // 2 entries, same values
            (
                vec![DataType::UInt8, DataType::UInt16],
                vec![DataType::UInt8, DataType::UInt16],
                Some(vec![DataType::UInt8, DataType::UInt16]),
            ),
            // 2 entries, can coerce values
            (
                vec![DataType::UInt16, DataType::UInt16],
                vec![DataType::UInt8, DataType::UInt16],
                Some(vec![DataType::UInt16, DataType::UInt16]),
            ),
            // 0 entries, all good
            (vec![], vec![], Some(vec![])),
            // 2 entries, can't coerce
            (
                vec![DataType::Boolean, DataType::UInt16],
                vec![DataType::UInt8, DataType::UInt16],<|fim▁hole|>
            ),
            // u16 -> u32 is possible
            (
                vec![DataType::Boolean, DataType::UInt32],
                vec![DataType::Boolean, DataType::UInt16],
                Some(vec![DataType::Boolean, DataType::UInt32]),
            ),
        ];

        for case in cases {
            assert_eq!(maybe_data_types(&case.0, &case.1), case.2)
        }

        Ok(())
    }

    #[test]
    fn test_coerce() -> Result<()> {
        // create a schema
        let schema = |t: Vec<DataType>| {
            Schema::new(
                t.iter()
                    .enumerate()
                    .map(|(i, t)| Field::new(&*format!("c{}", i), t.clone(), true))
                    .collect(),
            )
        };

        // create a vector of expressions
        let expressions = |t: Vec<DataType>, schema| -> Result<Vec<_>> {
            t.iter()
                .enumerate()
                .map(|(i, t)| cast(col(&format!("c{}", i)), &schema, t.clone()))
                .collect::<Result<Vec<_>>>()
        };

        // create a case: input + expected result
        let case =
            |observed: Vec<DataType>, valid, expected: Vec<DataType>| -> Result<_> {
                let schema = schema(observed.clone());
                let expr = expressions(observed, schema.clone())?;
                let expected = expressions(expected, schema.clone())?;
                Ok((expr.clone(), schema, valid, expected))
            };

        let cases = vec![
            // u16 -> u32
            case(
                vec![DataType::UInt16],
                Signature::Uniform(1, vec![DataType::UInt32]),
                vec![DataType::UInt32],
            )?,
            // same type
            case(
                vec![DataType::UInt32, DataType::UInt32],
                Signature::Uniform(2, vec![DataType::UInt32]),
                vec![DataType::UInt32, DataType::UInt32],
            )?,
            case(
                vec![DataType::UInt32],
                Signature::Uniform(1, vec![DataType::Float32, DataType::Float64]),
                vec![DataType::Float32],
            )?,
            // u32 -> f32
            case(
                vec![DataType::UInt32, DataType::UInt32],
                Signature::Variadic(vec![DataType::Float32]),
                vec![DataType::Float32, DataType::Float32],
            )?,
            // u32 -> f32
            case(
                vec![DataType::Float32, DataType::UInt32],
                Signature::VariadicEqual,
                vec![DataType::Float32, DataType::Float32],
            )?,
            // common type is u64
            case(
                vec![DataType::UInt32, DataType::UInt64],
                Signature::Variadic(vec![DataType::UInt32, DataType::UInt64]),
                vec![DataType::UInt64, DataType::UInt64],
            )?,
            // f32 -> f32
            case(
                vec![DataType::Float32],
                Signature::Any(1),
                vec![DataType::Float32],
            )?,
        ];

        for case in cases {
            let observed = format!("{:?}", coerce(&case.0, &case.1, &case.2)?);
            let expected = format!("{:?}", case.3);
            assert_eq!(observed, expected);
        }

        // now cases that are expected to fail
        let cases = vec![
            // we do not know how to cast bool to UInt16 => fail
            case(
                vec![DataType::Boolean],
                Signature::Uniform(1, vec![DataType::UInt16]),
                vec![],
            )?,
            // u32 and bool are not uniform
            case(
                vec![DataType::UInt32, DataType::Boolean],
                Signature::VariadicEqual,
                vec![],
            )?,
            // bool is not castable to u32
            case(
                vec![DataType::Boolean, DataType::Boolean],
                Signature::Variadic(vec![DataType::UInt32]),
                vec![],
            )?,
            // expected two arguments
            case(vec![DataType::UInt32], Signature::Any(2), vec![])?,
        ];

        for case in cases {
            if let Ok(_) = coerce(&case.0, &case.1, &case.2) {
                return Err(DataFusionError::Plan(format!(
                    "Error was expected in {:?}",
                    case
                )));
            }
        }

        Ok(())
    }
}<|fim▁end|>
None,
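
A compact way to read the coercion rule encoded by `can_coerce_from` in the Rust file above is to tabulate it. The following is a minimal, hypothetical Python sketch of the same lossless-widening matrix; the string type names and the dict are illustrative only, hand-copied from the Rust match arms, and are not a DataFusion API.

# For each target type, the set of source types that can be widened
# into it without losing information (mirrors the Rust match arms).
COERCIBLE_FROM = {
    "Int8": {"Int8"},
    "Int16": {"Int8", "Int16", "UInt8"},
    "Int32": {"Int8", "Int16", "Int32", "UInt8", "UInt16"},
    "Int64": {"Int8", "Int16", "Int32", "Int64", "UInt8", "UInt16", "UInt32"},
    "UInt8": {"UInt8"},
    "UInt16": {"UInt8", "UInt16"},
    "UInt32": {"UInt8", "UInt16", "UInt32"},
    "UInt64": {"UInt8", "UInt16", "UInt32", "UInt64"},
    "Float32": {"Int8", "Int16", "Int32", "Int64",
                "UInt8", "UInt16", "UInt32", "UInt64", "Float32"},
    "Float64": {"Int8", "Int16", "Int32", "Int64",
                "UInt8", "UInt16", "UInt32", "UInt64", "Float32", "Float64"},
}

def can_coerce_from(type_into, type_from):
    # Utf8 accepts anything; every other target consults the matrix.
    if type_into == "Utf8":
        return True
    return type_from in COERCIBLE_FROM.get(type_into, set())

assert can_coerce_from("Int64", "UInt16")       # widening is lossless
assert not can_coerce_from("Int32", "UInt32")   # u32 values may exceed i32

Note how the matrix never mixes signed into unsigned targets: that is exactly the "never discard information" guarantee the module documentation describes.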
<|file_name|>sale.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from datetime import datetime, timedelta import time from openerp.osv import fields, osv from openerp.tools.translate import _ from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT import openerp.addons.decimal_precision as dp from openerp import workflow class res_company(osv.Model): _inherit = "res.company" _columns = { 'sale_note': fields.text('Default Terms and Conditions', translate=True, help="Default terms and conditions for quotations."), } class sale_order(osv.osv): _name = "sale.order" _inherit = ['mail.thread', 'ir.needaction_mixin'] _description = "Sales Order" _track = { 'state': { 'sale.mt_order_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state in ['manual', 'progress'], 'sale.mt_order_sent': lambda self, cr, uid, obj, ctx=None: obj.state in ['sent'] }, } def _amount_line_tax(self, cr, uid, line, context=None): val = 0.0 for c in self.pool.get('account.tax').compute_all(cr, uid, line.tax_id, line.price_unit * (1-(line.discount or 0.0)/100.0), line.product_uom_qty, line.product_id, line.order_id.partner_id)['taxes']: val += c.get('amount', 0.0) return val def _amount_all_wrapper(self, cr, uid, ids, field_name, arg, context=None): """ Wrapper because of direct method passing as parameter for function fields """ return self._amount_all(cr, uid, ids, field_name, arg, context=context) def _amount_all(self, cr, uid, ids, field_name, arg, context=None): cur_obj = self.pool.get('res.currency') res = {} for order in self.browse(cr, uid, ids, context=context): res[order.id] = { 'amount_untaxed': 0.0, 'amount_tax': 0.0, 'amount_total': 0.0, } val = val1 = 0.0 cur = order.pricelist_id.currency_id for line in order.order_line: val1 += line.price_subtotal val += self._amount_line_tax(cr, uid, line, context=context) res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val) res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1) res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax'] return res def _invoiced_rate(self, cursor, user, ids, name, arg, context=None): res = {} for sale in self.browse(cursor, user, ids, context=context): if sale.invoiced: res[sale.id] = 100.0 continue tot = 0.0 for invoice in sale.invoice_ids: if invoice.state not in ('draft', 'cancel'): tot += invoice.amount_untaxed if tot: res[sale.id] = min(100.0, tot * 100.0 / (sale.amount_untaxed or 1.00)) else: res[sale.id] = 0.0 return res def _invoice_exists(self, cursor, user, ids, name, arg, context=None): res = {} for sale in self.browse(cursor, user, ids, 
context=context): res[sale.id] = False if sale.invoice_ids: res[sale.id] = True return res def _invoiced(self, cursor, user, ids, name, arg, context=None): res = {} for sale in self.browse(cursor, user, ids, context=context): res[sale.id] = True invoice_existence = False for invoice in sale.invoice_ids: if invoice.state!='cancel': invoice_existence = True if invoice.state != 'paid': res[sale.id] = False break if not invoice_existence or sale.state == 'manual': res[sale.id] = False return res def _invoiced_search(self, cursor, user, obj, name, args, context=None): if not len(args): return [] clause = '' sale_clause = '' no_invoiced = False for arg in args: if (arg[1] == '=' and arg[2]) or (arg[1] == '!=' and not arg[2]): clause += 'AND inv.state = \'paid\'' else: clause += 'AND inv.state != \'cancel\' AND sale.state != \'cancel\' AND inv.state <> \'paid\' AND rel.order_id = sale.id ' sale_clause = ', sale_order AS sale ' no_invoiced = True cursor.execute('SELECT rel.order_id ' \ 'FROM sale_order_invoice_rel AS rel, account_invoice AS inv '+ sale_clause + \ 'WHERE rel.invoice_id = inv.id ' + clause) res = cursor.fetchall() if no_invoiced: cursor.execute('SELECT sale.id ' \ 'FROM sale_order AS sale ' \ 'WHERE sale.id NOT IN ' \ '(SELECT rel.order_id ' \ 'FROM sale_order_invoice_rel AS rel) and sale.state != \'cancel\'') res.extend(cursor.fetchall()) if not res: return [('id', '=', 0)] return [('id', 'in', [x[0] for x in res])] def _get_order(self, cr, uid, ids, context=None): result = {} for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context): result[line.order_id.id] = True return result.keys() def _get_default_company(self, cr, uid, context=None): company_id = self.pool.get('res.users')._get_company(cr, uid, context=context) if not company_id: raise osv.except_osv(_('Error!'), _('There is no default company for the current user!')) return company_id def _get_default_section_id(self, cr, uid, context=None): """ Gives default section by checking if present in the context """ section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False if not section_id: section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False return section_id def _resolve_section_id_from_context(self, cr, uid, context=None): """ Returns ID of section based on the value of 'section_id' context key, or None if it cannot be resolved to a single Sales Team. 
""" if context is None: context = {} if type(context.get('default_section_id')) in (int, long): return context.get('default_section_id') if isinstance(context.get('default_section_id'), basestring): section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context) if len(section_ids) == 1: return int(section_ids[0][0]) return None _columns = { 'name': fields.char('Order Reference', required=True, copy=False, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True), 'origin': fields.char('Source Document', help="Reference of the document that generated this sales order request."), 'client_order_ref': fields.char('Reference/Description', copy=False), 'state': fields.selection([ ('draft', 'Draft Quotation'), ('sent', 'Quotation Sent'), ('cancel', 'Cancelled'), ('waiting_date', 'Waiting Schedule'), ('progress', 'Sales Order'), ('manual', 'Sale to Invoice'), ('shipping_except', 'Shipping Exception'), ('invoice_except', 'Invoice Exception'), ('done', 'Done'), ], 'Status', readonly=True, copy=False, help="Gives the status of the quotation or sales order.\ \nThe exception status is automatically set when a cancel operation occurs \ in the invoice validation (Invoice Exception) or in the picking list process (Shipping Exception).\nThe 'Waiting Schedule' status is set when the invoice is confirmed\ but waiting for the scheduler to run on the order date.", select=True), 'date_order': fields.datetime('Date', required=True, readonly=True, select=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, copy=False), 'create_date': fields.datetime('Creation Date', readonly=True, select=True, help="Date on which sales order is created."), 'date_confirm': fields.date('Confirmation Date', readonly=True, select=True, help="Date on which sales order is confirmed.", copy=False), 'user_id': fields.many2one('res.users', 'Salesperson', states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True, track_visibility='onchange'), 'partner_id': fields.many2one('res.partner', 'Customer', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, required=True, change_default=True, select=True, track_visibility='always'), 'partner_invoice_id': fields.many2one('res.partner', 'Invoice Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Invoice address for current sales order."), 'partner_shipping_id': fields.many2one('res.partner', 'Delivery Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Delivery address for current sales order."), 'order_policy': fields.selection([ ('manual', 'On Demand'), ], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="""This field controls how invoice and delivery operations are synchronized."""), 'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Pricelist for current sales order."), 'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency", readonly=True, required=True), 'project_id': fields.many2one('account.analytic.account', 'Contract / Analytic', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, 
help="The analytic account related to a sales order."), 'order_line': fields.one2many('sale.order.line', 'order_id', 'Order Lines', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, copy=True), 'invoice_ids': fields.many2many('account.invoice', 'sale_order_invoice_rel', 'order_id', 'invoice_id', 'Invoices', readonly=True, copy=False, help="This is the list of invoices that have been generated for this sales order. The same sales order may have been invoiced in several times (by line for example)."), 'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced Ratio', type='float'), 'invoiced': fields.function(_invoiced, string='Paid', fnct_search=_invoiced_search, type='boolean', help="It indicates that an invoice has been paid."), 'invoice_exists': fields.function(_invoice_exists, string='Invoiced', fnct_search=_invoiced_search, type='boolean', help="It indicates that sales order has at least one invoice."), 'note': fields.text('Terms and conditions'), 'amount_untaxed': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Untaxed Amount', store={ 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10), 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10), }, multi='sums', help="The amount without tax.", track_visibility='always'), 'amount_tax': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Taxes', store={ 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10), 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10), }, multi='sums', help="The tax amount."), 'amount_total': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Total', store={ 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10), 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10), }, multi='sums', help="The total amount."), 'payment_term': fields.many2one('account.payment.term', 'Payment Term'), 'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'), 'company_id': fields.many2one('res.company', 'Company'), 'section_id': fields.many2one('crm.case.section', 'Sales Team'), 'procurement_group_id': fields.many2one('procurement.group', 'Procurement group', copy=False), 'product_id': fields.related('order_line', 'product_id', type='many2one', relation='product.product', string='Product'), } _defaults = { 'date_order': fields.datetime.now, 'order_policy': 'manual', 'company_id': _get_default_company, 'state': 'draft', 'user_id': lambda obj, cr, uid, context: uid, 'name': lambda obj, cr, uid, context: '/', 'partner_invoice_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').address_get(cr, uid, [context['partner_id']], ['invoice'])['invoice'], 'partner_shipping_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').address_get(cr, uid, [context['partner_id']], ['delivery'])['delivery'], 'note': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.sale_note, 'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, c), } _sql_constraints = [ ('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'), ] _order = 'date_order desc, id desc' # Form filling def unlink(self, cr, uid, ids, 
context=None): sale_orders = self.read(cr, uid, ids, ['state'], context=context) unlink_ids = [] for s in sale_orders: if s['state'] in ['draft', 'cancel']: unlink_ids.append(s['id']) else: raise osv.except_osv(_('Invalid Action!'), _('In order to delete a confirmed sales order, you must cancel it before!')) return osv.osv.unlink(self, cr, uid, unlink_ids, context=context) def copy_quotation(self, cr, uid, ids, context=None): id = self.copy(cr, uid, ids[0], context=context) view_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale', 'view_order_form') view_id = view_ref and view_ref[1] or False, return { 'type': 'ir.actions.act_window', 'name': _('Sales Order'), 'res_model': 'sale.order', 'res_id': id, 'view_type': 'form', 'view_mode': 'form', 'view_id': view_id, 'target': 'current', 'nodestroy': True, } def onchange_pricelist_id(self, cr, uid, ids, pricelist_id, order_lines, context=None): context = context or {} if not pricelist_id: return {} value = { 'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id } if not order_lines or order_lines == [(6, 0, [])]: return {'value': value} warning = { 'title': _('Pricelist Warning!'), 'message' : _('If you change the pricelist of this order (and eventually the currency), prices of existing order lines will not be updated.') } return {'warning': warning, 'value': value} def get_salenote(self, cr, uid, ids, partner_id, context=None): context_lang = context.copy() if partner_id: partner_lang = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context).lang context_lang.update({'lang': partner_lang}) return self.pool.get('res.users').browse(cr, uid, uid, context=context_lang).company_id.sale_note def onchange_delivery_id(self, cr, uid, ids, company_id, partner_id, delivery_id, fiscal_position, context=None): r = {'value': {}} if not company_id: company_id = self._get_default_company(cr, uid, context=context) fiscal_position = self.pool['account.fiscal.position'].get_fiscal_position(cr, uid, company_id, partner_id, delivery_id, context=context) if fiscal_position: r['value']['fiscal_position'] = fiscal_position return r def onchange_partner_id(self, cr, uid, ids, part, context=None): if not part: return {'value': {'partner_invoice_id': False, 'partner_shipping_id': False, 'payment_term': False, 'fiscal_position': False}} part = self.pool.get('res.partner').browse(cr, uid, part, context=context) addr = self.pool.get('res.partner').address_get(cr, uid, [part.id], ['delivery', 'invoice', 'contact']) pricelist = part.property_product_pricelist and part.property_product_pricelist.id or False invoice_part = self.pool.get('res.partner').browse(cr, uid, addr['invoice'], context=context) payment_term = invoice_part.property_payment_term and invoice_part.property_payment_term.id or False dedicated_salesman = part.user_id and part.user_id.id or uid val = { 'partner_invoice_id': addr['invoice'], 'partner_shipping_id': addr['delivery'], 'payment_term': payment_term, 'user_id': dedicated_salesman, } delivery_onchange = self.onchange_delivery_id(cr, uid, ids, False, part.id, addr['delivery'], False, context=context) val.update(delivery_onchange['value']) if pricelist: val['pricelist_id'] = pricelist if not self._get_default_section_id(cr, uid, context=context) and part.section_id: val['section_id'] = part.section_id.id sale_note = self.get_salenote(cr, uid, ids, part.id, context=context) if sale_note: val.update({'note': sale_note}) return {'value': val} def create(self, cr, 
uid, vals, context=None): if context is None: context = {} if vals.get('name', '/') == '/': vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'sale.order', context=context) or '/' if vals.get('partner_id') and any(f not in vals for f in ['partner_invoice_id', 'partner_shipping_id', 'pricelist_id', 'fiscal_position']): defaults = self.onchange_partner_id(cr, uid, [], vals['partner_id'], context=context)['value'] if not vals.get('fiscal_position') and vals.get('partner_shipping_id'): delivery_onchange = self.onchange_delivery_id(cr, uid, [], vals.get('company_id'), None, vals['partner_id'], vals.get('partner_shipping_id'), context=context) defaults.update(delivery_onchange['value']) vals = dict(defaults, **vals) ctx = dict(context or {}, mail_create_nolog=True) new_id = super(sale_order, self).create(cr, uid, vals, context=ctx) self.message_post(cr, uid, [new_id], body=_("Quotation created"), context=ctx) return new_id def button_dummy(self, cr, uid, ids, context=None): return True # FIXME: deprecated method, overriders should be using _prepare_invoice() instead. # can be removed after 6.1. def _inv_get(self, cr, uid, order, context=None): return {} def _prepare_invoice(self, cr, uid, order, lines, context=None): """Prepare the dict of values to create the new invoice for a sales order. This method may be overridden to implement custom invoice generation (making sure to call super() to establish a clean extension chain). :param browse_record order: sale.order record to invoice :param list(int) line: list of invoice line IDs that must be attached to the invoice :return: dict of value to create() the invoice """ if context is None: context = {} journal_ids = self.pool.get('account.journal').search(cr, uid, [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)], limit=1) if not journal_ids: raise osv.except_osv(_('Error!'), _('Please define sales journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id)) invoice_vals = { 'name': order.client_order_ref or '', 'origin': order.name, 'type': 'out_invoice', 'reference': order.client_order_ref or order.name, 'account_id': order.partner_invoice_id.property_account_receivable.id, 'partner_id': order.partner_invoice_id.id, 'journal_id': journal_ids[0], 'invoice_line': [(6, 0, lines)], 'currency_id': order.pricelist_id.currency_id.id, 'comment': order.note, 'payment_term': order.payment_term and order.payment_term.id or False, 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id, 'date_invoice': context.get('date_invoice', False), 'company_id': order.company_id.id, 'user_id': order.user_id and order.user_id.id or False, 'section_id' : order.section_id.id } # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1 invoice_vals.update(self._inv_get(cr, uid, order, context=context)) return invoice_vals def _make_invoice(self, cr, uid, order, lines, context=None): inv_obj = self.pool.get('account.invoice') obj_invoice_line = self.pool.get('account.invoice.line') if context is None: context = {} invoiced_sale_line_ids = self.pool.get('sale.order.line').search(cr, uid, [('order_id', '=', order.id), ('invoiced', '=', True)], context=context) from_line_invoice_ids = [] for invoiced_sale_line_id in self.pool.get('sale.order.line').browse(cr, uid, invoiced_sale_line_ids, context=context): for invoice_line_id in invoiced_sale_line_id.invoice_lines: if invoice_line_id.invoice_id.id not in from_line_invoice_ids: 
from_line_invoice_ids.append(invoice_line_id.invoice_id.id) for preinv in order.invoice_ids: if preinv.state not in ('cancel',) and preinv.id not in from_line_invoice_ids: for preline in preinv.invoice_line: inv_line_id = obj_invoice_line.copy(cr, uid, preline.id, {'invoice_id': False, 'price_unit': -preline.price_unit}) lines.append(inv_line_id) inv = self._prepare_invoice(cr, uid, order, lines, context=context) inv_id = inv_obj.create(cr, uid, inv, context=context) data = inv_obj.onchange_payment_term_date_invoice(cr, uid, [inv_id], inv['payment_term'], time.strftime(DEFAULT_SERVER_DATE_FORMAT)) if data.get('value', False): inv_obj.write(cr, uid, [inv_id], data['value'], context=context) inv_obj.button_compute(cr, uid, [inv_id]) return inv_id def print_quotation(self, cr, uid, ids, context=None): ''' This function prints the sales order and marks it as sent, so that we can see more easily the next step of the workflow ''' assert len(ids) == 1, 'This option should only be used for a single id at a time' self.signal_workflow(cr, uid, ids, 'quotation_sent') return self.pool['report'].get_action(cr, uid, ids, 'sale.report_saleorder', context=context) def manual_invoice(self, cr, uid, ids, context=None): """ create invoices for the given sales orders (ids), and open the form view of one of the newly created invoices """ mod_obj = self.pool.get('ir.model.data') # create invoices through the sales orders' workflow inv_ids0 = set(inv.id for sale in self.browse(cr, uid, ids, context) for inv in sale.invoice_ids) self.signal_workflow(cr, uid, ids, 'manual_invoice') inv_ids1 = set(inv.id for sale in self.browse(cr, uid, ids, context) for inv in sale.invoice_ids) # determine newly created invoices new_inv_ids = list(inv_ids1 - inv_ids0) res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form') res_id = res and res[1] or False, return { 'name': _('Customer Invoices'), 'view_type': 'form', 'view_mode': 'form', 'view_id': [res_id], 'res_model': 'account.invoice', 'context': "{'type':'out_invoice'}", 'type': 'ir.actions.act_window', 'nodestroy': True, 'target': 'current', 'res_id': new_inv_ids and new_inv_ids[0] or False, } def action_view_invoice(self, cr, uid, ids, context=None): ''' This function returns an action that displays the existing invoices of the given sales order ids. It can either be in a list or in a form view, if there is only one invoice to show. 
''' mod_obj = self.pool.get('ir.model.data') act_obj = self.pool.get('ir.actions.act_window') result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree1') id = result and result[1] or False result = act_obj.read(cr, uid, [id], context=context)[0] #compute the number of invoices to display inv_ids = [] for so in self.browse(cr, uid, ids, context=context): inv_ids += [invoice.id for invoice in so.invoice_ids] #choose the view_mode accordingly if len(inv_ids)>1: result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]" else: res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form') result['views'] = [(res and res[1] or False, 'form')] result['res_id'] = inv_ids and inv_ids[0] or False return result def test_no_product(self, cr, uid, order, context): for line in order.order_line: if line.state == 'cancel': continue if line.product_id and (line.product_id.type<>'service'): return False return True def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice = False, context=None): if states is None: states = ['confirmed', 'done', 'exception'] res = False invoices = {} invoice_ids = [] invoice = self.pool.get('account.invoice') obj_sale_order_line = self.pool.get('sale.order.line') partner_currency = {} # If date was specified, use it as the invoice date; useful when invoices are generated this month and put the # last day of the last month as invoice date if date_invoice: context = dict(context or {}, date_invoice=date_invoice) for o in self.browse(cr, uid, ids, context=context): currency_id = o.pricelist_id.currency_id.id if (o.partner_id.id in partner_currency) and (partner_currency[o.partner_id.id] <> currency_id): raise osv.except_osv( _('Error!'), _('You cannot group sales having different currencies for the same partner.')) partner_currency[o.partner_id.id] = currency_id lines = [] for line in o.order_line: if line.invoiced: continue elif (line.state in states): lines.append(line.id) created_lines = obj_sale_order_line.invoice_line_create(cr, uid, lines) if created_lines: invoices.setdefault(o.partner_invoice_id.id or o.partner_id.id, []).append((o, created_lines)) if not invoices: for o in self.browse(cr, uid, ids, context=context): for i in o.invoice_ids: if i.state == 'draft': return i.id for val in invoices.values(): if grouped: res = self._make_invoice(cr, uid, val[0][0], reduce(lambda x, y: x + y, [l for o, l in val], []), context=context) invoice_ref = '' origin_ref = '' for o, l in val: invoice_ref += (o.client_order_ref or o.name) + '|' origin_ref += (o.origin or o.name) + '|' self.write(cr, uid, [o.id], {'state': 'progress'}) cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (o.id, res)) self.invalidate_cache(cr, uid, ['invoice_ids'], [o.id], context=context) #remove last '|' in invoice_ref if len(invoice_ref) >= 1: invoice_ref = invoice_ref[:-1] if len(origin_ref) >= 1: origin_ref = origin_ref[:-1] invoice.write(cr, uid, [res], {'origin': origin_ref, 'name': invoice_ref}) else: for order, il in val: res = self._make_invoice(cr, uid, order, il, context=context) invoice_ids.append(res) self.write(cr, uid, [order.id], {'state': 'progress'}) cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (order.id, res)) self.invalidate_cache(cr, uid, ['invoice_ids'], [order.id], context=context) return res def action_invoice_cancel(self, cr, uid, ids, context=None): self.write(cr, uid, ids, {'state': 'invoice_except'}, context=context) return True def 
action_invoice_end(self, cr, uid, ids, context=None): for this in self.browse(cr, uid, ids, context=context): for line in this.order_line: if line.state == 'exception': line.write({'state': 'confirmed'}) if this.state == 'invoice_except': this.write({'state': 'progress'}) return True def action_cancel(self, cr, uid, ids, context=None): if context is None: context = {} sale_order_line_obj = self.pool.get('sale.order.line') account_invoice_obj = self.pool.get('account.invoice') for sale in self.browse(cr, uid, ids, context=context): for inv in sale.invoice_ids: if inv.state not in ('draft', 'cancel'): raise osv.except_osv( _('Cannot cancel this sales order!'), _('First cancel all invoices attached to this sales order.')) inv.signal_workflow('invoice_cancel') line_ids = [l.id for l in sale.order_line if l.state != 'cancel'] sale_order_line_obj.button_cancel(cr, uid, line_ids, context=context) self.write(cr, uid, ids, {'state': 'cancel'}) return True def action_button_confirm(self, cr, uid, ids, context=None): assert len(ids) == 1, 'This option should only be used for a single id at a time.' self.signal_workflow(cr, uid, ids, 'order_confirm') return True def action_wait(self, cr, uid, ids, context=None): context = context or {} for o in self.browse(cr, uid, ids): if not any(line.state != 'cancel' for line in o.order_line): raise osv.except_osv(_('Error!'),_('You cannot confirm a sales order which has no line.')) noprod = self.test_no_product(cr, uid, o, context) if (o.order_policy == 'manual') or noprod: self.write(cr, uid, [o.id], {'state': 'manual', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)}) else: self.write(cr, uid, [o.id], {'state': 'progress', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)}) self.pool.get('sale.order.line').button_confirm(cr, uid, [x.id for x in o.order_line if x.state != 'cancel']) return True def action_quotation_send(self, cr, uid, ids, context=None): ''' This function opens a window to compose an email, with the edi sale template message loaded by default ''' assert len(ids) == 1, 'This option should only be used for a single id at a time.' 
ir_model_data = self.pool.get('ir.model.data') try: template_id = ir_model_data.get_object_reference(cr, uid, 'sale', 'email_template_edi_sale')[1] except ValueError: template_id = False try: compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1] except ValueError: compose_form_id = False ctx = dict() ctx.update({ 'default_model': 'sale.order', 'default_res_id': ids[0], 'default_use_template': bool(template_id), 'default_template_id': template_id, 'default_composition_mode': 'comment', 'mark_so_as_sent': True }) return { 'type': 'ir.actions.act_window', 'view_type': 'form', 'view_mode': 'form', 'res_model': 'mail.compose.message', 'views': [(compose_form_id, 'form')], 'view_id': compose_form_id, 'target': 'new', 'context': ctx, } def action_done(self, cr, uid, ids, context=None): for order in self.browse(cr, uid, ids, context=context): self.pool.get('sale.order.line').write(cr, uid, [line.id for line in order.order_line if line.state != 'cancel'], {'state': 'done'}, context=context) return self.write(cr, uid, ids, {'state': 'done'}, context=context) def _prepare_order_line_procurement(self, cr, uid, order, line, group_id=False, context=None): date_planned = self._get_date_planned(cr, uid, order, line, order.date_order, context=context) return { 'name': line.name, 'origin': order.name, 'date_planned': date_planned, 'product_id': line.product_id.id, 'product_qty': line.product_uom_qty, 'product_uom': line.product_uom.id, 'product_uos_qty': (line.product_uos and line.product_uos_qty) or line.product_uom_qty, 'product_uos': (line.product_uos and line.product_uos.id) or line.product_uom.id, 'company_id': order.company_id.id, 'group_id': group_id, 'invoice_state': (order.order_policy == 'picking') and '2binvoiced' or 'none', 'sale_line_id': line.id } def _get_date_planned(self, cr, uid, order, line, start_date, context=None): date_planned = datetime.strptime(start_date, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(days=line.delay or 0.0) return date_planned def _prepare_procurement_group(self, cr, uid, order, context=None): return {'name': order.name, 'partner_id': order.partner_shipping_id.id} def procurement_needed(self, cr, uid, ids, context=None): #when sale is installed only, there is no need to create procurements, that's only #further installed modules (sale_service, sale_stock) that will change this. sale_line_obj = self.pool.get('sale.order.line') res = [] for order in self.browse(cr, uid, ids, context=context): res.append(sale_line_obj.need_procurement(cr, uid, [line.id for line in order.order_line if line.state != 'cancel'], context=context)) return any(res) def action_ignore_delivery_exception(self, cr, uid, ids, context=None): for sale_order in self.browse(cr, uid, ids, context=context): self.write(cr, uid, ids, {'state': 'progress' if sale_order.invoice_exists else 'manual'}, context=context) return True def action_ship_create(self, cr, uid, ids, context=None): """Create the required procurements to supply sales order lines, also connecting the procurements to appropriate stock moves in order to bring the goods to the sales order's requested location. 
:return: True """ context = context or {} context['lang'] = self.pool['res.users'].browse(cr, uid, uid).lang procurement_obj = self.pool.get('procurement.order') sale_line_obj = self.pool.get('sale.order.line') for order in self.browse(cr, uid, ids, context=context): proc_ids = [] vals = self._prepare_procurement_group(cr, uid, order, context=context) if not order.procurement_group_id: group_id = self.pool.get("procurement.group").create(cr, uid, vals, context=context) order.write({'procurement_group_id': group_id}) for line in order.order_line: if line.state == 'cancel': continue #Try to fix exception procurement (possible when after a shipping exception the user choose to recreate) if line.procurement_ids: #first check them to see if they are in exception or not (one of the related moves is cancelled) procurement_obj.check(cr, uid, [x.id for x in line.procurement_ids if x.state not in ['cancel', 'done']]) line.refresh() #run again procurement that are in exception in order to trigger another move except_proc_ids = [x.id for x in line.procurement_ids if x.state in ('exception', 'cancel')] procurement_obj.reset_to_confirmed(cr, uid, except_proc_ids, context=context) proc_ids += except_proc_ids elif sale_line_obj.need_procurement(cr, uid, [line.id], context=context): if (line.state == 'done') or not line.product_id: continue vals = self._prepare_order_line_procurement(cr, uid, order, line, group_id=order.procurement_group_id.id, context=context) ctx = context.copy() ctx['procurement_autorun_defer'] = True proc_id = procurement_obj.create(cr, uid, vals, context=ctx) proc_ids.append(proc_id) #Confirm procurement order such that rules will be applied on it #note that the workflow normally ensure proc_ids isn't an empty list procurement_obj.run(cr, uid, proc_ids, context=context) #if shipping was in exception and the user choose to recreate the delivery order, write the new status of SO if order.state == 'shipping_except': val = {'state': 'progress', 'shipped': False} if (order.order_policy == 'manual'): for line in order.order_line: if (not line.invoiced) and (line.state not in ('cancel', 'draft')): val['state'] = 'manual' break order.write(val) return True def onchange_fiscal_position(self, cr, uid, ids, fiscal_position, order_lines, context=None): '''Update taxes of order lines for each line where a product is defined :param list ids: not used :param int fiscal_position: sale order fiscal position :param list order_lines: command list for one2many write method ''' order_line = [] fiscal_obj = self.pool.get('account.fiscal.position') product_obj = self.pool.get('product.product') line_obj = self.pool.get('sale.order.line') fpos = False if fiscal_position: fpos = fiscal_obj.browse(cr, uid, fiscal_position, context=context) for line in order_lines: # create (0, 0, { fields }) # update (1, ID, { fields }) if line[0] in [0, 1]: prod = None if line[2].get('product_id'): prod = product_obj.browse(cr, uid, line[2]['product_id'], context=context) elif line[1]: prod = line_obj.browse(cr, uid, line[1], context=context).product_id if prod and prod.taxes_id: line[2]['tax_id'] = [[6, 0, fiscal_obj.map_tax(cr, uid, fpos, prod.taxes_id)]] order_line.append(line) # link (4, ID) # link all (6, 0, IDS) elif line[0] in [4, 6]: line_ids = line[0] == 4 and [line[1]] or line[2] for line_id in line_ids: prod = line_obj.browse(cr, uid, line_id, context=context).product_id if prod and prod.taxes_id: order_line.append([1, line_id, {'tax_id': [[6, 0, fiscal_obj.map_tax(cr, uid, fpos, prod.taxes_id)]]}]) else: 
order_line.append([4, line_id]) else: order_line.append(line) return {'value': {'order_line': order_line, 'amount_untaxed': False, 'amount_tax': False, 'amount_total': False}} def test_procurements_done(self, cr, uid, ids, context=None): for sale in self.browse(cr, uid, ids, context=context): for line in sale.order_line: if line.state == 'cancel': continue if not all([x.state == 'done' for x in line.procurement_ids]): return False return True def test_procurements_except(self, cr, uid, ids, context=None): for sale in self.browse(cr, uid, ids, context=context): for line in sale.order_line: if line.state == 'cancel': continue if any([x.state == 'cancel' for x in line.procurement_ids]): return True return False # TODO add a field price_unit_uos # - update it on change product and unit price # - use it in report if there is a uos class sale_order_line(osv.osv): def need_procurement(self, cr, uid, ids, context=None): #when sale is installed only, there is no need to create procurements, that's only #further installed modules (sale_service, sale_stock) that will change this. prod_obj = self.pool.get('product.product') for line in self.browse(cr, uid, ids, context=context): if prod_obj.need_procurement(cr, uid, [line.product_id.id], context=context): return True return False def _amount_line(self, cr, uid, ids, field_name, arg, context=None): tax_obj = self.pool.get('account.tax') cur_obj = self.pool.get('res.currency') res = {} if context is None: context = {} for line in self.browse(cr, uid, ids, context=context): price = line.price_unit * (1 - (line.discount or 0.0) / 100.0) taxes = tax_obj.compute_all(cr, uid, line.tax_id, price, line.product_uom_qty, line.product_id, line.order_id.partner_id) cur = line.order_id.pricelist_id.currency_id res[line.id] = cur_obj.round(cr, uid, cur, taxes['total']) return res def _get_uom_id(self, cr, uid, *args): try: proxy = self.pool.get('ir.model.data') result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit') return result[1] except Exception, ex: return False def _fnct_line_invoiced(self, cr, uid, ids, field_name, args, context=None): res = dict.fromkeys(ids, False) for this in self.browse(cr, uid, ids, context=context): res[this.id] = this.invoice_lines and \ all(iline.invoice_id.state != 'cancel' for iline in this.invoice_lines) return res def _order_lines_from_invoice(self, cr, uid, ids, context=None): # direct access to the m2m table is the less convoluted way to achieve this (and is ok ACL-wise) cr.execute("""SELECT DISTINCT sol.id FROM sale_order_invoice_rel rel JOIN sale_order_line sol ON (sol.order_id = rel.order_id) WHERE rel.invoice_id = ANY(%s)""", (list(ids),)) return [i[0] for i in cr.fetchall()] def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None): res = dict.fromkeys(ids, 0.0) for line in self.browse(cr, uid, ids, context=context): res[line.id] = line.price_subtotal / line.product_uom_qty return res _name = 'sale.order.line' _description = 'Sales Order Line' _columns = { 'order_id': fields.many2one('sale.order', 'Order Reference', required=True, ondelete='cascade', select=True, readonly=True, states={'draft':[('readonly',False)]}), 'name': fields.text('Description', required=True, readonly=True, states={'draft': [('readonly', False)]}), 'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of sales order lines."), 'product_id': fields.many2one('product.product', 'Product', domain=[('sale_ok', '=', True)], change_default=True, readonly=True, states={'draft': 
[('readonly', False)]}, ondelete='restrict'), 'invoice_lines': fields.many2many('account.invoice.line', 'sale_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True, copy=False), 'invoiced': fields.function(_fnct_line_invoiced, string='Invoiced', type='boolean', store={ 'account.invoice': (_order_lines_from_invoice, ['state'], 10), 'sale.order.line': (lambda self,cr,uid,ids,ctx=None: ids, ['invoice_lines'], 10) }), 'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price'), readonly=True, states={'draft': [('readonly', False)]}), 'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')), 'price_reduce': fields.function(_get_price_reduce, type='float', string='Price Reduce', digits_compute=dp.get_precision('Product Price')), 'tax_id': fields.many2many('account.tax', 'sale_order_tax', 'order_line_id', 'tax_id', 'Taxes', readonly=True, states={'draft': [('readonly', False)]}), 'address_allotment_id': fields.many2one('res.partner', 'Allotment Partner',help="A partner to whom the particular product needs to be allotted."), 'product_uom_qty': fields.float('Quantity', digits_compute= dp.get_precision('Product UoS'), required=True, readonly=True, states={'draft': [('readonly', False)]}), 'product_uom': fields.many2one('product.uom', 'Unit of Measure ', required=True, readonly=True, states={'draft': [('readonly', False)]}), 'product_uos_qty': fields.float('Quantity (UoS)' ,digits_compute= dp.get_precision('Product UoS'), readonly=True, states={'draft': [('readonly', False)]}), 'product_uos': fields.many2one('product.uom', 'Product UoS'), 'discount': fields.float('Discount (%)', digits_compute= dp.get_precision('Discount'), readonly=True, states={'draft': [('readonly', False)]}), 'th_weight': fields.float('Weight', readonly=True, states={'draft': [('readonly', False)]}), 'state': fields.selection( [('cancel', 'Cancelled'),('draft', 'Draft'),('confirmed', 'Confirmed'),('exception', 'Exception'),('done', 'Done')], 'Status', required=True, readonly=True, copy=False, help='* The \'Draft\' status is set when the related sales order is in draft status. \ \n* The \'Confirmed\' status is set when the related sales order is confirmed. \ \n* The \'Exception\' status is set when the related sales order is set as exception. \ \n* The \'Done\' status is set when the sales order line has been picked. 
\ \n* The \'Cancelled\' status is set when a user cancels the related sales order.'), 'order_partner_id': fields.related('order_id', 'partner_id', type='many2one', relation='res.partner', store=True, string='Customer'), 'salesman_id':fields.related('order_id', 'user_id', type='many2one', relation='res.users', store=True, string='Salesperson'), 'company_id': fields.related('order_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True), 'delay': fields.float('Delivery Lead Time', required=True, help="Number of days between the order confirmation and the shipping of the products to the customer", readonly=True, states={'draft': [('readonly', False)]}), 'procurement_ids': fields.one2many('procurement.order', 'sale_line_id', 'Procurements'), } _order = 'order_id desc, sequence, id' _defaults = { 'product_uom' : _get_uom_id, 'discount': 0.0, 'product_uom_qty': 1, 'product_uos_qty': 1, 'sequence': 10, 'state': 'draft', 'price_unit': 0.0, 'delay': 0.0, } def _get_line_qty(self, cr, uid, line, context=None): if line.product_uos: return line.product_uos_qty or 0.0 return line.product_uom_qty def _get_line_uom(self, cr, uid, line, context=None): if line.product_uos: return line.product_uos.id return line.product_uom.id def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None): """Prepare the dict of values to create the new invoice line for a sales order line. This method may be overridden to implement custom invoice generation (making sure to call super() to establish a clean extension chain). :param browse_record line: sale.order.line record to invoice :param int account_id: optional ID of a G/L account to force (this is used for returning products including service) :return: dict of values to create() the invoice line """ res = {} if not line.invoiced: if not account_id: if line.product_id: account_id = line.product_id.property_account_income.id if not account_id: account_id = line.product_id.categ_id.property_account_income_categ.id if not account_id: raise osv.except_osv(_('Error!'), _('Please define income account for this product: "%s" (id:%d).') % \ (line.product_id.name, line.product_id.id,)) else: prop = self.pool.get('ir.property').get(cr, uid, 'property_account_income_categ', 'product.category', context=context) account_id = prop and prop.id or False uosqty = self._get_line_qty(cr, uid, line, context=context) uos_id = self._get_line_uom(cr, uid, line, context=context) pu = 0.0 if uosqty: pu = round(line.price_unit * line.product_uom_qty / uosqty, self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Price')) fpos = line.order_id.fiscal_position or False account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, account_id) if not account_id: raise osv.except_osv(_('Error!'), _('There is no Fiscal Position defined or Income category account defined for default properties of Product categories.')) res = { 'name': line.name, 'sequence': line.sequence, 'origin': line.order_id.name, 'account_id': account_id, 'price_unit': pu, 'quantity': uosqty, 'discount': line.discount, 'uos_id': uos_id, 'product_id': line.product_id.id or False, 'invoice_line_tax_id': [(6, 0, [x.id for x in line.tax_id])], 'account_analytic_id': line.order_id.project_id and line.order_id.project_id.id or False, } return res def invoice_line_create(self, cr, uid, ids, context=None): if context is None: context = {} create_ids = [] sales = set() for line in self.browse(cr, uid, ids, context=context): vals = 
self._prepare_order_line_invoice_line(cr, uid, line, False, context) if vals: inv_id = self.pool.get('account.invoice.line').create(cr, uid, vals, context=context) self.write(cr, uid, [line.id], {'invoice_lines': [(4, inv_id)]}, context=context) sales.add(line.order_id.id) create_ids.append(inv_id) # Trigger workflow events for sale_id in sales: workflow.trg_write(uid, 'sale.order', sale_id, cr) return create_ids def button_cancel(self, cr, uid, ids, context=None): lines = self.browse(cr, uid, ids, context=context) for line in lines: if line.invoiced: raise osv.except_osv(_('Invalid Action!'), _('You cannot cancel a sales order line that has already been invoiced.')) procurement_obj = self.pool['procurement.order'] procurement_obj.cancel(cr, uid, sum([l.procurement_ids.ids for l in lines], []), context=context) return self.write(cr, uid, ids, {'state': 'cancel'}) def button_confirm(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'confirmed'}) def button_done(self, cr, uid, ids, context=None): res = self.write(cr, uid, ids, {'state': 'done'}) for line in self.browse(cr, uid, ids, context=context): workflow.trg_write(uid, 'sale.order', line.order_id.id, cr) return res def uos_change(self, cr, uid, ids, product_uos, product_uos_qty=0, product_id=None): product_obj = self.pool.get('product.product') if not product_id: return {'value': {'product_uom': product_uos, 'product_uom_qty': product_uos_qty}, 'domain': {}} product = product_obj.browse(cr, uid, product_id) value = { 'product_uom': product.uom_id.id, } # FIXME must depend on uos/uom of the product and not only of the coeff. try: value.update({ 'product_uom_qty': product_uos_qty / product.uos_coeff, 'th_weight': product_uos_qty / product.uos_coeff * product.weight }) except ZeroDivisionError: pass return {'value': value} def create(self, cr, uid, values, context=None): if values.get('order_id') and values.get('product_id') and any(f not in values for f in ['name', 'price_unit', 'type', 'product_uom_qty', 'product_uom']): order = self.pool['sale.order'].read(cr, uid, values['order_id'], ['pricelist_id', 'partner_id', 'date_order', 'fiscal_position'], context=context) defaults = self.product_id_change(cr, uid, [], order['pricelist_id'][0], values['product_id'], qty=float(values.get('product_uom_qty', False)), uom=values.get('product_uom', False), qty_uos=float(values.get('product_uos_qty', False)), uos=values.get('product_uos', False), name=values.get('name', False), partner_id=order['partner_id'][0], date_order=order['date_order'], fiscal_position=order['fiscal_position'][0] if order['fiscal_position'] else False, flag=False, # Force name update context=context )['value'] if defaults.get('tax_id'): defaults['tax_id'] = [[6, 0, defaults['tax_id']]] values = dict(defaults, **values) return super(sale_order_line, self).create(cr, uid, values, context=context) def product_id_change(self, cr, uid, ids, pricelist, product, qty=0, uom=False, qty_uos=0, uos=False, name='', partner_id=False, lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None): context = context or {} lang = lang or context.get('lang', False) if not partner_id: raise osv.except_osv(_('No Customer Defined!'), _('Before choosing a product,\n select a customer in the sales form.')) warning = False product_uom_obj = self.pool.get('product.uom') partner_obj = self.pool.get('res.partner') product_obj = self.pool.get('product.product') context = {'lang': lang, 'partner_id': partner_id} partner = 
partner_obj.browse(cr, uid, partner_id) lang = partner.lang context_partner = {'lang': lang, 'partner_id': partner_id} if not product: return {'value': {'th_weight': 0, 'product_uos_qty': qty}, 'domain': {'product_uom': [], 'product_uos': []}} if not date_order: date_order = time.strftime(DEFAULT_SERVER_DATE_FORMAT) result = {} warning_msgs = '' product_obj = product_obj.browse(cr, uid, product, context=context_partner) uom2 = False if uom: uom2 = product_uom_obj.browse(cr, uid, uom) if product_obj.uom_id.category_id.id != uom2.category_id.id: uom = False if uos: if product_obj.uos_id: uos2 = product_uom_obj.browse(cr, uid, uos) if product_obj.uos_id.category_id.id != uos2.category_id.id: uos = False else: uos = False fpos = False if not fiscal_position: fpos = partner.property_account_position or False else: fpos = self.pool.get('account.fiscal.position').browse(cr, uid, fiscal_position) if update_tax: #The quantity only have changed result['tax_id'] = self.pool.get('account.fiscal.position').map_tax(cr, uid, fpos, product_obj.taxes_id) if not flag: result['name'] = self.pool.get('product.product').name_get(cr, uid, [product_obj.id], context=context_partner)[0][1] if product_obj.description_sale: result['name'] += '\n'+product_obj.description_sale domain = {} if (not uom) and (not uos): result['product_uom'] = product_obj.uom_id.id if product_obj.uos_id: result['product_uos'] = product_obj.uos_id.id result['product_uos_qty'] = qty * product_obj.uos_coeff uos_category_id = product_obj.uos_id.category_id.id else: result['product_uos'] = False result['product_uos_qty'] = qty uos_category_id = False result['th_weight'] = qty * product_obj.weight domain = {'product_uom': [('category_id', '=', product_obj.uom_id.category_id.id)], 'product_uos': [('category_id', '=', uos_category_id)]} elif uos and not uom: # only happens if uom is False result['product_uom'] = product_obj.uom_id and product_obj.uom_id.id result['product_uom_qty'] = qty_uos / product_obj.uos_coeff result['th_weight'] = result['product_uom_qty'] * product_obj.weight elif uom: # whether uos is set or not default_uom = product_obj.uom_id and product_obj.uom_id.id q = product_uom_obj._compute_qty(cr, uid, uom, qty, default_uom) if product_obj.uos_id: result['product_uos'] = product_obj.uos_id.id result['product_uos_qty'] = qty * product_obj.uos_coeff else: result['product_uos'] = False result['product_uos_qty'] = qty result['th_weight'] = q * product_obj.weight # Round the quantity up if not uom2: uom2 = product_obj.uom_id # get unit price if not pricelist: warn_msg = _('You have to select a pricelist or a customer in the sales form !\n' 'Please set one before choosing a product.') warning_msgs += _("No Pricelist ! : ") + warn_msg +"\n\n" else: price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist], product, qty or 1.0, partner_id, { 'uom': uom or result.get('product_uom'), 'date': date_order, })[pricelist] if price is False: warn_msg = _("Cannot find a pricelist line matching this product and quantity.\n" "You have to change either the product, the quantity or the pricelist.") warning_msgs += _("No valid pricelist line found ! 
:") + warn_msg +"\n\n" else: result.update({'price_unit': price}) if warning_msgs: warning = { 'title': _('Configuration Error!'), 'message' : warning_msgs } return {'value': result, 'domain': domain, 'warning': warning} def product_uom_change(self, cursor, user, ids, pricelist, product, qty=0, uom=False, qty_uos=0, uos=False, name='', partner_id=False, lang=False, update_tax=True, date_order=False, context=None): context = context or {} lang = lang or ('lang' in context and context['lang']) if not uom: return {'value': {'price_unit': 0.0, 'product_uom' : uom or False}} return self.product_id_change(cursor, user, ids, pricelist, product, qty=qty, uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id, lang=lang, update_tax=update_tax, date_order=date_order, context=context) def unlink(self, cr, uid, ids, context=None): if context is None: context = {} """Allows to delete sales order lines in draft,cancel states""" for rec in self.browse(cr, uid, ids, context=context): if rec.state not in ['draft', 'cancel']: raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a sales order line which is in state \'%s\'.') %(rec.state,)) return super(sale_order_line, self).unlink(cr, uid, ids, context=context) class mail_compose_message(osv.Model): _inherit = 'mail.compose.message' def send_mail(self, cr, uid, ids, context=None): context = context or {} if context.get('default_model') == 'sale.order' and context.get('default_res_id') and context.get('mark_so_as_sent'): context = dict(context, mail_post_autofollow=True) self.pool.get('sale.order').signal_workflow(cr, uid, [context['default_res_id']], 'quotation_sent') return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context) class account_invoice(osv.Model): _inherit = 'account.invoice' def _get_default_section_id(self, cr, uid, context=None): """ Gives default section by checking if present in the context """ section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False if not section_id: section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False return section_id def _resolve_section_id_from_context(self, cr, uid, context=None): """ Returns ID of section based on the value of 'section_id' context key, or None if it cannot be resolved to a single Sales Team. 
""" if context is None: context = {} if type(context.get('default_section_id')) in (int, long): return context.get('default_section_id') if isinstance(context.get('default_section_id'), basestring): section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context) if len(section_ids) == 1: return int(section_ids[0][0]) return None _columns = { 'section_id': fields.many2one('crm.case.section', 'Sales Team'), } _defaults = { 'section_id': lambda self, cr, uid, c=None: self._get_default_section_id(cr, uid, context=c) } def confirm_paid(self, cr, uid, ids, context=None): sale_order_obj = self.pool.get('sale.order') res = super(account_invoice, self).confirm_paid(cr, uid, ids, context=context) so_ids = sale_order_obj.search(cr, uid, [('invoice_ids', 'in', ids)], context=context) for so_id in so_ids: sale_order_obj.message_post(cr, uid, so_id, body=_("Invoice paid"), context=context) return res def unlink(self, cr, uid, ids, context=None): """ Overwrite unlink method of account invoice to send a trigger to the sale workflow upon invoice deletion """ invoice_ids = self.search(cr, uid, [('id', 'in', ids), ('state', 'in', ['draft', 'cancel'])], context=context) #if we can't cancel all invoices, do nothing if len(invoice_ids) == len(ids): #Cancel invoice(s) first before deleting them so that if any sale order is associated with them #it will trigger the workflow to put the sale order in an 'invoice exception' state for id in ids: workflow.trg_validate(uid, 'account.invoice', id, 'invoice_cancel', cr) return super(account_invoice, self).unlink(cr, uid, ids, context=context) class procurement_order(osv.osv): _inherit = 'procurement.order' _columns = { 'sale_line_id': fields.many2one('sale.order.line', string='Sale Order Line'), } def write(self, cr, uid, ids, vals, context=None): if isinstance(ids, (int, long)): ids = [ids] res = super(procurement_order, self).write(cr, uid, ids, vals, context=context) from openerp import workflow if vals.get('state') in ['done', 'cancel', 'exception']: for proc in self.browse(cr, uid, ids, context=context): if proc.sale_line_id and proc.sale_line_id.order_id: order_id = proc.sale_line_id.order_id.id if self.pool.get('sale.order').test_procurements_done(cr, uid, [order_id], context=context): workflow.trg_validate(uid, 'sale.order', order_id, 'ship_end', cr) if self.pool.get('sale.order').test_procurements_except(cr, uid, [order_id], context=context): workflow.trg_validate(uid, 'sale.order', order_id, 'ship_except', cr) return res class product_product(osv.Model): _inherit = 'product.product' def _sales_count(self, cr, uid, ids, field_name, arg, context=None): r = dict.fromkeys(ids, 0) domain = [ ('state', 'in', ['waiting_date','progress','manual', 'shipping_except', 'invoice_except', 'done']), ('product_id', 'in', ids), ] for group in self.pool['sale.report'].read_group(cr, uid, domain, ['product_id','product_uom_qty'], ['product_id'], context=context): r[group['product_id'][0]] = group['product_uom_qty'] return r def action_view_sales(self, cr, uid, ids, context=None): result = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'sale.action_order_line_product_tree', raise_if_not_found=True) result = self.pool['ir.actions.act_window'].read(cr, uid, [result], context=context)[0] result['domain'] = "[('product_id','in',[" + ','.join(map(str, ids)) + "])]" return result _columns = { 'sales_count': fields.function(_sales_count, string='# Sales', type='integer'), } class product_template(osv.Model): _inherit = 
'product.template' def _sales_count(self, cr, uid, ids, field_name, arg, context=None): res = dict.fromkeys(ids, 0) for template in self.browse(cr, uid, ids, context=context): res[template.id] = sum([p.sales_count for p in template.product_variant_ids]) return res def action_view_sales(self, cr, uid, ids, context=None): act_obj = self.pool.get('ir.actions.act_window') mod_obj = self.pool.get('ir.model.data') product_ids = [] for template in self.browse(cr, uid, ids, context=context):<|fim▁hole|> return result _columns = { 'sales_count': fields.function(_sales_count, string='# Sales', type='integer'), } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:<|fim▁end|>
product_ids += [x.id for x in template.product_variant_ids] result = mod_obj.xmlid_to_res_id(cr, uid, 'sale.action_order_line_product_tree',raise_if_not_found=True) result = act_obj.read(cr, uid, [result], context=context)[0] result['domain'] = "[('product_id','in',[" + ','.join(map(str, product_ids)) + "])]"
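The sale.py sample above resolves a Sales Team from the context in two steps: accept an integer id directly, otherwise fall back to a name search and accept the result only when it is unambiguous. Below is a minimal standalone sketch of that pattern; it is plain Python with no OpenERP dependency, and the SECTIONS table and the function name are made up for illustration — they are not part of the OpenERP API.

SECTIONS = {1: 'Direct Sales', 2: 'Website Sales', 3: 'Direct Marketing'}

def resolve_section_id(context):
    """Return a section id from a context dict, or None if unresolvable."""
    value = (context or {}).get('default_section_id')
    if isinstance(value, int):
        return value                      # an id was passed through directly
    if isinstance(value, str):
        # Mimics name_search: only an unambiguous (single) match is accepted.
        matches = [sid for sid, name in SECTIONS.items()
                   if value.lower() in name.lower()]
        if len(matches) == 1:
            return matches[0]
    return None

assert resolve_section_id({'default_section_id': 2}) == 2
assert resolve_section_id({'default_section_id': 'Website'}) == 2
assert resolve_section_id({'default_section_id': 'Direct'}) is None  # ambiguous
assert resolve_section_id({}) is None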
<|file_name|>echo_cmdline_client.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 """ echo_cmdline_client.py Copyright (c) 2018-2019 Alan Yorinks All right reserved. Python Banyan is free software; you can redistribute it and/or modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE Version 3 as published by the Free Software Foundation; either or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU AFFERO GENERAL PUBLIC LICENSE along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA """ import argparse import signal import sys import time from python_banyan.banyan_base import BanyanBase class EchoCmdClient(BanyanBase): """ This is an echo client that will allow the user to specify command line arguments to change the default behavior of the client. It sends out a series of messages and expects an echo reply from the server. When it completes, press enter, and it will send a message to the server so that it also quits To use: 1. Start the backplane. 2. Start the server. 3. Start this client. usage: echo_cmdline_client.py [-h] [-b BACK_PLANE_IP_ADDRESS] [-m NUMBER_OF_MESSAGES] [-n PROCESS_NAME] [-p PUBLISHER_PORT] [-s SUBSCRIBER_PORT] [-t LOOP_TIME] optional arguments: -h, --help show this help message and exit -b BACK_PLANE_IP_ADDRESS None or IP address used by Back Plane -m NUMBER_OF_MESSAGES Number of messages to publish -n PROCESS_NAME Set process name in banner -p PUBLISHER_PORT Publisher IP port -s SUBSCRIBER_PORT Subscriber IP port -t LOOP_TIME Event Loop Timer in seconds """ def __init__(self, **kwargs): """ kwargs is a dictionary that will contain the following keys: :param back_plane_ip_address: banyan_base back_planeIP Address - if not specified, it will be set to the local computer :param subscriber_port: banyan_base back plane subscriber port. This must match that of the banyan_base backplane :param publisher_port: banyan_base back plane publisher port. This must match that of the banyan_base backplane. 
:param number_of_messages: number of message to transmit :param process_name: Component identifier :param loop_time: receive loop sleep time """ # initialize the parent super(EchoCmdClient, self).__init__(back_plane_ip_address=kwargs['back_plane_ip_address'], subscriber_port=kwargs['subscriber_port'], publisher_port=kwargs['publisher_port'], process_name=kwargs['process_name'], loop_time=kwargs['loop_time']) # allow zmq connections to establish time.sleep(.3) # accept banyan messages with the topic of reply self.set_subscriber_topic('reply') # sequence number of messages self.message_number = kwargs['number_of_messages'] # number of messages to send self.number_of_messages = kwargs['number_of_messages'] # send the first message - make sure that the server is already started self.publish_payload({'message_number': self.message_number}, 'echo') self.message_number -= 1 # get the reply messages try: self.receive_loop() except KeyboardInterrupt: self.clean_up() sys.exit(0) def incoming_message_processing(self, topic, payload): """ Messages are sent here from the receive_loop :param topic: Message Topic string :param payload: Message Data :return: """ <|fim▁hole|> if payload['message_number'] == 0: print(str(self.number_of_messages) + ' messages sent and received. ') input('Press enter to exit.') self.clean_up() sys.exit(0) # bump the message number and send the message out else: self.message_number -= 1 if self.message_number >= 0: self.publish_payload({'message_number': self.message_number}, 'echo') def echo_cmdline_client(): parser = argparse.ArgumentParser() # allow user to bypass the IP address auto-discovery. # This is necessary if the component resides on a computer # other than the computing running the backplane. parser.add_argument("-b", dest="back_plane_ip_address", default="None", help="None or IP address used by Back Plane") parser.add_argument("-m", dest="number_of_messages", default="10", help="Number of messages to publish") # allow the user to specify a name for the component and have it shown on the console banner. # modify the default process name to one you wish to see on the banner. # change the default in the derived class to set the name parser.add_argument("-n", dest="process_name", default="EchoCmdClient", help="Set process name in banner") parser.add_argument("-p", dest="publisher_port", default='43124', help="Publisher IP port") parser.add_argument("-s", dest="subscriber_port", default='43125', help="Subscriber IP port") parser.add_argument("-t", dest="loop_time", default=".1", help="Event Loop Timer in seconds") args = parser.parse_args() if args.back_plane_ip_address == 'None': args.back_plane_ip_address = None kw_options = {'back_plane_ip_address': args.back_plane_ip_address, 'number_of_messages': int(args.number_of_messages), 'publisher_port': args.publisher_port, 'subscriber_port': args.subscriber_port, 'process_name': args.process_name, 'loop_time': float(args.loop_time)} # replace with the name of your class EchoCmdClient(**kw_options) # signal handler function called when Control-C occurs # noinspection PyShadowingNames,PyUnusedLocal,PyUnusedLocal def signal_handler(sig, frame): print('Exiting Through Signal Handler') raise KeyboardInterrupt # listen for SIGINT signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) if __name__ == '__main__': echo_cmdline_client()<|fim▁end|>
# When a message is received and its number is zero, finish up.
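The echo client above implements a simple countdown protocol over Banyan publish/subscribe: it publishes {'message_number': n} on the 'echo' topic, the server echoes the payload back on the 'reply' topic, and the client decrements and republishes until the counter reaches zero. The sketch below replays that exchange without a broker; the in-memory deque is a hypothetical stand-in for the backplane, not part of the python_banyan API.

from collections import deque

def run_echo_session(number_of_messages):
    bus = deque()                  # stands in for the pub/sub backplane
    exchanged = 0

    # client side: send the first message
    bus.append(('echo', {'message_number': number_of_messages}))

    while bus:
        topic, payload = bus.popleft()
        if topic == 'echo':        # server side: echo the payload back
            bus.append(('reply', payload))
        elif topic == 'reply':     # client side: count down and resend
            exchanged += 1
            if payload['message_number'] > 0:
                bus.append(('echo', {'message_number': payload['message_number'] - 1}))
    return exchanged

# One round trip per message number, including zero.
assert run_echo_session(10) == 11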
<|file_name|>OpenVRRenderer.py<|end_file_name|><|fim▁begin|>from ctypes import c_float, cast, POINTER import numpy as np import OpenGL.GL as gl import openvr from openvr.gl_renderer import OpenVrFramebuffer as OpenVRFramebuffer from openvr.gl_renderer import matrixForOpenVrMatrix as matrixForOpenVRMatrix from openvr.tracked_devices_actor import TrackedDevicesActor import gltfutils as gltfu c_float_p = POINTER(c_float) class OpenVRRenderer(object): def __init__(self, multisample=0, znear=0.1, zfar=1000): self.vr_system = openvr.init(openvr.VRApplication_Scene) w, h = self.vr_system.getRecommendedRenderTargetSize() self.vr_framebuffers = (OpenVRFramebuffer(w, h, multisample=multisample), OpenVRFramebuffer(w, h, multisample=multisample)) self.vr_compositor = openvr.VRCompositor() if self.vr_compositor is None: raise Exception('unable to create compositor') self.vr_framebuffers[0].init_gl() self.vr_framebuffers[1].init_gl() poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount self.poses = poses_t() self.projection_matrices = (np.asarray(matrixForOpenVRMatrix(self.vr_system.getProjectionMatrix(openvr.Eye_Left, znear, zfar))), np.asarray(matrixForOpenVRMatrix(self.vr_system.getProjectionMatrix(openvr.Eye_Right, znear, zfar)))) self.eye_transforms = (np.asarray(matrixForOpenVRMatrix(self.vr_system.getEyeToHeadTransform(openvr.Eye_Left)).I), np.asarray(matrixForOpenVRMatrix(self.vr_system.getEyeToHeadTransform(openvr.Eye_Right)).I)) self.view = np.eye(4, dtype=np.float32) self.view_matrices = (np.empty((4,4), dtype=np.float32), np.empty((4,4), dtype=np.float32)) self.controllers = TrackedDevicesActor(self.poses) self.controllers.show_controllers_only = False self.controllers.init_gl() self.vr_event = openvr.VREvent_t() def render(self, gltf, nodes, window_size=(800, 600)): self.vr_compositor.waitGetPoses(self.poses, openvr.k_unMaxTrackedDeviceCount, None, 0) hmd_pose = self.poses[openvr.k_unTrackedDeviceIndex_Hmd] if not hmd_pose.bPoseIsValid: return hmd_34 = np.ctypeslib.as_array(cast(hmd_pose.mDeviceToAbsoluteTracking.m, c_float_p), shape=(3,4)) self.view[:3,:] = hmd_34 view = np.linalg.inv(self.view.T) view.dot(self.eye_transforms[0], out=self.view_matrices[0]) view.dot(self.eye_transforms[1], out=self.view_matrices[1]) gl.glViewport(0, 0, self.vr_framebuffers[0].width, self.vr_framebuffers[0].height) for eye in (0, 1): gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.vr_framebuffers[eye].fb) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) gltfu.set_material_state.current_material = None gltfu.set_technique_state.current_technique = None for node in nodes: gltfu.draw_node(node, gltf, projection_matrix=self.projection_matrices[eye], view_matrix=self.view_matrices[eye]) self.controllers.display_gl(self.view_matrices[eye], self.projection_matrices[eye]) self.vr_compositor.submit(openvr.Eye_Left, self.vr_framebuffers[0].texture) self.vr_compositor.submit(openvr.Eye_Right, self.vr_framebuffers[1].texture) # mirror left eye framebuffer to screen: gl.glBlitNamedFramebuffer(self.vr_framebuffers[0].fb, 0, 0, 0, self.vr_framebuffers[0].width, self.vr_framebuffers[0].height, 0, 0, window_size[0], window_size[1],<|fim▁hole|> gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) def process_input(self): pass # state = self.vr_system.getControllerState(1) # if state and state.rAxis[1].x > 0.05: # self.vr_system.triggerHapticPulse(1, 0, int(3200 * state.rAxis[1].x)) # state = self.vr_system.getControllerState(2) # if state and state.rAxis[1].x > 0.05: # 
self.vr_system.triggerHapticPulse(2, 0, int(3200 * state.rAxis[1].x)) # if self.vr_system.pollNextEvent(self.vr_event): # if self.vr_event.eventType == openvr.VREvent_ButtonPress: # pass #print('vr controller button pressed') # elif self.vr_event.eventType == openvr.VREvent_ButtonUnpress: # pass #print('vr controller button unpressed') def shutdown(self): self.controllers.dispose_gl() openvr.shutdown()<|fim▁end|>
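The render() method above derives a per-eye view matrix by promoting the 3x4 HMD pose to a homogeneous 4x4, inverting it, and combining it with the inverse of each eye-to-head transform. A numpy-only sketch of that math follows; it uses a plain column-vector convention and glosses over the row-major transpose handling in the original, and the pose and eye-offset values are invented for the example.

import numpy as np

def eye_view_matrix(hmd_pose_3x4, eye_to_head_4x4):
    """Build the world-to-eye view matrix for one eye from the HMD pose."""
    pose = np.eye(4)
    pose[:3, :] = hmd_pose_3x4                      # promote 3x4 pose to 4x4
    head_view = np.linalg.inv(pose)                 # world -> head space
    eye_from_head = np.linalg.inv(eye_to_head_4x4)  # head -> eye space
    return eye_from_head @ head_view                # world -> eye space

# HMD sitting 1.5m up with no rotation; left eye offset -32mm on x (made-up IPD).
hmd = np.array([[1, 0, 0, 0], [0, 1, 0, 1.5], [0, 0, 1, 0]], dtype=float)
left_eye = np.eye(4)
left_eye[0, 3] = -0.032
view = eye_view_matrix(hmd, left_eye)
print(np.round(view, 3))  # translation column undoes both the pose and the eye offset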
<|file_name|>connected-position-strategy.spec.ts<|end_file_name|><|fim▁begin|>import {ElementRef} from '@angular/core'; import {ConnectedPositionStrategy} from './connected-position-strategy'; import {ViewportRuler} from './viewport-ruler'; import {OverlayPositionBuilder} from './overlay-position-builder'; import {ConnectedOverlayPositionChange} from './connected-position'; import {Scrollable} from '../scroll/scrollable'; import {Subscription} from 'rxjs'; import Spy = jasmine.Spy; // Default width and height of the overlay and origin panels throughout these tests. const DEFAULT_HEIGHT = 30; const DEFAULT_WIDTH = 60; // For all tests, we assume the browser window is 1024x786 (outerWidth x outerHeight). // The karma config has been set to this for local tests, and it is the default size // for tests on CI (both SauceLabs and Browserstack). describe('ConnectedPositionStrategy', () => { describe('with origin on document body', () => { const ORIGIN_HEIGHT = DEFAULT_HEIGHT; const ORIGIN_WIDTH = DEFAULT_WIDTH; const OVERLAY_HEIGHT = DEFAULT_HEIGHT; const OVERLAY_WIDTH = DEFAULT_WIDTH; let originElement: HTMLElement; let overlayElement: HTMLElement; let overlayContainerElement: HTMLElement; let strategy: ConnectedPositionStrategy; let fakeElementRef: ElementRef; let fakeViewportRuler: FakeViewportRuler; let positionBuilder: OverlayPositionBuilder; let originRect: ClientRect; let originCenterX: number; let originCenterY: number; beforeEach(() => { fakeViewportRuler = new FakeViewportRuler(); // The origin and overlay elements need to be in the document body in order to have geometry. originElement = createPositionedBlockElement(); overlayContainerElement = createFixedElement(); overlayElement = createPositionedBlockElement(); document.body.appendChild(originElement); document.body.appendChild(overlayContainerElement); overlayContainerElement.appendChild(overlayElement); fakeElementRef = new FakeElementRef(originElement); positionBuilder = new OverlayPositionBuilder(new ViewportRuler()); }); afterEach(() => { document.body.removeChild(originElement); document.body.removeChild(overlayContainerElement); // Reset the origin geometry after each test so we don't accidently keep state between tests. originRect = null; originCenterX = null; originCenterY = null; }); describe('when not near viewport edge, not scrolled', () => { // Place the original element close to the center of the window. // (1024 / 2, 768 / 2). It's not exact, since outerWidth/Height includes browser // chrome, but it doesn't really matter for these tests. const ORIGIN_LEFT = 500; const ORIGIN_TOP = 350; beforeEach(() => { originElement.style.left = `${ORIGIN_LEFT}px`; originElement.style.top = `${ORIGIN_TOP}px`; originRect = originElement.getBoundingClientRect(); originCenterX = originRect.left + (ORIGIN_WIDTH / 2); originCenterY = originRect.top + (ORIGIN_HEIGHT / 2); }); // Preconditions are set, now just run the full set of simple position tests. runSimplePositionTests(); }); describe('when scrolled', () => { // Place the original element decently far outside the unscrolled document (1024x768). const ORIGIN_LEFT = 2500; const ORIGIN_TOP = 2500; // Create a very large element that will make the page scrollable. let veryLargeElement: HTMLElement = document.createElement('div'); veryLargeElement.style.width = '4000px'; veryLargeElement.style.height = '4000px'; beforeEach(() => { // Scroll the page such that the origin element is roughly in the // center of the visible viewport (2500 - 1024/2, 2500 - 768/2). 
document.body.appendChild(veryLargeElement); document.body.scrollTop = 2100; document.body.scrollLeft = 2100; originElement.style.top = `${ORIGIN_TOP}px`; originElement.style.left = `${ORIGIN_LEFT}px`; originRect = originElement.getBoundingClientRect(); originCenterX = originRect.left + (ORIGIN_WIDTH / 2); originCenterY = originRect.top + (ORIGIN_HEIGHT / 2); }); afterEach(() => { document.body.removeChild(veryLargeElement); document.body.scrollTop = 0; document.body.scrollLeft = 0; }); // Preconditions are set, now just run the full set of simple position tests. runSimplePositionTests(); }); describe('when near viewport edge', () => { it('should reposition the overlay if it would go off the top of the screen', () => { // We can use the real ViewportRuler in this test since we know that zero is // always the top of the viewport. originElement.style.top = '5px'; originElement.style.left = '200px'; originRect = originElement.getBoundingClientRect(); strategy = positionBuilder.connectedTo( fakeElementRef, {originX: 'end', originY: 'top'}, {overlayX: 'end', overlayY: 'bottom'}) .withFallbackPosition( {originX: 'start', originY: 'bottom'}, {overlayX: 'start', overlayY: 'top'}); strategy.apply(overlayElement); let overlayRect = overlayElement.getBoundingClientRect(); expect(overlayRect.top).toBe(originRect.bottom); expect(overlayRect.left).toBe(originRect.left); }); it('should reposition the overlay if it would go off the left of the screen', () => { // We can use the real ViewportRuler in this test since we know that zero is // always the left edge of the viewport.<|fim▁hole|> originElement.style.left = '5px'; originRect = originElement.getBoundingClientRect(); originCenterY = originRect.top + (ORIGIN_HEIGHT / 2); strategy = positionBuilder.connectedTo( fakeElementRef, {originX: 'start', originY: 'bottom'}, {overlayX: 'end', overlayY: 'top'}) .withFallbackPosition( {originX: 'end', originY: 'center'}, {overlayX: 'start', overlayY: 'center'}); strategy.apply(overlayElement); let overlayRect = overlayElement.getBoundingClientRect(); expect(overlayRect.top).toBe(originCenterY - (OVERLAY_HEIGHT / 2)); expect(overlayRect.left).toBe(originRect.right); }); it('should reposition the overlay if it would go off the bottom of the screen', () => { // Use the fake viewport ruler because we don't know *exactly* how big the viewport is. fakeViewportRuler.fakeRect = { top: 0, left: 0, width: 500, height: 500, right: 500, bottom: 500 }; positionBuilder = new OverlayPositionBuilder(fakeViewportRuler); originElement.style.top = '475px'; originElement.style.left = '200px'; originRect = originElement.getBoundingClientRect(); strategy = positionBuilder.connectedTo( fakeElementRef, {originX: 'start', originY: 'bottom'}, {overlayX: 'start', overlayY: 'top'}) .withFallbackPosition( {originX: 'end', originY: 'top'}, {overlayX: 'end', overlayY: 'bottom'}); strategy.apply(overlayElement); let overlayRect = overlayElement.getBoundingClientRect(); expect(overlayRect.bottom).toBe(originRect.top); expect(overlayRect.right).toBe(originRect.right); }); it('should reposition the overlay if it would go off the right of the screen', () => { // Use the fake viewport ruler because we don't know *exactly* how big the viewport is. 
        fakeViewportRuler.fakeRect = {
          top: 0, left: 0, width: 500, height: 500, right: 500, bottom: 500
        };
        positionBuilder = new OverlayPositionBuilder(fakeViewportRuler);

        originElement.style.top = '200px';
        originElement.style.left = '475px';
        originRect = originElement.getBoundingClientRect();

        strategy = positionBuilder.connectedTo(
            fakeElementRef,
            {originX: 'end', originY: 'center'},
            {overlayX: 'start', overlayY: 'center'})
            .withFallbackPosition(
                {originX: 'start', originY: 'bottom'},
                {overlayX: 'end', overlayY: 'top'});

        strategy.apply(overlayElement);

        let overlayRect = overlayElement.getBoundingClientRect();
        expect(overlayRect.top).toBe(originRect.bottom);
        expect(overlayRect.right).toBe(originRect.left);
      });

      it('should position a panel properly when rtl', () => {
        // must make the overlay longer than the origin to properly test attachment
        overlayElement.style.width = `500px`;
        originRect = originElement.getBoundingClientRect();

        strategy = positionBuilder.connectedTo(
            fakeElementRef,
            {originX: 'start', originY: 'bottom'},
            {overlayX: 'start', overlayY: 'top'})
            .withDirection('rtl');

        strategy.apply(overlayElement);

        let overlayRect = overlayElement.getBoundingClientRect();
        expect(overlayRect.top).toBe(originRect.bottom);
        expect(overlayRect.right).toBe(originRect.right);
      });

      it('should position a panel with the x offset provided', () => {
        originRect = originElement.getBoundingClientRect();

        strategy = positionBuilder.connectedTo(
            fakeElementRef,
            {originX: 'start', originY: 'top'},
            {overlayX: 'start', overlayY: 'top'});

        strategy.withOffsetX(10);
        strategy.apply(overlayElement);

        let overlayRect = overlayElement.getBoundingClientRect();
        expect(overlayRect.top).toBe(originRect.top);
        expect(overlayRect.left).toBe(originRect.left + 10);
      });

      it('should position a panel with the y offset provided', () => {
        originRect = originElement.getBoundingClientRect();

        strategy = positionBuilder.connectedTo(
            fakeElementRef,
            {originX: 'start', originY: 'top'},
            {overlayX: 'start', overlayY: 'top'});

        strategy.withOffsetY(50);
        strategy.apply(overlayElement);

        let overlayRect = overlayElement.getBoundingClientRect();
        expect(overlayRect.top).toBe(originRect.top + 50);
        expect(overlayRect.left).toBe(originRect.left);
      });
    });

    it('should emit onPositionChange event when position changes', () => {
      // force the overlay to open in a fallback position
      fakeViewportRuler.fakeRect = {
        top: 0, left: 0, width: 500, height: 500, right: 500, bottom: 500
      };
      positionBuilder = new OverlayPositionBuilder(fakeViewportRuler);
      originElement.style.top = '200px';
      originElement.style.left = '475px';

      strategy = positionBuilder.connectedTo(
          fakeElementRef,
          {originX: 'end', originY: 'center'},
          {overlayX: 'start', overlayY: 'center'})
          .withFallbackPosition(
              {originX: 'start', originY: 'bottom'},
              {overlayX: 'end', overlayY: 'top'});

      const positionChangeHandler = jasmine.createSpy('positionChangeHandler');
      const subscription = strategy.onPositionChange.subscribe(positionChangeHandler);

      strategy.apply(overlayElement);
      expect(positionChangeHandler).toHaveBeenCalled();
      expect(positionChangeHandler.calls.mostRecent().args[0])
          .toEqual(jasmine.any(ConnectedOverlayPositionChange),
              `Expected strategy to emit an instance of ConnectedOverlayPositionChange.`);

      originElement.style.top = '0';
      originElement.style.left = '0';

      // If the strategy is re-applied and the initial position would now fit,
      // the position change event should be emitted again.
      strategy.apply(overlayElement);
      expect(positionChangeHandler).toHaveBeenCalledTimes(2);

      subscription.unsubscribe();
    });

    it('should pick the fallback position that shows the largest area of the element', () => {
      // Use the fake viewport ruler because we don't know *exactly* how big the viewport is.
      fakeViewportRuler.fakeRect = {
        top: 0, left: 0, width: 500, height: 500, right: 500, bottom: 500
      };
      positionBuilder = new OverlayPositionBuilder(fakeViewportRuler);

      originElement.style.top = '200px';
      originElement.style.left = '475px';
      originRect = originElement.getBoundingClientRect();

      strategy = positionBuilder.connectedTo(
          fakeElementRef,
          {originX: 'end', originY: 'center'},
          {overlayX: 'start', overlayY: 'center'})
          .withFallbackPosition(
              {originX: 'end', originY: 'top'},
              {overlayX: 'start', overlayY: 'bottom'})
          .withFallbackPosition(
              {originX: 'end', originY: 'top'},
              {overlayX: 'end', overlayY: 'top'});

      strategy.apply(overlayElement);

      let overlayRect = overlayElement.getBoundingClientRect();
      expect(overlayRect.top).toBe(originRect.top);
      expect(overlayRect.left).toBe(originRect.left);
    });

    /**
     * Run all tests for connecting the overlay to the origin such that the first preferred
     * position does not go off-screen. We do this because there are several cases where we
     * want to run the exact same tests with different preconditions (e.g., not scrolled, scrolled,
     * different element sizes, etc.).
*/ function runSimplePositionTests() { it('should position a panel below, left-aligned', () => { strategy = positionBuilder.connectedTo( fakeElementRef, {originX: 'start', originY: 'bottom'}, {overlayX: 'start', overlayY: 'top'}); strategy.apply(overlayElement); let overlayRect = overlayElement.getBoundingClientRect(); expect(overlayRect.top).toBe(originRect.bottom); expect(overlayRect.left).toBe(originRect.left); }); it('should position to the right, center aligned vertically', () => { strategy = positionBuilder.connectedTo( fakeElementRef, {originX: 'end', originY: 'center'}, {overlayX: 'start', overlayY: 'center'}); strategy.apply(overlayElement); let overlayRect = overlayElement.getBoundingClientRect(); expect(overlayRect.top).toBe(originCenterY - (OVERLAY_HEIGHT / 2)); expect(overlayRect.left).toBe(originRect.right); }); it('should position to the left, below', () => { strategy = positionBuilder.connectedTo( fakeElementRef, {originX: 'start', originY: 'bottom'}, {overlayX: 'end', overlayY: 'top'}); strategy.apply(overlayElement); let overlayRect = overlayElement.getBoundingClientRect(); expect(overlayRect.top).toBe(originRect.bottom); expect(overlayRect.right).toBe(originRect.left); }); it('should position above, right aligned', () => { strategy = positionBuilder.connectedTo( fakeElementRef, {originX: 'end', originY: 'top'}, {overlayX: 'end', overlayY: 'bottom'}); strategy.apply(overlayElement); let overlayRect = overlayElement.getBoundingClientRect(); expect(overlayRect.bottom).toBe(originRect.top); expect(overlayRect.right).toBe(originRect.right); }); it('should position below, centered', () => { strategy = positionBuilder.connectedTo( fakeElementRef, {originX: 'center', originY: 'bottom'}, {overlayX: 'center', overlayY: 'top'}); strategy.apply(overlayElement); let overlayRect = overlayElement.getBoundingClientRect(); expect(overlayRect.top).toBe(originRect.bottom); expect(overlayRect.left).toBe(originCenterX - (OVERLAY_WIDTH / 2)); }); it('should center the overlay on the origin', () => { strategy = positionBuilder.connectedTo( fakeElementRef, {originX: 'center', originY: 'center'}, {overlayX: 'center', overlayY: 'center'}); strategy.apply(overlayElement); let overlayRect = overlayElement.getBoundingClientRect(); expect(overlayRect.top).toBe(originRect.top); expect(overlayRect.left).toBe(originRect.left); }); } }); describe('onPositionChange with scrollable view properties', () => { let overlayElement: HTMLElement; let overlayContainerElement: HTMLElement; let strategy: ConnectedPositionStrategy; let scrollable: HTMLDivElement; let positionChangeHandler: Spy; let onPositionChangeSubscription: Subscription; let positionChange: ConnectedOverlayPositionChange; beforeEach(() => { // Set up the overlay overlayContainerElement = createFixedElement(); overlayElement = createPositionedBlockElement(); document.body.appendChild(overlayContainerElement); overlayContainerElement.appendChild(overlayElement); // Set up the origin let originElement = createBlockElement(); originElement.style.margin = '0 1000px 1000px 0'; // Added so that the container scrolls // Create a scrollable container and put the origin inside scrollable = createOverflowContainerElement(); document.body.appendChild(scrollable); scrollable.appendChild(originElement); // Create a strategy with knowledge of the scrollable container let positionBuilder = new OverlayPositionBuilder(new ViewportRuler()); let fakeElementRef = new FakeElementRef(originElement); strategy = positionBuilder.connectedTo( fakeElementRef, {originX: 
'start', originY: 'bottom'}, {overlayX: 'start', overlayY: 'top'}); strategy.withScrollableContainers([new Scrollable(new FakeElementRef(scrollable), null)]); positionChangeHandler = jasmine.createSpy('positionChangeHandler'); onPositionChangeSubscription = strategy.onPositionChange.subscribe(positionChangeHandler); }); afterEach(() => { onPositionChangeSubscription.unsubscribe(); document.body.removeChild(scrollable); document.body.removeChild(overlayContainerElement); }); it('should not have origin or overlay clipped or out of view without scroll', () => { strategy.apply(overlayElement); expect(positionChangeHandler).toHaveBeenCalled(); positionChange = positionChangeHandler.calls.mostRecent().args[0]; expect(positionChange.scrollableViewProperties).toEqual({ isOriginClipped: false, isOriginOutsideView: false, isOverlayClipped: false, isOverlayOutsideView: false }); }); it('should evaluate if origin is clipped if scrolled slightly down', () => { scrollable.scrollTop = 10; // Clip the origin by 10 pixels strategy.apply(overlayElement); expect(positionChangeHandler).toHaveBeenCalled(); positionChange = positionChangeHandler.calls.mostRecent().args[0]; expect(positionChange.scrollableViewProperties).toEqual({ isOriginClipped: true, isOriginOutsideView: false, isOverlayClipped: false, isOverlayOutsideView: false }); }); it('should evaluate if origin is out of view and overlay is clipped if scrolled enough', () => { scrollable.scrollTop = 31; // Origin is 30 pixels, move out of view and clip the overlay 1px strategy.apply(overlayElement); expect(positionChangeHandler).toHaveBeenCalled(); positionChange = positionChangeHandler.calls.mostRecent().args[0]; expect(positionChange.scrollableViewProperties).toEqual({ isOriginClipped: true, isOriginOutsideView: true, isOverlayClipped: true, isOverlayOutsideView: false }); }); it('should evaluate the overlay and origin are both out of the view', () => { scrollable.scrollTop = 61; // Scroll by overlay height + origin height + 1px buffer strategy.apply(overlayElement); expect(positionChangeHandler).toHaveBeenCalled(); positionChange = positionChangeHandler.calls.mostRecent().args[0]; expect(positionChange.scrollableViewProperties).toEqual({ isOriginClipped: true, isOriginOutsideView: true, isOverlayClipped: true, isOverlayOutsideView: true }); }); }); }); /** Creates an absolutely positioned, display: block element with a default size. */ function createPositionedBlockElement() { let element = createBlockElement(); element.style.position = 'absolute'; element.style.top = '0'; element.style.left = '0'; return element; } /** Creates a block element with a default size. */ function createBlockElement() { let element = document.createElement('div'); element.style.width = `${DEFAULT_WIDTH}px`; element.style.height = `${DEFAULT_HEIGHT}px`; element.style.backgroundColor = 'rebeccapurple'; element.style.zIndex = '100'; return element; } /** Creates an position: fixed element that spans the screen size. */ function createFixedElement() { let element = document.createElement('div'); element.style.position = 'fixed'; element.style.top = '0'; element.style.left = '0'; element.style.width = `100%`; element.style.height = `100%`; element.style.zIndex = '100'; return element; } /** Creates an overflow container with a set height and width with margin. 
*/ function createOverflowContainerElement() { let element = document.createElement('div'); element.style.position = 'relative'; element.style.overflow = 'auto'; element.style.height = '300px'; element.style.width = '300px'; element.style.margin = '100px'; return element; } /** Fake implementation of ViewportRuler that just returns the previously given ClientRect. */ class FakeViewportRuler implements ViewportRuler { fakeRect: ClientRect = {left: 0, top: 0, width: 1014, height: 686, bottom: 686, right: 1014}; fakeScrollPos: {top: number, left: number} = {top: 0, left: 0}; getViewportRect() { return this.fakeRect; } getViewportScrollPosition(documentRect?: ClientRect): {top: number; left: number} { return this.fakeScrollPos; } } /** Fake implementation of ElementRef that is just a simple container for nativeElement. */ class FakeElementRef implements ElementRef { constructor(public nativeElement: HTMLElement) { } }<|fim▁end|>
originElement.style.top = '200px';
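The strategy exercised by these tests connects a point on the origin element to a point on the overlay and walks through fallback pairs when the overlay would leave the viewport. Below is a minimal plain-Python sketch of that geometry; the function names and the strict fits-or-fails check are simplifications for illustration (the real strategy also weighs visible area), not the Angular Material API.

def anchor(rect, x_pos, y_pos):
    """Connection point on a rect, e.g. ('end', 'center') -> right-middle."""
    left, top, w, h = rect
    x = left + {'start': 0, 'center': w / 2, 'end': w}[x_pos]
    y = top + {'top': 0, 'center': h / 2, 'bottom': h}[y_pos]
    return x, y

def place_overlay(origin, overlay_size, viewport, positions):
    """Return (left, top) for the first connection pair that stays on-screen."""
    ow, oh = overlay_size
    vw, vh = viewport
    for origin_pt, overlay_pt in positions:
        ax, ay = anchor(origin, *origin_pt)
        # shift so the overlay's own connection point lands on the origin anchor
        left = ax - {'start': 0, 'center': ow / 2, 'end': ow}[overlay_pt[0]]
        top = ay - {'top': 0, 'center': oh / 2, 'bottom': oh}[overlay_pt[1]]
        if left >= 0 and top >= 0 and left + ow <= vw and top + oh <= vh:
            return left, top
    return None  # the real strategy would fall back to the best partial fit

# Origin near the right edge of a 500x500 viewport: the preferred
# "to the right" position overflows, so the below-left fallback is used.
origin = (475, 200, 60, 30)                        # left, top, width, height
positions = [(('end', 'center'), ('start', 'center')),
             (('start', 'bottom'), ('end', 'top'))]
print(place_overlay(origin, (60, 30), (500, 500), positions))  # (415, 230)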
<|file_name|>dom-utils.ts<|end_file_name|><|fim▁begin|>export const containerSize = () => document.getElementById('app-view-container') || { offsetHeight: 960, offsetWidth: 960 }; export const windowSize = () => ({ width: window.innerWidth, height: window.innerHeight }); export const browserLocale = () => { let lang; const nav: any = navigator;<|fim▁hole|> lang = nav.languages[0]; } else if (nav.userLanguage) { // IE only lang = nav.userLanguage; } else { // latest versions of Chrome, Firefox, and Safari set this correctly lang = nav.language; } return lang; };<|fim▁end|>
// tslint:disable-next-line if (nav.languages && nav.languages.length) { // latest versions of Chrome and Firefox set this correctly
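browserLocale above is a first-non-empty fallback chain over navigator fields: languages[0] (modern Chrome/Firefox), then the IE-only userLanguage, then language. The same chain, sketched in plain Python against a hypothetical navigator-like dict:

def browser_locale(nav):
    """Return the best-guess locale from a navigator-like mapping."""
    if nav.get('languages'):      # modern browsers expose a ranked list
        return nav['languages'][0]
    if nav.get('userLanguage'):   # IE-only fallback
        return nav['userLanguage']
    return nav.get('language')    # widely supported default

assert browser_locale({'languages': ['de-DE', 'en-US']}) == 'de-DE'
assert browser_locale({'userLanguage': 'en-GB'}) == 'en-GB'
assert browser_locale({'language': 'fr'}) == 'fr'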
<|file_name|>markers.py<|end_file_name|><|fim▁begin|>from numpy.random import random from bokeh.plotting import figure, show, output_file def mscatter(p, x, y, marker): p.scatter(x, y, marker=marker, size=15, line_color="navy", fill_color="orange", alpha=0.5) def mtext(p, x, y, text): p.text(x, y, text=[text], text_color="firebrick", text_align="center", text_font_size="10pt") p = figure(title="Bokeh Markers", toolbar_location=None) p.grid.grid_line_color = None p.background_fill_color = "#eeeeee" N = 10 mscatter(p, random(N)+2, random(N)+1, "circle") mscatter(p, random(N)+4, random(N)+1, "square") mscatter(p, random(N)+6, random(N)+1, "triangle") mscatter(p, random(N)+8, random(N)+1, "asterisk") mscatter(p, random(N)+2, random(N)+4, "circle_x") mscatter(p, random(N)+4, random(N)+4, "square_x") mscatter(p, random(N)+6, random(N)+4, "inverted_triangle") mscatter(p, random(N)+8, random(N)+4, "x") mscatter(p, random(N)+2, random(N)+7, "circle_cross") mscatter(p, random(N)+4, random(N)+7, "square_cross") mscatter(p, random(N)+6, random(N)+7, "diamond") mscatter(p, random(N)+8, random(N)+7, "cross")<|fim▁hole|> mtext(p, 2.5, 0.5, "circle / o") mtext(p, 4.5, 0.5, "square") mtext(p, 6.5, 0.5, "triangle") mtext(p, 8.5, 0.5, "asterisk / *") mtext(p, 2.5, 3.5, "circle_x / ox") mtext(p, 4.5, 3.5, "square_x") mtext(p, 6.5, 3.5, "inverted_triangle") mtext(p, 8.5, 3.5, "x") mtext(p, 2.5, 6.5, "circle_cross / o+") mtext(p, 4.5, 6.5, "square_cross") mtext(p, 6.5, 6.5, "diamond") mtext(p, 8.5, 6.5, "cross / +") output_file("markers.html", title="markers.py example") show(p) # open a browser<|fim▁end|>
<|file_name|>jquery.fatNav.js<|end_file_name|><|fim▁begin|>(function($, window, document) { var pluginName = 'fatNav', defaults = {}; function Plugin(options) { this.settings = $.extend({}, defaults, options); this._defaults = defaults; this._name = pluginName; this.init(); } $.extend(Plugin.prototype, { init: function() { var self = this; var $nav = this.$nav = $('.fat-nav'); var $hamburger = this.$hamburger = $('<a href="javascript:void(0)" class="hamburger"><div class="hamburger__icon"></div></a>');<|fim▁hole|> // Hack to prevent mobile safari scrolling the whole body when nav is open if (navigator.userAgent.match(/(iPad|iPhone|iPod)/g)) { $nav.children().css({ 'height': '110%', 'transform': 'translateY(-5%)' }); } $('body').append($hamburger); $().add($hamburger).add($nav.find('a')).on('click', function(e) { self.toggleNav(); }); }, toggleNav: function() { var self = this; this.$nav.fadeToggle(400); self.toggleBodyOverflow(); $().add(this.$hamburger).add(this.$nav).toggleClass('active'); }, toggleBodyOverflow: function() { var self = this; var $body = $('body'); $body.toggleClass('no-scroll'); var isNavOpen = $body.hasClass('no-scroll'); // $body.width($body.width()); $body.css('overflow', isNavOpen ? 'hidden' : self._bodyOverflow); } }); if (typeof $[pluginName] === 'undefined') { $[pluginName] = function(options) { return new Plugin(this, options); }; } }(jQuery, window, document));<|fim▁end|>
this._bodyOverflow = $('body').css('overflow');
<|file_name|>ImageListCellRenderer.java<|end_file_name|><|fim▁begin|>package com.lizardtech.djvubean.outline;

import java.awt.Color;
import java.awt.Component;
import javax.swing.JLabel;
import javax.swing.JList;
import javax.swing.JPanel;
import javax.swing.ListCellRenderer;
import javax.swing.UIManager;

public class ImageListCellRenderer implements ListCellRenderer {

    /**
     * From http://java.sun.com/javase/6/docs/api/javax/swing/ListCellRenderer.html:
     *
     * Return a component that has been configured to display the specified value.
     * That component's paint method is then called to "render" the cell.
     * If it is necessary to compute the dimensions of a list because the list cells do not have a fixed size,
     * this method is called to generate a component on which getPreferredSize can be invoked.
     *
     * jlist - the jlist we're painting
     * value - the value returned by list.getModel().getElementAt(index).
     * cellIndex - the cell index
     * isSelected - true if the specified cell is currently selected
     * cellHasFocus - true if the cell has focus
     */
    public Component getListCellRendererComponent(JList jlist, Object value, int cellIndex, boolean isSelected,
boolean cellHasFocus) { if (value instanceof JPanel) {
<|file_name|>bitcoin_vi_VN.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" encoding="utf-8"?> <!DOCTYPE TS> <TS version="2.1" language="vi_VN"> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About Gridcoin</source> <translation type="unfinished"></translation> </message> <message> <location line="+42"/> <source>&lt;b&gt;Gridcoin&lt;/b&gt; </source> <translation type="unfinished"></translation> </message> <message> <location line="+58"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source> <translation type="unfinished"></translation> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation type="unfinished"></translation> </message> <message> <location line="+6"/> <source>These are your Gridcoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation type="unfinished"></translation> </message> <message> <location line="+16"/> <source>Double-click to edit address or label</source> <translation type="unfinished"></translation> </message> <message> <location line="+27"/> <source>Create a new address</source> <translation>Tạo một địa chỉ mới</translation> </message> <message> <location line="+3"/> <source>&amp;New</source> <translation>&amp;Mới</translation> </message> <message> <location line="+11"/> <source>Copy the currently selected address to the system clipboard</source> <translation>Copy địa chỉ được chọn vào clipboard</translation> </message> <message> <location line="+3"/> <source>&amp;Copy</source> <translation>&amp;Copy</translation> </message> <message> <location line="+11"/> <source>Show &amp;QR Code</source> <translation type="unfinished"></translation> </message> <message> <location line="+11"/> <source>Sign a message to prove you own a Gridcoin address</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation type="unfinished"></translation> </message> <message> <location line="+11"/> <source>Verify a message to ensure it was signed with a specified Gridcoin address</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation type="unfinished"></translation> </message> <message> <location line="+11"/> <source>Delete the currently selected address from the list</source> <translation>Xóa địa chỉ hiện tại từ danh sách</translation> </message> <message> <location line="+3"/> <source>&amp;Delete</source> <translation>&amp;Xó&amp;a</translation> </message> <message> <location filename="../addressbookpage.cpp" line="+65"/> <source>Copy &amp;Label</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>&amp;Edit</source> <translation type="unfinished"></translation> </message> <message> <location line="+250"/> <source>Export Address Book Data</source> <translation type="unfinished"></translation> </message> <message> <location 
line="+1"/> <source>Comma separated file (*.csv)</source> <translation type="unfinished"></translation> </message> <message> <location line="+13"/> <source>Error exporting</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"></translation> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+144"/> <source>Label</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>Address</source> <translation type="unfinished"></translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation type="unfinished"></translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation>Hội thoại Passphrase</translation> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation>Điền passphrase</translation> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation>Passphrase mới</translation> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation>Điền lại passphrase</translation> </message> <message> <location line="+33"/> <source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>For staking only</source> <translation type="unfinished"></translation> </message> <message> <location filename="../askpassphrasedialog.cpp" line="+37"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;ten or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Encrypt wallet</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation type="unfinished"></translation> </message> <message> <location line="+45"/> <source>Confirm wallet encryption</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR COINS&lt;/b&gt;!</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation type="unfinished"></translation> </message> <message> <location 
line="+9"/> <location line="+60"/> <source>Wallet encrypted</source> <translation type="unfinished"></translation> </message> <message> <location line="-58"/> <source>Gridcoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation type="unfinished"></translation> </message> <message> <location line="+9"/> <location line="+7"/> <location line="+44"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation type="unfinished"></translation> </message> <message> <location line="-56"/> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <location line="+50"/> <source>The supplied passphrases do not match.</source> <translation type="unfinished"></translation> </message> <message> <location line="-38"/> <source>Wallet unlock failed</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <location line="+12"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation type="unfinished"></translation> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation type="unfinished"></translation> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation type="unfinished"></translation> </message> <message> <location line="+48"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation type="unfinished"></translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+798"/> <source>Sign &amp;message...</source> <translation>Chứ ký &amp; Tin nhắn...</translation> </message> <message> <location line="-131"/> <source>&amp;Overview</source> <translation>&amp;Tổng quan</translation> </message> <message> <location line="+1"/> <source>Show general overview of wallet</source> <translation>Hiện thỉ thông tin sơ lược chung về Ví</translation> </message> <message> <location line="+5"/> <source>Send coins to a Gridcoin address</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Show the list of addresses for receiving payments</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>&amp;Transactions</source> <translation>&amp;Giao dịch</translation> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation>Duyệt tìm lịch sử giao dịch</translation> </message> <message> <location line="+4"/> <source>&amp;Address Book</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Edit the list of stored addresses and labels</source> <translation type="unfinished"></translation> </message> <message> <location line="+9"/> <source>&amp;Block Explorer</source> <translation 
type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Block Explorer</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;Exchange</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <location line="+4"/> <source>Web Site</source> <translation type="unfinished"></translation> </message> <message> <location line="-1"/> <source>&amp;Web Site</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>&amp;GRC Chat Room</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>GRC Chatroom</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;BOINC</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Gridcoin rewards distributed computing with BOINC</source> <translation type="unfinished"></translation> </message> <message> <location line="+21"/> <source>E&amp;xit</source> <translation>T&amp;hoát</translation> </message> <message> <location line="+1"/> <source>Quit application</source> <translation>Thoát chương trình</translation> </message> <message> <location line="+47"/> <source>&amp;Options...</source> <translation>&amp;Tùy chọn...</translation> </message> <message> <location line="+4"/> <source>&amp;Encrypt Wallet...</source> <translation>&amp;Mã hóa ví tiền</translation> </message> <message> <location line="+1"/> <source>Encrypt or decrypt wallet</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>&amp;Backup Wallet...</source> <translation>&amp;Sao lưu ví tiền...</translation> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation>&amp;Thay đổi mật khẩu...</translation> </message> <message> <location line="-1"/> <source>Backup wallet to another location</source> <translation>Sao lưu ví tiền ở vị trí khác</translation> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation>Thay đổi cụm mật mã dùng cho mã hoá Ví</translation> </message> <message> <location line="+1"/> <source>&amp;Unlock Wallet...</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Unlock wallet</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>&amp;Lock Wallet</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Lock wallet</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>&amp;Export...</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Export the data in the current tab to a file</source> <translation type="unfinished">Xuất dữ liệu trong mục hiện tại ra file</translation> </message> <message> <location line="+1"/> <source>&amp;Debug window</source> <translation>&amp;Cửa sổ xử lý lỗi (debug)</translation> </message> <message> <location line="-4"/> <source>&amp;Verify message...</source> <translation>&amp;Tin nhắn xác thực</translation> </message> <message> <location line="-623"/> <source>Wallet</source> <translation>Ví</translation> </message> <message> <location line="+0"/> <source>Gridcoin</source> <translation type="unfinished"></translation> </message> 
<message> <location line="+496"/> <source>&amp;Send</source> <translation>&amp;Gửi</translation> </message> <message> <location line="+5"/> <source>&amp;Receive</source> <translation>&amp;Nhận</translation> </message> <message> <location line="+65"/> <source>&amp;Rebuild Block Chain</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Rebuild Block Chain</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;Download Blocks</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Download Blocks</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;Upgrade Client</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Upgrade Client</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;About Gridcoin</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Show information about Gridcoin</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;Neural Network</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Neural Network</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;Advanced Configuration</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Advanced Configuration</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;New User Wizard</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <location line="+590"/> <source>New User Wizard</source> <translation type="unfinished"></translation> </message> <message> <location line="-665"/> <source>&amp;Voting</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Voting</source> <translation type="unfinished"></translation> </message> <message> <location line="+77"/> <source>&amp;Foundation</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Foundation</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;Diagnostics</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Diagnostics</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>FA&amp;Q</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Interactive FAQ</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>Modify configuration options for Gridcoin</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>&amp;Show / Hide</source> <translation>Ẩn / H&amp;iện</translation> </message> <message> <location line="+18"/> <source>Open debugging and diagnostic console</source> <translation type="unfinished"></translation> </message> <message> <location line="+197"/> <location line="+9"/> <source>[testnet]</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> 
<location line="+64"/> <source>Gridcoin client</source> <translation type="unfinished"></translation> </message> <message> <location line="+82"/> <source>%1 active connection(s) to Gridcoin network</source> <translation type="unfinished"></translation> </message> <message numerus="yes"> <location line="+12"/> <source>Processed %n block(s) of transaction history.</source> <translation type="unfinished"> <numerusform></numerusform> </translation> </message> <message> <location line="+13"/> <source>%1 second(s) ago</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>%1 minute(s) ago</source> <translation type="unfinished"></translation><|fim▁hole|> <source>%1 hour(s) ago</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>%1 day(s) ago</source> <translation type="unfinished"></translation> </message> <message> <location line="+23"/> <source>Last received block was generated %1.</source> <translation type="unfinished"></translation> </message> <message> <location line="+68"/> <source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Confirm transaction fee</source> <translation type="unfinished"></translation> </message> <message> <location line="+69"/> <source>Please enter your boinc E-mail address, or click &lt;Cancel&gt; to skip for now:</source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Created new Configuration File Successfully. </source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>New Account Created - Welcome Aboard!</source> <translation type="unfinished"></translation> </message> <message> <location line="+8"/> <source>To get started with Boinc, run the boinc client, choose projects, then populate the gridcoinresearch.conf file in %appdata%\GridcoinResearch with your boinc e-mail address. To run this wizard again, please delete the gridcoinresearch.conf file. </source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>New User Wizard - Skipped</source> <translation type="unfinished"></translation> </message> <message> <location line="+16"/> <source>Attention! 
- Boinc Path Error!</source> <translation type="unfinished"></translation> </message> <message> <location line="+315"/> <source>Backup Wallet</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation type="unfinished"></translation> </message> <message numerus="yes"> <location line="+300"/> <source>%n second(s)</source> <translation type="unfinished"> <numerusform></numerusform> </translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n minute(s)</source> <translation type="unfinished"> <numerusform></numerusform> </translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n hour(s)</source> <translation type="unfinished"> <numerusform></numerusform> </translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s)</source> <translation type="unfinished"> <numerusform></numerusform> </translation> </message> <message> <location line="+27"/> <source>Staking.&lt;br&gt;Your weight is %1&lt;br&gt;Network weight is %2&lt;br&gt;&lt;b&gt;Estimated&lt;/b&gt; time to earn reward is %3. %4</source> <translation type="unfinished"></translation> </message> <message> <location line="+9"/> <source>Not staking because wallet is locked</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Not staking because wallet is offline</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>Not staking because wallet is syncing</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Not staking because you don&apos;t have mature coins and stake weight is too low.</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Not staking because you don&apos;t have mature coins</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Searching for mature coins... 
Please wait</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Not staking</source> <translation type="unfinished"></translation> </message> <message> <location line="-1246"/> <source>&amp;File</source> <translation>&amp;File</translation> </message> <message> <location line="+8"/> <source>&amp;Settings</source> <translation>&amp;Thiết lập</translation> </message> <message> <location line="+9"/> <source>&amp;Community</source> <translation type="unfinished"></translation> </message> <message> <location line="+8"/> <source>&amp;Advanced</source> <translation type="unfinished"></translation> </message> <message> <location line="+15"/> <source>&amp;Help</source> <translation>Trợ &amp;giúp</translation> </message> <message> <location line="+316"/> <source>Up to date</source> <translation>Đã cập nhật</translation> </message> <message> <location line="+7"/> <source>Catching up...</source> <translation>Đang bắt kịp...</translation> </message> <message> <location line="+218"/> <source>Sent transaction</source> <translation>Giao dịch đã gửi</translation> </message> <message> <location line="+1"/> <source>Incoming transaction</source> <translation>Giao dịch đang tới</translation> </message> <message> <location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation type="unfinished"></translation> </message> <message> <location line="+211"/> <location line="+15"/> <source>URI can not be parsed! This can be caused by an invalid Gridcoin address or malformed URI parameters.</source> <translation type="unfinished"></translation> </message> <message> <location line="-15"/> <location line="+15"/> <source>URI handling</source> <translation type="unfinished"></translation> </message> <message> <location line="+18"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>Ví tiền &lt;b&gt;đã được mã hóa&lt;/b&gt; và hiện &lt;b&gt;đang mở&lt;/b&gt;</translation> </message> <message> <location line="+10"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>Ví tiền &lt;b&gt;đã được mã hóa&lt;/b&gt; và hiện &lt;b&gt;đang khóa&lt;/b&gt;</translation> </message> <message> <location filename="../bitcoin.cpp" line="+145"/> <source>A fatal error occurred. 
Gridcoin can no longer continue safely and will quit.</source> <translation type="unfinished"></translation> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+116"/> <source>Network Alert</source> <translation type="unfinished"></translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <location filename="../forms/coincontroldialog.ui" line="+14"/> <source>Coin Control</source> <translation type="unfinished"></translation> </message> <message> <location line="+28"/> <source>Quantity:</source> <translation>Lượng:</translation> </message> <message> <location line="+29"/> <source>Bytes:</source> <translation>Bytes:</translation> </message> <message> <location line="+45"/> <source>Amount:</source> <translation>Lượng:</translation> </message> <message> <location line="+29"/> <source>Priority:</source> <translation type="unfinished"></translation> </message> <message> <location line="+45"/> <source>Fee:</source> <translation>Phí:</translation> </message> <message> <location line="+32"/> <source>Low Output:</source> <translation type="unfinished"></translation> </message> <message> <location line="+48"/> <source>After Fee:</source> <translation>Sau phí:</translation> </message> <message> <location line="+32"/> <source>Change:</source> <translation>Tiền thừa:</translation> </message> <message> <location line="+69"/> <source>(un)select all</source> <translation>(bỏ)chọn tất cả</translation> </message> <message> <location line="+13"/> <source>Tree &amp;mode</source> <translation type="unfinished"></translation> </message> <message> <location line="+66"/> <source>Label</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Address</source> <translation type="unfinished"></translation> </message> <message> <location line="+18"/> <source>Priority</source> <translation type="unfinished"></translation> </message> <message> <location line="-73"/> <source>List mode</source> <translation>Chế độ danh sách</translation> </message> <message> <location line="+45"/> <source>Amount</source> <translation>Lượng</translation> </message> <message> <location line="+15"/> <source>Date</source> <translation>Ngày tháng</translation> </message> <message> <location line="+5"/> <source>Confirmations</source> <translation>Lần xác nhận</translation> </message> <message> <location line="+3"/> <source>Confirmed</source> <translation>Đã xác nhận</translation> </message> <message> <location filename="../coincontroldialog.cpp" line="+36"/> <source>Copy address</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <location line="+26"/> <source>Copy amount</source> <translation type="unfinished"></translation> </message> <message> <location line="-25"/> <source>Copy transaction ID</source> <translation type="unfinished"></translation> </message> <message> <location line="+24"/> <source>Copy quantity</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Copy fee</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation type="unfinished"></translation> </message> <message> 
<location line="+1"/> <source>Copy priority</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy change</source> <translation type="unfinished"></translation> </message> <message> <location line="+317"/> <source>highest</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>high</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>medium-high</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>medium</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>low-medium</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>low</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>lowest</source> <translation type="unfinished"></translation> </message> <message> <location line="+155"/> <source>no</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>DUST</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>yes</source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>This label turns red, if the transaction size is bigger than 10000 bytes. This means a fee of at least %1 per kb is required. Can vary +/- 1 Byte per input.</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Transactions with higher priority get more likely into a block. This label turns red, if the priority is smaller than &quot;medium&quot;. This means a fee of at least %1 per kb is required.</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>This label turns red, if any recipient receives an amount smaller than %1. This means a fee of at least %2 is required. Amounts below 0.546 times the minimum relay fee are shown as DUST.</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>This label turns red, if the change is smaller than %1. 
This means a fee of at least %2 is required.</source> <translation type="unfinished"></translation> </message> <message> <location line="+37"/> <location line="+63"/> <source>(no label)</source> <translation type="unfinished"></translation> </message> <message> <location line="-9"/> <source>change from %1 (%2)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>(change)</source> <translation type="unfinished"></translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation>Thay đổi địa chỉ</translation> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation>&amp;Nhãn</translation> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation>&amp;Địa chỉ</translation> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. This can only be modified for sending addresses.</source> <translation type="unfinished"></translation> </message> <message> <location filename="../editaddressdialog.cpp" line="+20"/> <source>New receiving address</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>New sending address</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation type="unfinished"></translation> </message> <message> <location line="+71"/> <source>The entered address &quot;%1&quot; is not a valid Gridcoin address.</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Could not unlock wallet.</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation type="unfinished"></translation> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+424"/> <source>version</source> <translation type="unfinished">version</translation> </message> <message> <location line="+2"/> <source>Usage:</source> <translation type="unfinished">Mức sử dụng:</translation> </message> <message> <location line="+1"/> <source>command-line options</source> <translation type="unfinished">tùy chọn dòng lệnh</translation> </message> <message> <location line="+4"/> <source>UI options</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation type="unfinished">Chọn ngôn ngữ, ví dụ &quot;de_DE&quot; (mặc định: ngôn ngữ hệ thống)</translation> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation type="unfinished"></translation> </message> 
<message> <location line="+2"/> <source>Gridcoin-Qt</source> <translation type="unfinished"></translation> </message> </context> <context> <name>NewPollDialog</name> <message> <location filename="../votingdialog.cpp" line="+896"/> <location line="+96"/> <source>Create Poll</source> <translation type="unfinished"></translation> </message> <message> <location line="-81"/> <source>Title: </source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Days: </source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Question: </source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Discussion URL: </source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Share Type: </source> <translation type="unfinished"></translation> </message> <message> <location line="+23"/> <source>Add Item</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Remove Item</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Clear All</source> <translation type="unfinished"></translation> </message> <message> <location line="+36"/> <source>Creating poll failed! Title is missing.</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>Creating poll failed! Days value is missing.</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>Creating poll failed! Question is missing.</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>Creating poll failed! URL is missing.</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>Creating poll failed! Answer is missing.</source> <translation type="unfinished"></translation> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation>Lựa chọn</translation> </message> <message> <location line="+16"/> <source>&amp;Main</source> <translation>&amp;Chính</translation> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source> <translation type="unfinished"></translation> </message> <message> <location line="+15"/> <source>Pa&amp;y transaction fee</source> <translation type="unfinished"></translation> </message> <message> <location line="+31"/> <source>Reserved amount does not participate in staking and is therefore spendable at any time.</source> <translation type="unfinished"></translation> </message> <message> <location line="+15"/> <source>Reser&amp;ve</source> <translation type="unfinished"></translation> </message> <message> <location line="+31"/> <source>Automatically start Gridcoin after logging in to the system.</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;Start Gridcoin on system login</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. 
The wallet is always detached.</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;Detach databases at shutdown</source> <translation type="unfinished"></translation> </message> <message> <location line="+21"/> <source>&amp;Network</source> <translation type="unfinished"></translation> </message> <message> <location line="+6"/> <source>Automatically open the Gridcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Connect to the Gridcoin network through a SOCKS proxy (e.g. when connecting through Tor).</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS proxy:</source> <translation type="unfinished"></translation> </message> <message> <location line="+28"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation type="unfinished"></translation> </message> <message> <location line="+33"/> <source>SOCKS &amp;Version:</source> <translation type="unfinished"></translation> </message> <message> <location line="+13"/> <source>SOCKS version of the proxy (e.g. 5)</source> <translation type="unfinished"></translation> </message> <message> <location line="+36"/> <source>&amp;Window</source> <translation type="unfinished"></translation> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation type="unfinished"></translation> </message> <message> <location line="+42"/> <source>The user interface language can be set here. 
This setting will take effect after restarting Gridcoin.</source> <translation type="unfinished"></translation> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation type="unfinished"></translation> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation type="unfinished"></translation> </message> <message> <location line="+11"/> <source>Style:</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Choose a stylesheet to change the look of the wallet.</source> <translation type="unfinished"></translation> </message> <message> <location line="+9"/> <source>Whether to show Gridcoin addresses in the transaction list or not.</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>&amp;Display addresses in transaction list</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Whether to show coin control features or not.</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Display coin &amp;control features (experts only!)</source> <translation type="unfinished"></translation> </message> <message> <location line="+91"/> <source>&amp;Apply</source> <translation type="unfinished"></translation> </message> <message> <location line="-317"/> <source>Proxy &amp;IP:</source> <translation>Proxy &amp;IP:</translation> </message> <message> <location line="+26"/> <source>&amp;Port:</source> <translation>&amp;Cổng:</translation> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 9050)</source> <translation>Cổng proxy (e.g. 9050)</translation> </message> <message> <location line="+96"/> <source>&amp;Display</source> <translation>&amp;Hiển thị</translation> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation>&amp;Ngôn ngữ giao diện người dùng:</translation> </message> <message> <location line="+151"/> <source>&amp;OK</source> <translation>&amp;OK</translation> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation>&amp;Hủy bỏ</translation> </message> <message> <location filename="../optionsdialog.cpp" line="+55"/> <source>default</source> <translation>mặc định</translation> </message> <message> <location line="+30"/> <source>Native</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Light</source> <translation type="unfinished"></translation> </message> <message> <location line="+123"/> <location line="+9"/> <source>Warning</source> <translation type="unfinished">Cảnh báo</translation> </message> <message> <location line="-9"/> <location line="+9"/> <source>This setting will take effect after restarting Gridcoin.</source> <translation type="unfinished"></translation> </message> <message> <location line="+42"/> <source>The supplied proxy address is invalid.</source> <translation type="unfinished"></translation> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+32"/> <source>Form</source> <translation>Form</translation> </message> <message> <location line="+47"/> <source>Wallet</source> <translation type="unfinished">Ví</translation> </message> <message> <location line="+10"/> <location line="+466"/> <source>The displayed information may be out of date. Your wallet automatically synchronizes with the Gridcoin network after a connection is established, but this process has not completed yet.</source> <translation type="unfinished"></translation> </message> <message> <location line="-403"/> <source>Your current spendable balance</source> <translation type="unfinished"></translation> </message> <message> <location line="+13"/> <source>Stake</source> <translation type="unfinished"></translation> </message> <message> <location line="+15"/> <source>Total number of coins that are staking, and do not yet count toward the current balance</source> <translation type="unfinished"></translation> </message> <message> <location line="+13"/> <source>Unconfirmed</source> <translation type="unfinished"></translation> </message> <message> <location line="+15"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source> <translation type="unfinished"></translation> </message> <message> <location line="+19"/> <source>Immature:</source> <translation type="unfinished"></translation> </message> <message> <location line="+12"/> <source>Total mined coins that have not yet matured.</source> <translation type="unfinished"></translation> </message> <message> <location line="+45"/> <source>Your current total balance</source> <translation type="unfinished"></translation> </message> <message> <location line="+49"/> <source>Blocks:</source> <translation type="unfinished"></translation> </message> <message> <location line="+17"/> <source>Difficulty:</source> <translation type="unfinished"></translation> </message> <message> <location line="+17"/> <source>Net Weight:</source> <translation type="unfinished"></translation> </message> <message> <location line="+17"/> <source>DPOR
Weight:</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Magnitude:</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Project:</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>CPID:</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Status:</source> <translation type="unfinished"></translation> </message> <message> <location line="+124"/> <source>Recent transactions</source> <translation type="unfinished"></translation> </message> <message> <location line="+99"/> <source>Current Poll:</source> <translation type="unfinished"></translation> </message> <message> <location line="+17"/> <source>Client Messages:</source> <translation type="unfinished"></translation> </message> <message> <location line="-515"/> <source>Available:</source> <translation>Khả dụng:</translation> </message> <message> <location line="+140"/> <source>Total:</source> <translation>Tổng:</translation> </message> <message> <location filename="../overviewpage.cpp" line="+132"/> <location line="+1"/> <source>out of sync</source> <translation type="unfinished"></translation> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation type="unfinished"></translation> </message> <message> <location line="+62"/> <source>Request Payment</source> <translation type="unfinished"></translation> </message> <message> <location line="+12"/> <source>Label:</source> <translation type="unfinished"></translation> </message> <message> <location line="+19"/> <source>Message:</source> <translation type="unfinished"></translation> </message> <message> <location line="+25"/> <source>Amount:</source> <translation type="unfinished">Lượng:</translation> </message> <message> <location line="+46"/> <source>&amp;Save As...</source> <translation type="unfinished"></translation> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation type="unfinished">Lỗi khi mã hóa URI thành QR Code.</translation> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation type="unfinished"></translation> </message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation type="unfinished"></translation> </message> <message> <location line="+25"/> <source>Save QR Code</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>PNG Images (*.png)</source> <translation type="unfinished"></translation> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+14"/> <source>Gridcoin - Debug Console</source> <translation type="unfinished"></translation> </message> <message> <location line="+16"/> <source>&amp;Information</source> <translation>&amp;Thông tin</translation> </message> <message> <location line="+9"/> <source>Boost version</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <location line="+27"/> <location line="+42"/> <location line="+42"/> <location line="+54"/> <location line="+19"/> <location line="+29"/> <location line="+34"/> <location line="+39"/> <location line="+107"/> 
<location filename="../rpcconsole.cpp" line="+386"/> <source>N/A</source> <translation type="unfinished"></translation> </message> <message> <location line="-386"/> <source>Proof Of Research Difficulty</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>1</source> <translation type="unfinished"></translation> </message> <message> <location line="+29"/> <source>Number of connections</source> <translation type="unfinished"></translation> </message> <message> <location line="+42"/> <source>Last block time</source> <translation type="unfinished"></translation> </message> <message> <location line="+57"/> <source>Gridcoin Core:</source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Client version</source> <translation type="unfinished"></translation> </message> <message> <location line="+48"/> <source>Build date</source> <translation type="unfinished"></translation> </message> <message> <location line="+34"/> <source>Network:</source> <translation type="unfinished"></translation> </message> <message> <location line="+29"/> <source>On testnet</source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Current number of blocks</source> <translation type="unfinished"></translation> </message> <message> <location line="+29"/> <source>Estimated total blocks</source> <translation type="unfinished"></translation> </message> <message> <location line="+15"/> <source>Debug log file</source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Open the Gridcoin debug log file from the current data directory. This can take a few seconds for large log files.</source> <translation type="unfinished"></translation> </message> <message> <location line="+6"/> <source>&amp;Open</source> <translation type="unfinished"></translation> </message> <message> <location line="+15"/> <source>Command-line options</source> <translation type="unfinished">Tùy chọn dòng lệnh</translation> </message> <message> <location line="+10"/> <source>Show the Gridcoin help message to get a list with possible Gridcoin command-line options.</source> <translation type="unfinished"></translation> </message> <message> <location line="+6"/> <source>&amp;Show</source> <translation type="unfinished"></translation> </message> <message> <location line="+45"/> <source>Startup time</source> <translation type="unfinished"></translation> </message> <message> <location line="+23"/> <source>OpenSSL version</source> <translation type="unfinished"></translation> </message> <message> <location line="+13"/> <source>Client name</source> <translation type="unfinished"></translation> </message> <message> <location line="+8"/> <source>&amp;Network Traffic</source> <translation type="unfinished"></translation> </message> <message> <location line="+52"/> <source>&amp;Clear</source> <translation type="unfinished"></translation> </message> <message> <location line="+13"/> <source>Totals</source> <translation type="unfinished"></translation> </message> <message> <location line="+27"/> <source>In:</source> <translation type="unfinished"></translation> </message> <message> <location line="+43"/> <source>Out:</source> <translation type="unfinished"></translation> </message> <message> <location line="+41"/> <source>&amp;Console</source> <translation type="unfinished"></translation> </message> <message> <location line="+56"/> <source>Clear console</source> <translation 
type="unfinished"></translation> </message> <message> <location line="-558"/> <source>Block chain</source> <translation>Block chain</translation> </message> <message> <location filename="../rpcconsole.cpp" line="-34"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation>Sử dụng phím lên và xuống để di chuyển lịch sử, và &lt;b&gt;Ctrl-L&lt;/b&gt; để xóa màn hình.</translation> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation>Gõ &lt;b&gt;help&lt;/b&gt; để xem những câu lệnh có sẵn.</translation> </message> <message> <location line="+111"/> <source>%1 B</source> <translation>%1 B</translation> </message> <message> <location line="+2"/> <source>%1 KB</source> <translation>%1 KB</translation> </message> <message> <location line="+2"/> <source>%1 MB</source> <translation>%1 MB</translation> </message> <message> <location line="+2"/> <source>%1 GB</source> <translation>%1 GB</translation> </message> <message> <location line="+7"/> <source>%1 m</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>%1 h</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>%1 h %2 m</source> <translation type="unfinished"></translation> </message> <message> <location line="-133"/> <source>Welcome to the Gridcoin RPC console! </source> <translation type="unfinished"></translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+182"/> <location line="+5"/> <location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation>Gửi Coins</translation> </message> <message> <location line="+73"/> <source>Coin Control Features</source> <translation>Tính năng Coin Control</translation> </message> <message> <location line="+20"/> <source>Inputs...</source> <translation>Đầu vào...</translation> </message> <message> <location line="+7"/> <source>automatically selected</source> <translation>được chọn tự động</translation> </message> <message> <location line="+16"/> <source>Insufficient funds!</source> <translation>Không đủ tiền!</translation> </message> <message> <location line="+83"/> <source>Quantity:</source> <translation>Lượng:</translation> </message> <message> <location line="+22"/> <location line="+32"/> <source>0</source> <translation type="unfinished"></translation> </message> <message> <location line="-19"/> <source>Bytes:</source> <translation>Bytes:</translation> </message> <message> <location line="+48"/> <source>Amount:</source> <translation>Lượng:</translation> </message> <message> <location line="+22"/> <location line="+80"/> <location line="+80"/> <location line="+29"/> <source>0.00 GRC</source> <translation type="unfinished"></translation> </message> <message> <location line="-179"/> <source>Priority:</source> <translation type="unfinished"></translation> </message> <message> <location line="+19"/> <source>medium</source> <translation type="unfinished"></translation> </message> <message> <location line="+29"/> <source>Fee:</source> <translation>Phí:</translation> </message> <message> <location line="+32"/> <source>Low Output:</source> <translation type="unfinished"></translation> </message> <message> <location line="+19"/> <source>no</source> 
<translation type="unfinished"></translation> </message> <message> <location line="+29"/> <source>After Fee:</source> <translation>Sau phí:</translation> </message> <message> <location line="+32"/> <source>Change</source> <translation type="unfinished"></translation> </message> <message> <location line="+50"/> <source>custom change address</source> <translation type="unfinished"></translation> </message> <message> <location line="+138"/> <source>Remove all transaction fields</source> <translation type="unfinished"></translation> </message> <message> <location line="+47"/> <source>123.456 GRC</source> <translation type="unfinished"></translation> </message> <message> <location line="+34"/> <source>S&amp;end</source> <translation type="unfinished"></translation> </message> <message> <location line="-104"/> <source>Send to multiple recipients at once</source> <translation>Gửi đến nhiều người nhận trong một lần</translation> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation>Thêm &amp;Người nhận</translation> </message> <message> <location line="+23"/> <source>Clear &amp;All</source> <translation>Xóa &amp;Tất cả</translation> </message> <message> <location line="+28"/> <source>Balance:</source> <translation>Số dư:</translation> </message> <message> <location line="+47"/> <source>Confirm the send action</source> <translation>Xác nhận thao tác gửi</translation> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-173"/> <source>Enter a Gridcoin address (e.g. G8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"></translation> </message> <message> <location line="+15"/> <source>Copy quantity</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy fee</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy change</source> <translation type="unfinished"></translation> </message> <message> <location line="+86"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation>Xác nhận gửi coins</translation> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source> and </source> <translation type="unfinished"></translation> </message> <message> <location line="+29"/> <source>The recipient address is not valid, please recheck.</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>The amount exceeds your balance.</source> <translation type="unfinished"></translation> 
</message> <message> <location line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation type="unfinished"></translation> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed.</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"></translation> </message> <message> <location line="+251"/> <source>WARNING: Invalid Gridcoin address</source> <translation type="unfinished"></translation> </message> <message> <location line="+13"/> <source>(no label)</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>WARNING: unknown change address</source> <translation type="unfinished"></translation> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+170"/> <source>A&amp;mount:</source> <translation>Lượng:</translation> </message> <message> <location line="-138"/> <source>&amp;Label:</source> <translation>&amp;Nhãn:</translation> </message> <message> <location line="-18"/> <source>Form</source> <translation type="unfinished">Form</translation> </message> <message> <location line="+39"/> <location filename="../sendcoinsentry.cpp" line="+29"/> <source>Enter a label for this address to add it to your address book</source> <translation type="unfinished"></translation> </message> <message> <location line="+9"/> <source>Pay &amp;To:</source> <translation type="unfinished"></translation> </message> <message> <location line="+18"/> <source>The address to send the payment to (e.g. Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Choose address from address book</source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation type="unfinished"></translation> </message> <message> <location line="+16"/> <source>Message:</source> <translation type="unfinished"></translation> </message> <message> <location line="+21"/> <source>Send Custom Message to a Gridcoin Recipient</source> <translation type="unfinished"></translation> </message> <message> <location line="+22"/> <source>Track Coins</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Add Attachment</source> <translation type="unfinished"></translation> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a Gridcoin address (e.g. 
G8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"></translation> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation type="unfinished"></translation> </message> <message> <location line="+13"/> <source>&amp;Sign Message</source> <translation type="unfinished"></translation> </message> <message> <location line="+6"/> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation type="unfinished"></translation> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <location line="+203"/> <source>Choose an address from the address book</source> <translation type="unfinished"></translation> </message> <message> <location line="-193"/> <location line="+203"/> <source>Alt+A</source> <translation type="unfinished"></translation> </message> <message> <location line="-193"/> <source>Paste address from clipboard</source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation type="unfinished"></translation> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation type="unfinished"></translation> </message> <message> <location line="+24"/> <source>Copy the current signature to the system clipboard</source> <translation type="unfinished"></translation> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this Gridcoin address</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation type="unfinished"></translation> </message> <message> <location line="+14"/> <source>Reset all sign message fields</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation>Xóa &amp;Tất cả</translation> </message> <message> <location line="-87"/> <location line="+70"/> <source>&amp;Verify Message</source> <translation type="unfinished"></translation> </message> <message> <location line="-64"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation type="unfinished"></translation> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. 
Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source> <translation type="unfinished"></translation> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified Gridcoin address</source> <translation type="unfinished"></translation> </message> <message> <location line="+17"/> <source>Reset all verify message fields</source> <translation type="unfinished"></translation> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a Gridcoin address (e.g. G8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"></translation> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Enter Gridcoin signature</source> <translation type="unfinished"></translation> </message> <message> <location line="+82"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation type="unfinished"></translation> </message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation type="unfinished"></translation> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation type="unfinished"></translation> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation type="unfinished"></translation> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation type="unfinished"></translation> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation type="unfinished"></translation> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation type="unfinished"></translation> </message> </context> <context> <name>TransactionDesc</name> <message numerus="yes"> <location filename="../transactiondesc.cpp" line="+36"/> <source>Open for %n more block(s)</source> <translation type="unfinished"> <numerusform></numerusform> </translation> </message> <message> <location line="+2"/> <source>Open until %1</source> <translation type="unfinished"></translation> </message> <message> <location line="+6"/> <source>conflicted</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>%1/offline</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> 
<source>%1 confirmations</source> <translation type="unfinished"></translation> </message> <message> <location line="+45"/> <source>Status</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>, has not been successfully broadcast yet</source> <translation type="unfinished"></translation> </message> <message numerus="yes"> <location line="+2"/> <source>, broadcast through %n node(s)</source> <translation type="unfinished"> <numerusform></numerusform> </translation> </message> <message> <location line="+4"/> <source>Date</source> <translation type="unfinished">Ngày tháng</translation> </message> <message> <location line="+7"/> <source>Source</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>Generated</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <location line="+17"/> <source>From</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>unknown</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <location line="+22"/> <location line="+58"/> <source>To</source> <translation type="unfinished"></translation> </message> <message> <location line="-77"/> <location line="+2"/> <source>own address</source> <translation type="unfinished"></translation> </message> <message> <location line="-2"/> <source>label</source> <translation type="unfinished"></translation> </message> <message> <location line="+37"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+43"/> <source>Credit</source> <translation type="unfinished"></translation> </message> <message numerus="yes"> <location line="-115"/> <source>matures in %n more block(s)</source> <translation type="unfinished"> <numerusform></numerusform> </translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation type="unfinished"></translation> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+43"/> <source>Debit</source> <translation type="unfinished"></translation> </message> <message> <location line="-52"/> <source>Transaction fee</source> <translation type="unfinished"></translation> </message> <message> <location line="+16"/> <source>Net amount</source> <translation type="unfinished"></translation> </message> <message> <location line="+6"/> <source>Message</source> <translation>Tin nhắn</translation> </message> <message> <location line="+2"/> <source>Comment</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Block Type</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Block Number</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Gridcoin generated coins must mature 110 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. 
This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation type="unfinished"></translation> </message> <message> <location line="+14"/> <source>Information</source> <translation type="unfinished">Thông tin</translation> </message> <message> <location line="+8"/> <source>Transaction</source> <translation type="unfinished"></translation> </message> <message> <location line="+25"/> <source>Inputs</source> <translation type="unfinished"></translation> </message> <message> <location line="+26"/> <source>Amount</source> <translation>Lượng</translation> </message> <message> <location line="+1"/> <source>true</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>false</source> <translation type="unfinished"></translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+20"/> <source>Transaction details</source> <translation type="unfinished"></translation> </message> <message> <location line="+9"/> <source>This pane shows a detailed description of the transaction</source> <translation type="unfinished"></translation> </message> <message> <location line="+25"/> <source>View Attachment</source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Execute Contract</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>C&amp;lose</source> <translation type="unfinished">Đó&amp;ng</translation> </message> <message> <location filename="../transactiondescdialog.cpp" line="+40"/> <source>Gridcoin Documents</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Document cannot be found on P2P server.</source> <translation type="unfinished"></translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+239"/> <source>Date</source> <translation type="unfinished">Ngày tháng</translation> </message> <message> <location line="+0"/> <source>Type</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>Address</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation type="unfinished">Lượng</translation> </message> <message numerus="yes"> <location line="+52"/> <source>Open for %n more block(s)</source> <translation type="unfinished"> <numerusform></numerusform> </translation> </message> <message> <location line="+3"/> <source>Open until %1</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Offline</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Unconfirmed</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Confirming (%1 of %2 recommended confirmations)&lt;br&gt;</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Confirmed (%1 confirmations)</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Conflicted</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Immature (%1 confirmations, will be available after %2)&lt;br&gt;</source> <translation 
type="unfinished"></translation> </message> <message> <location line="+3"/> <source>This block was not received by any other nodes&lt;br&gt; and will probably not be accepted!</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation type="unfinished"></translation> </message> <message> <location line="+61"/> <source>Received with</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Received from</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Sent to</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Payment to yourself</source> <translation type="unfinished"></translation> </message> <message> <location line="+9"/> <source>Mined - DPOR</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>Minted - (Local) DPOR</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Mined - PoR</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>Mined - Interest</source> <translation type="unfinished"></translation> </message> <message> <location line="+58"/> <source>(n/a)</source> <translation type="unfinished"></translation> </message> <message> <location line="+193"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation type="unfinished"></translation> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+55"/> <location line="+16"/> <source>All</source> <translation type="unfinished"></translation> </message> <message> <location line="-15"/> <source>Today</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>This week</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>This month</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Last month</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>This year</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Range...</source> <translation type="unfinished"></translation> </message> <message> <location line="+11"/> <source>Received with</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Sent to</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>To yourself</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Mined</source> <translation 
type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Other</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Enter address or label to search</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Min amount</source> <translation type="unfinished"></translation> </message> <message> <location line="+34"/> <source>Copy address</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Copy transaction ID</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Edit label</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Show transaction details</source> <translation type="unfinished"></translation> </message> <message> <location line="+146"/> <source>Export Transaction Data</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation type="unfinished"></translation> </message> <message> <location line="+8"/> <source>Confirmed</source> <translation type="unfinished">Đã xác nhận</translation> </message> <message> <location line="+1"/> <source>Date</source> <translation type="unfinished">Ngày tháng</translation> </message> <message> <location line="+1"/> <source>Type</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Label</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Address</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Amount</source> <translation type="unfinished">Lượng</translation> </message> <message> <location line="+1"/> <source>ID</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation type="unfinished"></translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"></translation> </message> <message> <location line="+100"/> <source>Range:</source> <translation type="unfinished"></translation> </message> <message> <location line="+8"/> <source>to</source> <translation type="unfinished"></translation> </message> </context> <context> <name>UpgradeDialog</name> <message> <location filename="../forms/upgradedialog.ui" line="+14"/> <source>Gridcoin Upgrading Facility</source> <translation type="unfinished"></translation> </message> <message> <location line="+65"/> <source>Retry Download</source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Upgrade</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <source>Hide</source> <translation type="unfinished">Ẩn</translation> </message> </context> <context> <name>VotingChartDialog</name> <message> <location filename="../votingdialog.cpp" line="-374"/> <source>Poll Results</source> <translation type="unfinished"></translation> </message> <message> <location line="+7"/> <location line="+60"/> <source>Q: </source> 
<translation type="unfinished"></translation> </message> <message> <location line="-54"/> <location line="+55"/> <source>Discussion URL: </source> <translation type="unfinished"></translation> </message> <message> <location line="-43"/> <source>Chart</source> <translation type="unfinished"></translation> </message> <message> <location line="+14"/> <source>List</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <location line="+25"/> <source>Best Answer: </source> <translation type="unfinished"></translation> </message> </context> <context> <name>VotingDialog</name> <message> <location line="-343"/> <source>Active Polls (Right Click to Vote)</source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Filter: </source> <translation type="unfinished"></translation> </message> <message> <location line="+16"/> <source>Reload Polls</source> <translation type="unfinished"></translation> </message> <message> <location line="+6"/> <source>Load History</source> <translation type="unfinished"></translation> </message> <message> <location line="+6"/> <source>Create Poll</source> <translation type="unfinished"></translation> </message> <message> <location line="+37"/> <source>...loading data!</source> <translation type="unfinished"></translation> </message> </context> <context> <name>VotingTableModel</name> <message> <location line="-387"/> <source>#</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Title</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Expiration</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Share Type</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Question</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <location line="+158"/> <source>Answers</source> <translation type="unfinished"></translation> </message> <message> <location line="-157"/> <source># Voters</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Total Shares</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>URL</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Best Answer</source> <translation type="unfinished"></translation> </message> <message> <location line="+144"/> <source>Row Number.</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Title.</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Expiration.</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Share Type.</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Question.</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>Total Participants.</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Total Shares.</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>URL.</source> <translation type="unfinished"></translation> </message> <message> <location 
line="+2"/> <source>Best Answer.</source> <translation type="unfinished"></translation> </message> </context> <context> <name>VotingVoteDialog</name> <message> <location line="+528"/> <source>PlaceVote</source> <translation type="unfinished"></translation> </message> <message> <location line="+14"/> <source>Q: </source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Discussion URL: </source> <translation type="unfinished"></translation> </message> <message> <location line="+10"/> <source>Best Answer: </source> <translation type="unfinished"></translation> </message> <message> <location line="+17"/> <source>Vote</source> <translation type="unfinished"></translation> </message> <message> <location line="+37"/> <source>Vote failed! Select one or more items to vote.</source> <translation type="unfinished"></translation> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+249"/> <source>Sending...</source> <translation type="unfinished"></translation> </message> </context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+39"/> <source>Options:</source> <translation>Lựa chọn:</translation> </message> <message> <location line="+131"/> <source>Loading addresses...</source> <translation>Đang đọc các địa chỉ...</translation> </message> <message> <location line="+18"/> <source>Insufficient funds</source> <translation>Không đủ tiền</translation> </message> <message> <location line="-33"/> <source>Loading block index...</source> <translation>Đang đọc block index...</translation> </message> <message> <location line="-147"/> <source>To use the %s option</source> <translation type="unfinished"></translation> </message> <message> <location line="+149"/> <source>Loading wallet...</source> <translation>Đang đọc ví...</translation> </message> <message> <location line="+8"/> <source>Cannot downgrade wallet</source> <translation>Không downgrade được ví</translation> </message> <message> <location line="+1"/> <source>Cannot write default address</source> <translation>Không ghi được địa chỉ mặc định</translation> </message> <message> <location line="+1"/> <source>Rescanning...</source> <translation>Đang quét lại...</translation> </message> <message> <location line="+5"/> <source>Done loading</source> <translation>Đã nạp xong</translation> </message> <message> <location line="-150"/> <source>Error</source> <translation>Lỗi</translation> </message> <message> <location line="-13"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=gridcoinrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. 
It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;Gridcoin Alert&quot; [email protected] </source> <translation type="unfinished"></translation> </message> <message> <location line="+14"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation type="unfinished"></translation> </message> <message> <location line="+5"/> <source>Gridcoin version</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Usage:</source> <translation type="unfinished">Mức sử dụng</translation> </message> <message> <location line="+1"/> <source>Send command to -server or gridcoind</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>List commands</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Get help for a command</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Gridcoin</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>This help message</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Specify configuration file (default: gridcoin.conf)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Specify pid file (default: gridcoind.pid)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Specify data directory</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Specify wallet file (within data directory)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Set database cache size in megabytes (default: 25)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Set database disk log size in megabytes (default: 100)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Connect through socks proxy</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Select the version of socks proxy to use (4-5, default: 5)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Use proxy to reach tor hidden services (default: same as -proxy)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Listen for connections on &lt;port&gt; (default: 32749 or 
testnet: 32748)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Connect only to the specified node(s)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Specify your own public address</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Find peers using internet relay chat (default: 0)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Bind to given address. Use [host]:port notation for IPv6</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Find peers using DNS lookup (default: 1)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Sync time with other nodes. Disable if time on your system is precise e.g. 
syncing with NTP (default: 1)</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Sync checkpoints policy (default: strict)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Fee per KB to add to transactions you send</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Accept command line and JSON-RPC commands</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Run in the background as a daemon and accept commands</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Use the test network</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Output extra debugging information. 
Implies all other -debug* options</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Output extra network debugging information</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Prepend debug output with timestamp</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Send trace/debug info to debugger</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Username for JSON-RPC connections</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Password for JSON-RPC connections</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 15715 or testnet: 25715)</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Require a confirmations for change (default: 0)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Upgrade wallet to latest format</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Rescan the block chain for missing wallet transactions</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>How many blocks to check at startup (default: 2500, 0 = all)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>How thorough the block verification is (0-6, default: 1)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> 
<source>Imports blocks from external blk000?.dat file</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Block creation options:</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Set minimum block size in bytes (default: 0)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Server certificate file (default: server.cert)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Invalid amount for -mininput=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Initialization sanity check failed. Gridcoin is shutting down.</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Wallet %s resides outside data directory %s.</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Cannot obtain a lock on data directory %s. Gridcoin is probably already running.</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Verifying database integrity...</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Warning: wallet.dat corrupt, data salvaged! 
Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>wallet.dat corrupt, salvage failed</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Unknown -socks proxy version requested: %i</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Invalid amount for -reservebalance=&lt;amount&gt;</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Unable to sign checkpoint, wrong checkpointkey? </source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Error loading blkindex.dat</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Error loading wallet.dat: Wallet requires newer version of Gridcoin</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Wallet needed to be rewritten: restart Gridcoin to complete</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Error loading wallet.dat</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>Importing blockchain data file.</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Importing bootstrap blockchain data file.</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Error: could not start node</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Unable to bind to %s on this computer. 
Gridcoin is probably already running.</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Error: Wallet locked, unable to create transaction </source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Error: Wallet unlocked for staking only, unable to create transaction.</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Error: Transaction creation failed </source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Sending...</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"></translation> </message> <message> <location line="+4"/> <source>Invalid amount</source> <translation type="unfinished"></translation> </message> <message> <location line="+2"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong Gridcoin will not work properly.</source> <translation type="unfinished"></translation> </message> <message> <location line="+3"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>WARNING: syncronized checkpoint violation detected, but skipped!</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>Warning: Disk space is low!</source> <translation type="unfinished"></translation> </message> <message> <location line="+1"/> <source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source> <translation type="unfinished"></translation> </message> </context> </TS><|fim▁end|>
</message> <message> <location line="+4"/>
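A quick way to audit a Qt Linguist catalogue like the row above is to count the entries still flagged unfinished. A small Python sketch follows; the filename is a placeholder, and it assumes the catalogue parses as plain XML.

import xml.etree.ElementTree as ET

# hypothetical filename; point this at the .ts catalogue above
tree = ET.parse('gridcoin_vi_VN.ts')
translations = list(tree.iter('translation'))
unfinished = sum(1 for t in translations if t.get('type') == 'unfinished')
print('%d of %d strings still need translation'
      % (unfinished, len(translations)))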
<|file_name|>test_scan.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import unittest import weakref from pyopenapi.migration.scan import Scanner, Dispatcher, scan from pyopenapi.migration.versions.v1_2.objects import ( ApiDeclaration, Authorization, Operation, ResponseMessage, Parameter) from pyopenapi.migration.versions.v3_0_0.objects import ( Header as Header3, Parameter as Parameter3, ) from ..utils import get_test_data_folder, SampleApp class CountObject(object): """ a scanner for counting objects and looking for longest attribute name. Just for test. """ class Disp(Dispatcher): pass def __init__(self): self.total = { ApiDeclaration: 0, Authorization: 0, Operation: 0, ResponseMessage: 0 } self.long_name = '' @Disp.register([ApiDeclaration, Authorization, Operation, ResponseMessage]) def _count(self, path, obj, _): self.total[obj.__class__] = self.total[obj.__class__] + 1 return path @Disp.result def _result(self, name): if len(name) > len(self.long_name): self.long_name = name class PathRecord(object): """ a scanner to record all json path """ class Disp(Dispatcher): pass def __init__(self): self.api_declaration = [] self.authorization = [] self.response_message = [] self.parameter = [] # pylint: disable=unused-argument @Disp.register([ApiDeclaration]) def _api_declaration(self, path, obj, _): self.api_declaration.append(path) # pylint: disable=unused-argument @Disp.register([Authorization]) def _authorization(self, path, obj, _): self.authorization.append(path) # pylint: disable=unused-argument @Disp.register([ResponseMessage]) def _response_message(self, path, obj, _): self.response_message.append(path) @Disp.register([Parameter]) def _parameter(self, path, obj, _): self.parameter.append(path) class ScannerTestCase(unittest.TestCase): """ test scanner """ @classmethod def setUpClass(cls): cls.app = SampleApp.load( get_test_data_folder(version='1.2', which='wordnik')) def test_count(self): scanner = Scanner(self.app) count_obj = CountObject() scanner.scan(route=[count_obj], root=self.app.raw) for name in self.app.raw.cached_apis: scanner.scan(route=[count_obj], root=self.app.raw.cached_apis[name]) self.assertEqual( len(count_obj.long_name), len('#/apis/3/operations/0/responseMessages/0')) self.assertEqual(count_obj.total, { Authorization: 1, ApiDeclaration: 3,<|fim▁hole|> def test_leaves(self): scanner = Scanner(self.app) count_obj = CountObject() scanner.scan(route=[count_obj], root=self.app.raw, leaves=[Operation]) for name in self.app.raw.cached_apis: scanner.scan( route=[count_obj], root=self.app.raw.cached_apis[name], leaves=[Operation]) # the scanning would stop at Operation, so ResponseMessage # would not be counted. 
self.assertEqual(count_obj.total, { Authorization: 1, ApiDeclaration: 3, Operation: 20, ResponseMessage: 0 }) def test_path(self): scanner = Scanner(self.app) path_record = PathRecord() scanner.scan(route=[path_record], root=self.app.raw) scanner.scan( route=[path_record], root=self.app.raw.cached_apis['store']) self.assertEqual(sorted(path_record.api_declaration), sorted(['#'])) self.assertEqual(path_record.authorization, ['#/authorizations/oauth2']) self.assertEqual( sorted(path_record.response_message), sorted([ '#/apis/0/operations/0/responseMessages/0', '#/apis/1/operations/0/responseMessages/1', '#/apis/1/operations/0/responseMessages/0', '#/apis/1/operations/1/responseMessages/1', '#/apis/1/operations/1/responseMessages/0' ])) self.assertEqual( sorted(path_record.parameter), sorted([ '#/apis/0/operations/0/parameters/0', '#/apis/1/operations/0/parameters/0', '#/apis/1/operations/1/parameters/0', ])) class ResolveTestCase(unittest.TestCase): """ test for scanner: Resolve """ @classmethod def setUpClass(cls): cls.app = SampleApp.create( get_test_data_folder(version='1.2', which='model_subtypes'), to_spec_version='2.0') def test_ref_resolve(self): """ make sure pre resolve works """ schema, _ = self.app.resolve_obj( '#/definitions/user!##!UserWithInfo/allOf/0', from_spec_version='2.0') ref = schema.get_attrs('migration').ref_obj self.assertTrue(isinstance(ref, weakref.ProxyTypes)) schema, _ = self.app.resolve_obj( '#/definitions/user!##!User', from_spec_version='2.0', ) self.assertEqual(ref, schema) class CountParemeter3(object): """ a scanner just for test """ class Disp(Dispatcher): pass def __init__(self): self.total = { Header3: 0, Parameter3: 0, } @Disp.register([Header3, Parameter3]) def _count(self, _, obj): self.total[obj.__class__] = self.total[obj.__class__] + 1 class Scanner2TestCase(unittest.TestCase): """ test case for Scanner2 """ def test_child_class_called_twice(self): """ make a callback for 'Header' and 'Parameter' would only be called once, when Header inherit Paremeter """ header = Header3({}) count_param = CountParemeter3() scan(route=[count_param], root=header) self.assertEqual(count_param.total[Header3], 1) self.assertEqual(count_param.total[Parameter3], 0)<|fim▁end|>
Operation: 20, ResponseMessage: 23 })
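Every route object in the test above follows the same visitor pattern: an inner Dispatcher subclass plus callbacks registered per object type. A minimal route that records Operation paths could look like the sketch below; the imports and the callback signature are taken from the test itself, while the class and its usage are illustrative.

from pyopenapi.migration.scan import Scanner, Dispatcher
from pyopenapi.migration.versions.v1_2.objects import Operation

class OperationPaths(object):
    """ records the json path of every Operation seen during a scan """

    class Disp(Dispatcher):
        pass

    def __init__(self):
        self.paths = []

    # pylint: disable=unused-argument
    @Disp.register([Operation])
    def _operation(self, path, obj, _):
        self.paths.append(path)

# usage, mirroring ScannerTestCase above (app is a loaded SampleApp):
#   route = OperationPaths()
#   Scanner(app).scan(route=[route], root=app.raw)
#   print(route.paths)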
<|file_name|>TimezonedbModel.ts<|end_file_name|><|fim▁begin|>export interface ITimezonedbModel {
    status: string;
    message: string;
    countryCode: string;
    countryName: string;
    zoneName: string;
    abbreviation: string;
    gmtOffset: number;
    dst: string;
    dstStart: number;
    dstEnd: number;<|fim▁hole|>
    nextAbbreviation: string;
    timestamp: number;
    formatted: string;
}<|fim▁end|>
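ITimezonedbModel matches the shape of a TimeZoneDB get-time-zone response. A fetch that yields an object with these fields might look like the following sketch; the endpoint, query parameters, and key are assumptions about the TimeZoneDB v2.1 API rather than anything stated by the interface.

import requests

resp = requests.get(
    'http://api.timezonedb.com/v2.1/get-time-zone',   # assumed endpoint
    params={
        'key': 'YOUR_API_KEY',        # hypothetical placeholder
        'format': 'json',
        'by': 'zone',
        'zone': 'Asia/Ho_Chi_Minh',
    })
data = resp.json()
# the keys correspond one-to-one with the ITimezonedbModel fields above
print(data['status'], data['zoneName'], data['gmtOffset'], data['formatted'])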
<|file_name|>test_marshaller_plugins.py<|end_file_name|><|fim▁begin|># Copyright (c) 2014-2021, Freja Nordsiek
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import os.path
import tempfile

import pkg_resources
import pytest

import hdf5storage
import hdf5storage.plugins

# Check if the example package is installed because some tests will
# depend on it.
try:
    import example_hdf5storage_marshaller_plugin
    has_example_hdf5storage_marshaller_plugin = True
except ImportError:
    has_example_hdf5storage_marshaller_plugin = False


def test_marshaller_api_versions():
    assert ('1.0', ) == \
        hdf5storage.plugins.supported_marshaller_api_versions()


def test_find_thirdparty_marshaller_plugins():
    found_example = False
    apivs = hdf5storage.plugins.supported_marshaller_api_versions()
    plugins = hdf5storage.plugins.find_thirdparty_marshaller_plugins()
    assert isinstance(plugins, dict)
    assert set(apivs) == set(plugins)
    for k, v in plugins.items():
        assert isinstance(k, str)
        assert isinstance(v, dict)
        for k2, v2 in v.items():
            assert isinstance(k2, str)
            assert isinstance(v2, pkg_resources.EntryPoint)
            if k2 == 'example_hdf5storage_marshaller_plugin':
                found_example = True
    assert has_example_hdf5storage_marshaller_plugin == found_example


# skip when the example plugin is absent (the test needs it installed)
@pytest.mark.skipif(not has_example_hdf5storage_marshaller_plugin,
                    reason='requires example_hdf5storage_marshaller_'
                    'plugin')
def test_plugin_marshaller_SubList():
    mc = hdf5storage.MarshallerCollection(load_plugins=True,
                                          lazy_loading=True)
    options = hdf5storage.Options(store_python_metadata=True,
                                  matlab_compatible=False,
                                  marshaller_collection=mc)<|fim▁hole|>
    data = example_hdf5storage_marshaller_plugin.SubList(ell)
    name = '/a'
    with tempfile.TemporaryDirectory() as folder:
        filename = os.path.join(folder, 'data.h5')
        hdf5storage.write(data, path=name, filename=filename,
                          options=options)
        out = hdf5storage.read(path=name, filename=filename,
                               options=options)
    assert ell == list(out)
    assert type(out) == example_hdf5storage_marshaller_plugin.SubList<|fim▁end|>
ell = [1, 2, 'b1', b'3991', True, None]
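Stripped of the plugin-specific pieces, the round trip in test_plugin_marshaller_SubList reduces to the calls below. Every call here appears verbatim in the test; only the stored value and the file name are illustrative.

import hdf5storage

mc = hdf5storage.MarshallerCollection(load_plugins=True, lazy_loading=True)
options = hdf5storage.Options(store_python_metadata=True,
                              matlab_compatible=False,
                              marshaller_collection=mc)

hdf5storage.write([1, 2, 'b1'], path='/a', filename='data.h5',
                  options=options)
out = hdf5storage.read(path='/a', filename='data.h5', options=options)
print(list(out))   # expected to round-trip to [1, 2, 'b1']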
<|file_name|>skill_data.py<|end_file_name|><|fim▁begin|># Copyright 2018 Mycroft AI Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Module containing methods needed to load skill data such as dialogs, intents and regular expressions. """ from os import walk from os.path import splitext, join import re from mycroft.messagebus.message import Message def load_vocab_from_file(path, vocab_type, bus): """Load Mycroft vocabulary from file The vocab is sent to the intent handler using the message bus Args: path: path to vocabulary file (*.voc) vocab_type: keyword name bus: Mycroft messagebus connection skill_id(str): skill id """ if path.endswith('.voc'): with open(path, 'r') as voc_file: for line in voc_file.readlines(): if line.startswith("#"): continue parts = line.strip().split("|") entity = parts[0] bus.emit(Message("register_vocab", { 'start': entity, 'end': vocab_type })) for alias in parts[1:]: bus.emit(Message("register_vocab", { 'start': alias, 'end': vocab_type, 'alias_of': entity })) def load_regex_from_file(path, bus, skill_id): """Load regex from file The regex is sent to the intent handler using the message bus Args: path: path to vocabulary file (*.voc) bus: Mycroft messagebus connection """ if path.endswith('.rx'): with open(path, 'r') as reg_file: for line in reg_file.readlines(): if line.startswith("#"): continue re.compile(munge_regex(line.strip(), skill_id)) bus.emit( Message("register_vocab", {'regex': munge_regex(line.strip(), skill_id)})) def load_vocabulary(basedir, bus, skill_id): """Load vocabulary from all files in the specified directory. Args: basedir (str): path of directory to load from (will recurse) bus (messagebus emitter): messagebus instance used to send the vocab to the intent service skill_id: skill the data belongs to """ for path, _, files in walk(basedir): for f in files: if f.endswith(".voc"): vocab_type = to_alnum(skill_id) + splitext(f)[0] load_vocab_from_file(join(path, f), vocab_type, bus) def load_regex(basedir, bus, skill_id): """Load regex from all files in the specified directory. <|fim▁hole|> the intent service skill_id (str): skill identifier """ for path, _, files in walk(basedir): for f in files: if f.endswith(".rx"): load_regex_from_file(join(path, f), bus, skill_id) def to_alnum(skill_id): """Convert a skill id to only alphanumeric characters Non alpha-numeric characters are converted to "_" Args: skill_id (str): identifier to be converted Returns: (str) String of letters """ return ''.join(c if c.isalnum() else '_' for c in str(skill_id)) def munge_regex(regex, skill_id): """Insert skill id as letters into match groups. Args: regex (str): regex string skill_id (str): skill identifier Returns: (str) munged regex """ base = '(?P<' + to_alnum(skill_id) return base.join(regex.split('(?P<')) def munge_intent_parser(intent_parser, name, skill_id): """Rename intent keywords to make them skill exclusive This gives the intent parser an exclusive name in the format <skill_id>:<name>. 
The keywords are given unique names in the format <Skill id as letters><Intent name>. The function will not munge instances that's already been munged Args: intent_parser: (IntentParser) object to update name: (str) Skill name skill_id: (int) skill identifier """ # Munge parser name if str(skill_id) + ':' not in name: intent_parser.name = str(skill_id) + ':' + name else: intent_parser.name = name # Munge keywords skill_id = to_alnum(skill_id) # Munge required keyword reqs = [] for i in intent_parser.requires: if skill_id not in i[0]: kw = (skill_id + i[0], skill_id + i[0]) reqs.append(kw) else: reqs.append(i) intent_parser.requires = reqs # Munge optional keywords opts = [] for i in intent_parser.optional: if skill_id not in i[0]: kw = (skill_id + i[0], skill_id + i[0]) opts.append(kw) else: opts.append(i) intent_parser.optional = opts # Munge at_least_one keywords at_least_one = [] for i in intent_parser.at_least_one: element = [skill_id + e.replace(skill_id, '') for e in i] at_least_one.append(tuple(element)) intent_parser.at_least_one = at_least_one<|fim▁end|>
Args: basedir (str): path of directory to load from bus (messagebus emitter): messagebus instance used to send the vocab to
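The munging helpers in skill_data.py are easiest to follow through a worked example. The skill id is hypothetical and the module path in the import is assumed; the expected outputs follow directly from the definitions of to_alnum and munge_regex above.

from mycroft.skills.skill_data import to_alnum, munge_regex  # path assumed

print(to_alnum('mycroft-weather.mycroftai'))
# -> mycroft_weather_mycroftai

print(munge_regex(r'what is the weather in (?P<Location>.*)',
                  'mycroft-weather.mycroftai'))
# -> what is the weather in (?P<mycroft_weather_mycroftaiLocation>.*)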
<|file_name|>hero-list.component.spec.js<|end_file_name|><|fim▁begin|>'use strict';

describe('heroList', function() {

  // Load module that contains the heroList component
  beforeEach(module('heroList'));

  describe('HeroListController', function() {

    it('should create a `heroes` model with 6 heroes', inject(function($componentController) {
      var ctrl = $componentController('heroList');
      expect(ctrl.heroes.length).toBe(6);
    }));

  });<|fim▁hole|>
});<|fim▁end|>
<|file_name|>require-min.js<|end_file_name|><|fim▁begin|>/* RequireJS 2.2.0 Copyright jQuery Foundation and other contributors. Released under MIT license, http://github.com/requirejs/requirejs/LICENSE */ var requirejs, require, define; (function(ga) { function ka(b, c, d, g) { return g || "" } function K(b) { return "[object Function]" === Q.call(b) } function L(b) { return "[object Array]" === Q.call(b) } function y(b, c) { if (b) { var d; for (d = 0; d < b.length && (!b[d] || !c(b[d], d, b)); d += 1) ; } } function X(b, c) { if (b) { var d; for (d = b.length - 1; -1 < d && (!b[d] || !c(b[d], d, b)); --d) ; } } function x(b, c) { return la.call(b, c) } function e(b, c) { return x(b, c) && b[c] } function D(b, c) { for (var d in b) if (x(b, d) && c(b[d], d)) break } function Y(b, c, d, g) { c && D(c, function(c, e) { if (d || !x(b, e)) !g || "object" !== typeof c || !c || L(c) || K(c) || c instanceof RegExp ? b[e] = c : (b[e] || (b[e] = {}), Y(b[e], c, d, g)) }); return b } function z(b, c) { return function() { return c.apply(b, arguments) } } function ha(b) { throw b; } function ia(b) { if (!b) return b; var c = ga; y(b.split("."), function(b) { c = c[b] }); return c } function F(b, c, d, g) { c = Error(c + "\nhttp://requirejs.org/docs/errors.html#" + b); c.requireType = b; c.requireModules = g; d && (c.originalError = d); return c } function ma(b) { function c(a, n, b) { var h, k, f, c, d, l, g, r; n = n && n.split("/"); var q = p.map , m = q && q["*"]; if (a) { a = a.split("/"); k = a.length - 1; p.nodeIdCompat && U.test(a[k]) && (a[k] = a[k].replace(U, "")); "." === a[0].charAt(0) && n && (k = n.slice(0, n.length - 1), a = k.concat(a)); k = a; for (f = 0; f < k.length; f++) c = k[f], "." === c ? (k.splice(f, 1), --f) : ".." === c && 0 !== f && (1 !== f || ".." !== k[2]) && ".." !== k[f - 1] && 0 < f && (k.splice(f - 1, 2), f -= 2); a = a.join("/") } if (b && q && (n || m)) { k = a.split("/"); f = k.length; a: for (; 0 < f; --f) { d = k.slice(0, f).join("/"); if (n) for (c = n.length; 0 < c; --c) if (b = e(q, n.slice(0, c).join("/"))) if (b = e(b, d)) { h = b; l = f; break a } !g && m && e(m, d) && (g = e(m, d), r = f) } !h && g && (h = g, l = r); h && (k.splice(0, l, h), a = k.join("/")) } return (h = e(p.pkgs, a)) ? h : a } function d(a) { E && y(document.getElementsByTagName("script"), function(n) { if (n.getAttribute("data-requiremodule") === a && n.getAttribute("data-requirecontext") === l.contextName) return n.parentNode.removeChild(n), !0 }) } function m(a) { var n = e(p.paths, a); if (n && L(n) && 1 < n.length) return n.shift(), l.require.undef(a), l.makeRequire(null, { skipMap: !0 })([a]), !0 } function r(a) { var n, b = a ? a.indexOf("!") : -1; -1 < b && (n = a.substring(0, b), a = a.substring(b + 1, a.length)); return [n, a] } function q(a, n, b, h) { var k, f, d = null, g = n ? n.name : null, p = a, q = !0, m = ""; a || (q = !1, a = "_@r" + (Q += 1)); a = r(a); d = a[0]; a = a[1]; d && (d = c(d, g, h), f = e(v, d)); a && (d ? m = f && f.normalize ? f.normalize(a, function(a) { return c(a, g, h) }) : -1 === a.indexOf("!") ? c(a, g, h) : a : (m = c(a, g, h), a = r(m), d = a[0], m = a[1], b = !0, k = l.nameToUrl(m))); b = !d || f || b ? "" : "_unnormalized" + (T += 1); return { prefix: d, name: m, parentMap: n, unnormalized: !!b, url: k, originalName: p, isDefine: q, id: (d ? d + "!" 
+ m : m) + b } } function u(a) { var b = a.id , c = e(t, b); c || (c = t[b] = new l.Module(a)); return c } function w(a, b, c) { var h = a.id , k = e(t, h); if (!x(v, h) || k && !k.defineEmitComplete) if (k = u(a), k.error && "error" === b) c(k.error); else k.on(b, c); else "defined" === b && c(v[h]) } function A(a, b) { var c = a.requireModules , h = !1; if (b) b(a); else if (y(c, function(b) { if (b = e(t, b)) b.error = a, b.events.error && (h = !0, b.emit("error", a)) }), !h) g.onError(a) } function B() { V.length && (y(V, function(a) { var b = a[0]; "string" === typeof b && (l.defQueueMap[b] = !0); G.push(a) }), V = []) } function C(a) { delete t[a]; delete Z[a] } function J(a, b, c) { var h = a.map.id; a.error ? a.emit("error", a.error) : (b[h] = !0, y(a.depMaps, function(h, f) { var d = h.id , g = e(t, d); !g || a.depMatched[f] || c[d] || (e(b, d) ? (a.defineDep(f, v[d]), a.check()) : J(g, b, c)) }), c[h] = !0) } function H() { var a, b, c = (a = 1E3 * p.waitSeconds) && l.startTime + a < (new Date).getTime(), h = [], k = [], f = !1, g = !0; if (!aa) { aa = !0; D(Z, function(a) { var l = a.map , e = l.id; if (a.enabled && (l.isDefine || k.push(a), !a.error)) if (!a.inited && c) m(e) ? f = b = !0 : (h.push(e), d(e)); else if (!a.inited && a.fetched && l.isDefine && (f = !0, !l.prefix)) return g = !1 }); if (c && h.length) return a = F("timeout", "Load timeout for modules: " + h, null, h), a.contextName = l.contextName, A(a); g && y(k, function(a) { J(a, {}, {}) }); c && !b || !f || !E && !ja || ba || (ba = setTimeout(function() { ba = 0; H() }, 50)); aa = !1 } } function I(a) { x(v, a[0]) || u(q(a[0], null, !0)).init(a[1], a[2]) } function O(a) { a = a.currentTarget || a.srcElement; var b = l.onScriptLoad; a.detachEvent && !ca ? a.detachEvent("onreadystatechange", b) : a.removeEventListener("load", b, !1); b = l.onScriptError; a.detachEvent && !ca || a.removeEventListener("error", b, !1); return { node: a, id: a && a.getAttribute("data-requiremodule") } } function P() { var a; for (B(); G.length; ) { a = G.shift(); if (null === a[0]) return A(F("mismatch", "Mismatched anonymous define() module: " + a[a.length - 1])); I(a) } l.defQueueMap = {} } var aa, da, l, R, ba, p = { waitSeconds: 7, baseUrl: "./", paths: {}, bundles: {}, pkgs: {}, shim: {}, config: {} }, t = {}, Z = {}, ea = {}, G = [], v = {}, W = {}, fa = {}, Q = 1, T = 1; R = { require: function(a) { return a.require ? a.require : a.require = l.makeRequire(a.map) }, exports: function(a) { a.usingExports = !0; if (a.map.isDefine) return a.exports ? v[a.map.id] = a.exports : a.exports = v[a.map.id] = {} }, module: function(a) { return a.module ? a.module : a.module = { id: a.map.id, uri: a.map.url, config: function() { return e(p.config, a.map.id) || {} }, exports: a.exports || (a.exports = {}) } } }; da = function(a) { this.events = e(ea, a.id) || {}; this.map = a; this.shim = e(p.shim, a.id); this.depExports = []; this.depMaps = []; this.depMatched = []; this.pluginMaps = {}; this.depCount = 0 } ; da.prototype = { init: function(a, b, c, h) { h = h || {}; if (!this.inited) { this.factory = b; if (c) this.on("error", c); else this.events.error && (c = z(this, function(a) { this.emit("error", a) })); this.depMaps = a && a.slice(0); this.errback = c; this.inited = !0; this.ignore = h.ignore; h.enabled || this.enabled ? 
this.enable() : this.check() } }, defineDep: function(a, b) { this.depMatched[a] || (this.depMatched[a] = !0, --this.depCount, this.depExports[a] = b) }, fetch: function() { if (!this.fetched) { this.fetched = !0; l.startTime = (new Date).getTime(); var a = this.map; if (this.shim) l.makeRequire(this.map, { enableBuildCallback: !0 })(this.shim.deps || [], z(this, function() { return a.prefix ? this.callPlugin() : this.load() })); else return a.prefix ? this.callPlugin() : this.load() } }, load: function() { var a = this.map.url; W[a] || (W[a] = !0, l.load(this.map.id, a)) }, check: function() { if (this.enabled && !this.enabling) { var a, b, c = this.map.id; b = this.depExports; var h = this.exports , k = this.factory; if (!this.inited) x(l.defQueueMap, c) || this.fetch(); else if (this.error) this.emit("error", this.error); else if (!this.defining) { this.defining = !0; if (1 > this.depCount && !this.defined) { if (K(k)) { if (this.events.error && this.map.isDefine || g.onError !== ha) try { h = l.execCb(c, k, b, h) } catch (d) { a = d } else h = l.execCb(c, k, b, h); this.map.isDefine && void 0 === h && ((b = this.module) ? h = b.exports : this.usingExports && (h = this.exports)); if (a) return a.requireMap = this.map, a.requireModules = this.map.isDefine ? [this.map.id] : null, a.requireType = this.map.isDefine ? "define" : "require", A(this.error = a) } else h = k; this.exports = h; if (this.map.isDefine && !this.ignore && (v[c] = h, g.onResourceLoad)) { var f = []; y(this.depMaps, function(a) { f.push(a.normalizedMap || a) }); g.onResourceLoad(l, this.map, f) } C(c); this.defined = !0 } this.defining = !1; this.defined && !this.defineEmitted && (this.defineEmitted = !0, this.emit("defined", this.exports), this.defineEmitComplete = !0) } } }, callPlugin: function() { var a = this.map , b = a.id , d = q(a.prefix); this.depMaps.push(d); w(d, "defined", z(this, function(h) { var k, f, d = e(fa, this.map.id), M = this.map.name, r = this.map.parentMap ? this.map.parentMap.name : null, m = l.makeRequire(a.parentMap, { enableBuildCallback: !0 }); if (this.map.unnormalized) { if (h.normalize && (M = h.normalize(M, function(a) { return c(a, r, !0) }) || ""), f = q(a.prefix + "!" + M, this.map.parentMap), w(f, "defined", z(this, function(a) { this.map.normalizedMap = f; this.init([], function() { return a }, null, { enabled: !0, ignore: !0 }) })), h = e(t, f.id)) { this.depMaps.push(f); if (this.events.error) h.on("error", z(this, function(a) { this.emit("error", a) })); h.enable() } } else d ? (this.map.url = l.nameToUrl(d), this.load()) : (k = z(this, function(a) { this.init([], function() { return a }, null, { enabled: !0 }) }), k.error = z(this, function(a) { this.inited = !0; this.error = a; a.requireModules = [b]; D(t, function(a) { 0 === a.map.id.indexOf(b + "_unnormalized") && C(a.map.id) }); A(a) }), k.fromText = z(this, function(h, c) { var d = a.name , f = q(d) , M = S; c && (h = c); M && (S = !1); u(f); x(p.config, b) && (p.config[d] = p.config[b]); try { g.exec(h) } catch (e) { return A(F("fromtexteval", "fromText eval for " + b + " failed: " + e, e, [b])) } M && (S = !0); this.depMaps.push(f); l.completeLoad(d); m([d], k) }), h.load(a.name, m, k, p)) })); l.enable(d, this); this.pluginMaps[d.id] = d }, enable: function() { Z[this.map.id] = this; this.enabling = this.enabled = !0; y(this.depMaps, z(this, function(a, b) { var c, h; if ("string" === typeof a) { a = q(a, this.map.isDefine ? 
this.map : this.map.parentMap, !1, !this.skipMap); this.depMaps[b] = a; if (c = e(R, a.id)) { this.depExports[b] = c(this); return } this.depCount += 1; w(a, "defined", z(this, function(a) { this.undefed || (this.defineDep(b, a), this.check()) })); this.errback ? w(a, "error", z(this, this.errback)) : this.events.error && w(a, "error", z(this, function(a) { this.emit("error", a) })) } c = a.id; h = t[c]; x(R, c) || !h || h.enabled || l.enable(a, this) })); D(this.pluginMaps, z(this, function(a) { var b = e(t, a.id); b && !b.enabled && l.enable(a, this) })); this.enabling = !1; this.check() }, on: function(a, b) { var c = this.events[a]; c || (c = this.events[a] = []); c.push(b) }, emit: function(a, b) { y(this.events[a], function(a) { a(b) }); "error" === a && delete this.events[a] } }; l = { config: p, contextName: b, registry: t, defined: v, urlFetched: W, defQueue: G, defQueueMap: {}, Module: da, makeModuleMap: q, nextTick: g.nextTick, onError: A, configure: function(a) { a.baseUrl && "/" !== a.baseUrl.charAt(a.baseUrl.length - 1) && (a.baseUrl += "/"); if ("string" === typeof a.urlArgs) { var b = a.urlArgs; a.urlArgs = function(a, c) { return (-1 === c.indexOf("?") ? "?" : "&") + b } } var c = p.shim , h = { paths: !0, bundles: !0, config: !0, map: !0 }; D(a, function(a, b) { h[b] ? (p[b] || (p[b] = {}), Y(p[b], a, !0, !0)) : p[b] = a }); a.bundles && D(a.bundles, function(a, b) { y(a, function(a) { a !== b && (fa[a] = b) }) }); a.shim && (D(a.shim, function(a, b) { L(a) && (a = { deps: a }); !a.exports && !a.init || a.exportsFn || (a.exportsFn = l.makeShimExports(a)); c[b] = a }), p.shim = c); a.packages && y(a.packages, function(a) { var b; a = "string" === typeof a ? { name: a } : a; b = a.name; a.location && (p.paths[b] = a.location); p.pkgs[b] = a.name + "/" + (a.main || "main").replace(na, "").replace(U, "") }); D(t, function(a, b) { a.inited || a.map.unnormalized || (a.map = q(b, null, !0)) }); (a.deps || a.callback) && l.require(a.deps || [], a.callback) }, makeShimExports: function(a) { return function() { var b; a.init && (b = a.init.apply(ga, arguments)); return b || a.exports && ia(a.exports) } }, makeRequire: function(a, n) { function m(c, d, f) { var e, r; n.enableBuildCallback && d && K(d) && (d.__requireJsBuild = !0); if ("string" === typeof c) { if (K(d)) return A(F("requireargs", "Invalid require call"), f); if (a && x(R, c)) return R[c](t[a.id]); if (g.get) return g.get(l, c, a, m); e = q(c, a, !1, !0); e = e.id; return x(v, e) ? v[e] : A(F("notloaded", 'Module name "' + e + '" has not been loaded yet for context: ' + b + (a ? "" : ". Use require([])"))) } P(); l.nextTick(function() { P(); r = u(q(null, a)); r.skipMap = n.skipMap; r.init(c, d, f, { enabled: !0 }); H() }); return m } n = n || {}; Y(m, { isBrowser: E, toUrl: function(b) { var d, f = b.lastIndexOf("."), g = b.split("/")[0]; -1 !== f && ("." !== g && ".." 
!== g || 1 < f) && (d = b.substring(f, b.length), b = b.substring(0, f)); return l.nameToUrl(c(b, a && a.id, !0), d, !0) }, defined: function(b) { return x(v, q(b, a, !1, !0).id) }, specified: function(b) { b = q(b, a, !1, !0).id; return x(v, b) || x(t, b) } }); a || (m.undef = function(b) { B(); var c = q(b, a, !0) , f = e(t, b); f.undefed = !0; d(b); delete v[b]; delete W[c.url]; delete ea[b]; X(G, function(a, c) { a[0] === b && G.splice(c, 1) }); delete l.defQueueMap[b]; f && (f.events.defined && (ea[b] = f.events), C(b)) } ); return m }, enable: function(a) { e(t, a.id) && u(a).enable() }, completeLoad: function(a) { var b, c, d = e(p.shim, a) || {}, g = d.exports; for (B(); G.length; ) { c = G.shift(); if (null === c[0]) { c[0] = a; if (b) break; b = !0 } else c[0] === a && (b = !0); I(c) } l.defQueueMap = {}; c = e(t, a); if (!b && !x(v, a) && c && !c.inited) if (!p.enforceDefine || g && ia(g)) I([a, d.deps || [], d.exportsFn]); else return m(a) ? void 0 : A(F("nodefine", "No define call for " + a, null, [a]));<|fim▁hole|> nameToUrl: function(a, b, c) { var d, k, f, m; (d = e(p.pkgs, a)) && (a = d); if (d = e(fa, a)) return l.nameToUrl(d, b, c); if (g.jsExtRegExp.test(a)) d = a + (b || ""); else { d = p.paths; k = a.split("/"); for (f = k.length; 0 < f; --f) if (m = k.slice(0, f).join("/"), m = e(d, m)) { L(m) && (m = m[0]); k.splice(0, f, m); break } d = k.join("/"); d += b || (/^data\:|^blob\:|\?/.test(d) || c ? "" : ".js"); d = ("/" === d.charAt(0) || d.match(/^[\w\+\.\-]+:/) ? "" : p.baseUrl) + d } return p.urlArgs && !/^blob\:/.test(d) ? d + p.urlArgs(a, d) : d }, load: function(a, b) { g.load(l, a, b) }, execCb: function(a, b, c, d) { return b.apply(d, c) }, onScriptLoad: function(a) { if ("load" === a.type || oa.test((a.currentTarget || a.srcElement).readyState)) N = null, a = O(a), l.completeLoad(a.id) }, onScriptError: function(a) { var b = O(a); if (!m(b.id)) { var c = []; D(t, function(a, d) { 0 !== d.indexOf("_@r") && y(a.depMaps, function(a) { if (a.id === b.id) return c.push(d), !0 }) }); return A(F("scripterror", 'Script error for "' + b.id + (c.length ? '", needed by: ' + c.join(", ") : '"'), a, [b.id])) } } }; l.require = l.makeRequire(); return l } function pa() { if (N && "interactive" === N.readyState) return N; X(document.getElementsByTagName("script"), function(b) { if ("interactive" === b.readyState) return N = b }); return N } var g, B, C, H, O, I, N, P, u, T, qa = /(\/\*([\s\S]*?)\*\/|([^:]|^)\/\/(.*)$)/mg, ra = /[^.]\s*require\s*\(\s*["']([^'"\s]+)["']\s*\)/g, U = /\.js$/, na = /^\.\//; B = Object.prototype; var Q = B.toString , la = B.hasOwnProperty , E = !("undefined" === typeof window || "undefined" === typeof navigator || !window.document) , ja = !E && "undefined" !== typeof importScripts , oa = E && "PLAYSTATION 3" === navigator.platform ? /^complete$/ : /^(complete|loaded)$/ , ca = "undefined" !== typeof opera && "[object Opera]" === opera.toString() , J = {} , w = {} , V = [] , S = !1; if ("undefined" === typeof define) { if ("undefined" !== typeof requirejs) { if (K(requirejs)) return; w = requirejs; requirejs = void 0 } "undefined" === typeof require || K(require) || (w = require, require = void 0); g = requirejs = function(b, c, d, m) { var r, q = "_"; L(b) || "string" === typeof b || (r = b, L(c) ? 
(b = c, c = d, d = m) : b = []); r && r.context && (q = r.context); (m = e(J, q)) || (m = J[q] = g.s.newContext(q)); r && m.configure(r); return m.require(b, c, d) } ; g.config = function(b) { return g(b) } ; g.nextTick = "undefined" !== typeof setTimeout ? function(b) { setTimeout(b, 4) } : function(b) { b() } ; require || (require = g); g.version = "2.2.0"; g.jsExtRegExp = /^\/|:|\?|\.js$/; g.isBrowser = E; B = g.s = { contexts: J, newContext: ma }; g({}); y(["toUrl", "undef", "defined", "specified"], function(b) { g[b] = function() { var c = J._; return c.require[b].apply(c, arguments) } }); E && (C = B.head = document.getElementsByTagName("head")[0], H = document.getElementsByTagName("base")[0]) && (C = B.head = H.parentNode); g.onError = ha; g.createNode = function(b, c, d) { c = b.xhtml ? document.createElementNS("http://www.w3.org/1999/xhtml", "html:script") : document.createElement("script"); c.type = b.scriptType || "text/javascript"; c.charset = "utf-8"; c.async = !0; return c } ; g.load = function(b, c, d) { var m = b && b.config || {}, e; if (E) { e = g.createNode(m, c, d); e.setAttribute("data-requirecontext", b.contextName); e.setAttribute("data-requiremodule", c); !e.attachEvent || e.attachEvent.toString && 0 > e.attachEvent.toString().indexOf("[native code") || ca ? (e.addEventListener("load", b.onScriptLoad, !1), e.addEventListener("error", b.onScriptError, !1)) : (S = !0, e.attachEvent("onreadystatechange", b.onScriptLoad)); e.src = d; if (m.onNodeCreated) m.onNodeCreated(e, m, c, d); P = e; H ? C.insertBefore(e, H) : C.appendChild(e); P = null; return e } if (ja) try { setTimeout(function() {}, 0), importScripts(d), b.completeLoad(c) } catch (q) { b.onError(F("importscripts", "importScripts failed for " + c + " at " + d, q, [c])) } } ; E && !w.skipDataMain && X(document.getElementsByTagName("script"), function(b) { C || (C = b.parentNode); if (O = b.getAttribute("data-main")) return u = O, w.baseUrl || -1 !== u.indexOf("!") || (I = u.split("/"), u = I.pop(), T = I.length ? I.join("/") + "/" : "./", w.baseUrl = T), u = u.replace(U, ""), g.jsExtRegExp.test(u) && (u = O), w.deps = w.deps ? w.deps.concat(u) : [u], !0 }); define = function(b, c, d) { var e, g; "string" !== typeof b && (d = c, c = b, b = null); L(c) || (d = c, c = null); !c && K(d) && (c = [], d.length && (d.toString().replace(qa, ka).replace(ra, function(b, d) { c.push(d) }), c = (1 === d.length ? ["require"] : ["require", "exports", "module"]).concat(c))); S && (e = P || pa()) && (b || (b = e.getAttribute("data-requiremodule")), g = J[e.getAttribute("data-requirecontext")]); g ? (g.defQueue.push([b, c, d]), g.defQueueMap[b] = !0) : V.push([b, c, d]) } ; define.amd = { jQuery: !0 }; g.exec = function(b) { return eval(b) } ; g(w) } } )(this);<|fim▁end|>
H() },
<|file_name|>file.go<|end_file_name|><|fim▁begin|>// Mgmt // Copyright (C) 2013-2016+ James Shubin and the project contributors // Written by James Shubin <[email protected]> and the project contributors // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. package resources import ( "bytes" "crypto/sha256" "encoding/gob" "encoding/hex" "fmt" "io" "io/ioutil" "log" "os" "path" "path/filepath" "strings" "time" "github.com/purpleidea/mgmt/event" "github.com/purpleidea/mgmt/global" // XXX: package mgmtmain instead? "github.com/purpleidea/mgmt/recwatch" "github.com/purpleidea/mgmt/util" errwrap "github.com/pkg/errors" ) func init() { gob.Register(&FileRes{}) } // FileRes is a file and directory resource. type FileRes struct { BaseRes `yaml:",inline"` Path string `yaml:"path"` // path variable (should default to name) Dirname string `yaml:"dirname"` Basename string `yaml:"basename"` Content *string `yaml:"content"` // nil to mark as undefined Source string `yaml:"source"` // file path for source content State string `yaml:"state"` // state: exists/present?, absent, (undefined?) Recurse bool `yaml:"recurse"` Force bool `yaml:"force"` path string // computed path isDir bool // computed isDir sha256sum string recWatcher *recwatch.RecWatcher } // NewFileRes is a constructor for this resource. It also calls Init() for you. func NewFileRes(name, path, dirname, basename string, content *string, source, state string, recurse, force bool) (*FileRes, error) { obj := &FileRes{ BaseRes: BaseRes{ Name: name, }, Path: path, Dirname: dirname, Basename: basename, Content: content, Source: source, State: state, Recurse: recurse, Force: force, } return obj, obj.Init() } // Init runs some startup code for this resource. func (obj *FileRes) Init() error { obj.sha256sum = "" if obj.Path == "" { // use the name as the path default if missing obj.Path = obj.BaseRes.Name } obj.path = obj.GetPath() // compute once obj.isDir = strings.HasSuffix(obj.path, "/") // dirs have trailing slashes obj.BaseRes.kind = "File" return obj.BaseRes.Init() // call base init, b/c we're overriding } // GetPath returns the actual path to use for this resource. It computes this // after analysis of the Path, Dirname and Basename values. Dirs end with slash. func (obj *FileRes) GetPath() string { d := util.Dirname(obj.Path) b := util.Basename(obj.Path) if obj.Dirname == "" && obj.Basename == "" { return obj.Path } if obj.Dirname == "" { return d + obj.Basename } if obj.Basename == "" { return obj.Dirname + b } // if obj.dirname != "" && obj.basename != "" return obj.Dirname + obj.Basename } // Validate reports any problems with the struct definition. 
func (obj *FileRes) Validate() error { if obj.Dirname != "" && !strings.HasSuffix(obj.Dirname, "/") { return fmt.Errorf("Dirname must end with a slash.") } if strings.HasPrefix(obj.Basename, "/") { return fmt.Errorf("Basename must not start with a slash.") } if obj.Content != nil && obj.Source != "" { return fmt.Errorf("Can't specify both Content and Source.") } if obj.isDir && obj.Content != nil { // makes no sense return fmt.Errorf("Can't specify Content when creating a Dir.") } // XXX: should this specify that we create an empty directory instead? //if obj.Source == "" && obj.isDir { // return fmt.Errorf("Can't specify an empty source when creating a Dir.") //} return nil } // Watch is the primary listener for this resource and it outputs events. // This one is a file watcher for files and directories. // Modify with caution, it is probably important to write some test cases first! // If the Watch returns an error, it means that something has gone wrong, and it // must be restarted. On a clean exit it returns nil. // FIXME: Also watch the source directory when using obj.Source !!! func (obj *FileRes) Watch(processChan chan event.Event) error { if obj.IsWatching() { return nil // TODO: should this be an error? } obj.SetWatching(true) defer obj.SetWatching(false) cuid := obj.converger.Register() defer cuid.Unregister() var startup bool Startup := func(block bool) <-chan time.Time { if block { return nil // blocks forever //return make(chan time.Time) // blocks forever } return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout } var err error obj.recWatcher, err = recwatch.NewRecWatcher(obj.Path, obj.Recurse) if err != nil { return err } defer obj.recWatcher.Close() var send = false // send event? var exit = false for { if global.DEBUG { log.Printf("%s[%s]: Watching: %s", obj.Kind(), obj.GetName(), obj.Path) // attempting to watch... } obj.SetState(ResStateWatching) // reset select { case event, ok := <-obj.recWatcher.Events(): if !ok { // channel shutdown return nil } cuid.SetConverged(false) if err := event.Error; err != nil { return errwrap.Wrapf(err, "Unknown %s[%s] watcher error", obj.Kind(), obj.GetName()) } if global.DEBUG { // don't access event.Body if event.Error isn't nil log.Printf("%s[%s]: Event(%s): %v", obj.Kind(), obj.GetName(), event.Body.Name, event.Body.Op) } send = true obj.StateOK(false) // dirty case event := <-obj.Events(): cuid.SetConverged(false) if exit, send = obj.ReadEvent(&event); exit { return nil // exit } //obj.StateOK(false) // dirty // these events don't invalidate state case <-cuid.ConvergedTimer(): cuid.SetConverged(true) // converged! continue case <-Startup(startup): cuid.SetConverged(false) send = true obj.StateOK(false) // dirty } // do all our event sending all together to avoid duplicate msgs if send { startup = true // startup finished send = false if exit, err := obj.DoSend(processChan, ""); exit || err != nil { return err // we exit or bubble up a NACK... } } } } // smartPath adds a trailing slash to the path if it is a directory. func smartPath(fileInfo os.FileInfo) string { smartPath := fileInfo.Name() // absolute path if fileInfo.IsDir() { smartPath += "/" // add a trailing slash for dirs } return smartPath } // FileInfo is an enhanced variant of the traditional os.FileInfo struct. It can // store both the absolute and the relative paths (when built from our ReadDir), // and those two paths contain a trailing slash when they refer to a directory. 
type FileInfo struct {
	os.FileInfo        // embed
	AbsPath string // smart variant
	RelPath string // smart variant
}

// ReadDir reads a directory path, and returns a list of enhanced FileInfo's.
func ReadDir(path string) ([]FileInfo, error) {
	if !strings.HasSuffix(path, "/") { // dirs have trailing slashes
		return nil, fmt.Errorf("Path must be a directory.")
	}
	output := []FileInfo{} // my file info
	fileInfos, err := ioutil.ReadDir(path)
	if os.IsNotExist(err) {
		return output, err // return empty list
	}
	if err != nil {
		return nil, err
	}
	for _, fi := range fileInfos {
		abs := path + smartPath(fi)
		rel, err := filepath.Rel(path, abs) // NOTE: calls Clean()
		if err != nil { // shouldn't happen
			return nil, errwrap.Wrapf(err, "ReadDir: Unhandled error")
		}
		if fi.IsDir() {
			rel += "/" // add a trailing slash for dirs
		}
		x := FileInfo{
			FileInfo: fi,
			AbsPath:  abs,
			RelPath:  rel,
		}
		output = append(output, x)
	}
	return output, nil
}

// mapPaths adds a trailing slash to every path that is a directory. It
// returns the data as a map where the keys are the smart paths and where the
// values are the original os.FileInfo entries.
func mapPaths(fileInfos []FileInfo) map[string]FileInfo {
	paths := make(map[string]FileInfo)
	for _, fileInfo := range fileInfos {
		paths[fileInfo.RelPath] = fileInfo
	}
	return paths
}

// fileCheckApply is the CheckApply operation for a source and destination file.
// It can accept an io.Reader as the source, which can be a regular file, or it
// can be a bytes Buffer struct. It can take an input sha256 hash to use instead
// of computing the source data hash, and it returns the computed value if this
// function reaches that stage. As usual, it respects the apply action variable,
// and, in symmetry with the main CheckApply function, it returns checkOK and
// error.
func (obj *FileRes) fileCheckApply(apply bool, src io.ReadSeeker, dst string, sha256sum string) (string, bool, error) {
	// TODO: does it make sense to switch dst to an io.Writer ?
	// TODO: use obj.Force when dealing with symlinks and other file types!
	if global.DEBUG {
		log.Printf("fileCheckApply: %s -> %s", src, dst)
	}

	srcFile, isFile := src.(*os.File)
	_, isBytes := src.(*bytes.Reader) // supports seeking!
	if !isFile && !isBytes {
		return "", false, fmt.Errorf("Can't open src as either file or buffer!")
	}

	var srcStat os.FileInfo
	if isFile {
		var err error
		srcStat, err = srcFile.Stat()
		if err != nil {
			return "", false, err
		}
		// TODO: deal with symlinks
		if !srcStat.Mode().IsRegular() { // can't copy non-regular files or dirs
			return "", false, fmt.Errorf("Non-regular src file: %s (%q)", srcStat.Name(), srcStat.Mode())
		}
	}

	dstFile, err := os.Open(dst)
	if err != nil && !os.IsNotExist(err) { // ignore ErrNotExist errors
		return "", false, err
	}
	dstClose := func() error {
		return dstFile.Close() // calling this twice is safe :)
	}
	defer dstClose()
	dstExists := !os.IsNotExist(err)

	dstStat, err := dstFile.Stat()
	if err != nil && dstExists {
		return "", false, err
	}

	if dstExists && dstStat.IsDir() { // oops, dst is a dir, and we want a file...
		if !apply {
			return "", false, nil
		}
		if !obj.Force {
			return "", false, fmt.Errorf("Can't force dir into file: %s", dst)
		}

		cleanDst := path.Clean(dst)
		if cleanDst == "" || cleanDst == "/" {
			return "", false, fmt.Errorf("Don't want to remove root!") // safety
		}

		// FIXME: respect obj.Recurse here...
		// there is a dir here, where we want a file...
log.Printf("fileCheckApply: Removing (force): %s", cleanDst) if err := os.RemoveAll(cleanDst); err != nil { // dangerous ;) return "", false, err } dstExists = false // now it's gone! } else if err == nil { if !dstStat.Mode().IsRegular() { return "", false, fmt.Errorf("Non-regular dst file: %s (%q)", dstStat.Name(), dstStat.Mode()) } if isFile && os.SameFile(srcStat, dstStat) { // same inode, we're done! return "", true, nil } } if dstExists { // if dst doesn't exist, no need to compare hashes // hash comparison (efficient because we can cache hash of content str) if sha256sum == "" { // cache is invalid hash := sha256.New() // TODO: file existence test? if _, err := io.Copy(hash, src); err != nil { return "", false, err } sha256sum = hex.EncodeToString(hash.Sum(nil)) // since we re-use this src handler below, it is // *critical* to seek to 0, or we'll copy nothing! if n, err := src.Seek(0, 0); err != nil || n != 0 { return sha256sum, false, err } } // dst hash hash := sha256.New() if _, err := io.Copy(hash, dstFile); err != nil { return "", false, err } if h := hex.EncodeToString(hash.Sum(nil)); h == sha256sum { return sha256sum, true, nil // same! } } // state is not okay, no work done, exit, but without error if !apply { return sha256sum, false, nil } if global.DEBUG { log.Printf("fileCheckApply: Apply: %s -> %s", src, dst) } dstClose() // unlock file usage so we can write to it dstFile, err = os.Create(dst) if err != nil { return sha256sum, false, err } defer dstFile.Close() // TODO: is this redundant because of the earlier defered Close() ? if isFile { // set mode because it's a new file if err := dstFile.Chmod(srcStat.Mode()); err != nil { return sha256sum, false, err } } // TODO: attempt to reflink with Splice() and int(file.Fd()) as input... // syscall.Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) // TODO: should we offer a way to cancel the copy on ^C ? if global.DEBUG { log.Printf("fileCheckApply: Copy: %s -> %s", src, dst) } if n, err := io.Copy(dstFile, src); err != nil { return sha256sum, false, err } else if global.DEBUG { log.Printf("fileCheckApply: Copied: %v", n) } return sha256sum, false, dstFile.Sync() } // syncCheckApply is the CheckApply operation for a source and destination dir. // It is recursive and can create directories directly, and files via the usual // fileCheckApply method. It returns checkOK and error as is normally expected. func (obj *FileRes) syncCheckApply(apply bool, src, dst string) (bool, error) { if global.DEBUG { log.Printf("syncCheckApply: %s -> %s", src, dst) } if src == "" || dst == "" { return false, fmt.Errorf("The src and dst must not be empty!") } var checkOK = true // TODO: handle ./ cases or ../ cases that need cleaning ? srcIsDir := strings.HasSuffix(src, "/") dstIsDir := strings.HasSuffix(dst, "/") if srcIsDir != dstIsDir { return false, fmt.Errorf("The src and dst must be both either files or directories.") } if !srcIsDir && !dstIsDir { if global.DEBUG { log.Printf("syncCheckApply: %s -> %s", src, dst) } fin, err := os.Open(src) if err != nil { if global.DEBUG && os.IsNotExist(err) { // if we get passed an empty src log.Printf("syncCheckApply: Missing src: %s", src) } return false, err } _, checkOK, err := obj.fileCheckApply(apply, fin, dst, "") if err != nil { fin.Close() return false, err } return checkOK, fin.Close() } // else: if srcIsDir && dstIsDir srcFiles, err := ReadDir(src) // if src does not exist... if err != nil && !os.IsNotExist(err) { // an empty map comes out below! 
return false, err } dstFiles, err := ReadDir(dst) if err != nil && !os.IsNotExist(err) { return false, err } //log.Printf("syncCheckApply: srcFiles: %v", srcFiles) //log.Printf("syncCheckApply: dstFiles: %v", dstFiles) smartSrc := mapPaths(srcFiles) smartDst := mapPaths(dstFiles) for relPath, fileInfo := range smartSrc { absSrc := fileInfo.AbsPath // absolute path absDst := dst + relPath // absolute dest if _, exists := smartDst[relPath]; !exists { if fileInfo.IsDir() { if !apply { // only checking and not identical! return false, nil } // file exists, but we want a dir: we need force // we check for the file w/o the smart dir slash relPathFile := strings.TrimSuffix(relPath, "/") if _, ok := smartDst[relPathFile]; ok { absCleanDst := path.Clean(absDst) if !obj.Force { return false, fmt.Errorf("Can't force file into dir: %s", absCleanDst) } if absCleanDst == "" || absCleanDst == "/" { return false, fmt.Errorf("Don't want to remove root!") // safety } log.Printf("syncCheckApply: Removing (force): %s", absCleanDst) if err := os.Remove(absCleanDst); err != nil { return false, err } delete(smartDst, relPathFile) // rm from purge list } if global.DEBUG { log.Printf("syncCheckApply: mkdir -m %s '%s'", fileInfo.Mode(), absDst) } if err := os.Mkdir(absDst, fileInfo.Mode()); err != nil { return false, err } checkOK = false // we did some work } // if we're a regular file, the recurse will create it } if global.DEBUG { log.Printf("syncCheckApply: Recurse: %s -> %s", absSrc, absDst) } if obj.Recurse { if c, err := obj.syncCheckApply(apply, absSrc, absDst); err != nil { // recurse return false, errwrap.Wrapf(err, "syncCheckApply: Recurse failed") } else if !c { // don't let subsequent passes make this true checkOK = false } } if !apply && !checkOK { // check failed, and no apply to do, so exit! return false, nil } delete(smartDst, relPath) // rm from purge list } if !apply && len(smartDst) > 0 { // we know there are files to remove! return false, nil // so just exit now } // any files that now remain in smartDst need to be removed... for relPath, fileInfo := range smartDst { absSrc := src + relPath // absolute dest (should not exist!) absDst := fileInfo.AbsPath // absolute path (should get removed) absCleanDst := path.Clean(absDst) if absCleanDst == "" || absCleanDst == "/" { return false, fmt.Errorf("Don't want to remove root!") // safety } // FIXME: respect obj.Recurse here... // NOTE: we could use os.RemoveAll instead of recursing, but I // think the symmetry is more elegant and correct here for now // Avoiding this is also useful if we had a recurse limit arg! if true { // switch log.Printf("syncCheckApply: Removing: %s", absCleanDst) if apply { if err := os.RemoveAll(absCleanDst); err != nil { // dangerous ;) return false, err } checkOK = false } continue } _ = absSrc //log.Printf("syncCheckApply: Recurse rm: %s -> %s", absSrc, absDst) //if c, err := obj.syncCheckApply(apply, absSrc, absDst); err != nil { // return false, errwrap.Wrapf(err, "syncCheckApply: Recurse rm failed") //} else if !c { // don't let subsequent passes make this true // checkOK = false //} //log.Printf("syncCheckApply: Removing: %s", absCleanDst) //if apply { // safety // if err := os.Remove(absCleanDst); err != nil { // return false, err // } // checkOK = false //} } return checkOK, nil } // contentCheckApply performs a CheckApply for the file existence and content. 
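// It removes the path when State is "absent", returns early when Content is
// undefined (nil), and otherwise writes out the inline Content or syncs from
// the Source path.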
func (obj *FileRes) contentCheckApply(apply bool) (checkOK bool, _ error) { log.Printf("%s[%s]: contentCheckApply(%t)", obj.Kind(), obj.GetName(), apply) if obj.State == "absent" { if _, err := os.Stat(obj.path); os.IsNotExist(err) { // no such file or directory, but // file should be missing, phew :) return true, nil } else if err != nil { // what could this error be? return false, err } // state is not okay, no work done, exit, but without error if !apply { return false, nil } // apply portion if obj.path == "" || obj.path == "/" { return false, fmt.Errorf("Don't want to remove root!") // safety } log.Printf("contentCheckApply: Removing: %s", obj.path) // FIXME: respect obj.Recurse here... // TODO: add recurse limit here err := os.RemoveAll(obj.path) // dangerous ;) return false, err // either nil or not } // content is not defined, leave it alone... if obj.Content == nil { return true, nil } if obj.Source == "" { // do the obj.Content checks first... if obj.isDir { // TODO: should we create an empty dir this way? log.Fatal("XXX: Not implemented!") // XXX } bufferSrc := bytes.NewReader([]byte(*obj.Content)) sha256sum, checkOK, err := obj.fileCheckApply(apply, bufferSrc, obj.path, obj.sha256sum) if sha256sum != "" { // empty values mean errored or didn't hash // this can be valid even when the whole function errors obj.sha256sum = sha256sum // cache value } if err != nil { return false, err } // if no err, but !ok, then... return checkOK, nil // success } checkOK, err := obj.syncCheckApply(apply, obj.Source, obj.path) if err != nil { log.Printf("syncCheckApply: Error: %v", err) return false, err } return checkOK, nil } // CheckApply checks the resource state and applies the resource if the bool // input is true. It returns error info and if the state check passed or not. func (obj *FileRes) CheckApply(apply bool) (checkOK bool, _ error) { checkOK = true if c, err := obj.contentCheckApply(apply); err != nil { return false, err } else if !c { checkOK = false } // TODO //if c, err := obj.chmodCheckApply(apply); err != nil { // return false, err //} else if !c { // checkOK = false //} // TODO //if c, err := obj.chownCheckApply(apply); err != nil { // return false, err //} else if !c { // checkOK = false //} return checkOK, nil // w00t } // FileUID is the UID struct for FileRes. type FileUID struct { BaseUID path string } // IFF aka if and only if they are equivalent, return true. If not, false. func (obj *FileUID) IFF(uid ResUID) bool { res, ok := uid.(*FileUID) if !ok { return false } return obj.path == res.path } // FileResAutoEdges holds the state of the auto edge generator. type FileResAutoEdges struct { data []ResUID pointer int found bool } // Next returns the next automatic edge. func (obj *FileResAutoEdges) Next() []ResUID {<|fim▁hole|> log.Fatal("Shouldn't be called anymore!") } if len(obj.data) == 0 { // check length for rare scenarios return nil } value := obj.data[obj.pointer] obj.pointer++ return []ResUID{value} // we return one, even though api supports N } // Test gets results of the earlier Next() call, & returns if we should continue! func (obj *FileResAutoEdges) Test(input []bool) bool { // if there aren't any more remaining if len(obj.data) <= obj.pointer { return false } if obj.found { // already found, done! return false } if len(input) != 1 { // in case we get given bad data log.Fatal("Expecting a single value!") } if input[0] { // if a match is found, we're done! obj.found = true // no more to find! 
return false } return true // keep going } // AutoEdges generates a simple linear sequence of each parent directory from // the bottom up! func (obj *FileRes) AutoEdges() AutoEdge { var data []ResUID // store linear result chain here... values := util.PathSplitFullReversed(obj.path) // build it _, values = values[0], values[1:] // get rid of first value which is me! for _, x := range values { var reversed = true // cheat by passing a pointer data = append(data, &FileUID{ BaseUID: BaseUID{ name: obj.GetName(), kind: obj.Kind(), reversed: &reversed, }, path: x, // what matters }) // build list } return &FileResAutoEdges{ data: data, pointer: 0, found: false, } } // GetUIDs includes all params to make a unique identification of this object. // Most resources only return one, although some resources can return multiple. func (obj *FileRes) GetUIDs() []ResUID { x := &FileUID{ BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()}, path: obj.path, } return []ResUID{x} } // GroupCmp returns whether two resources can be grouped together or not. func (obj *FileRes) GroupCmp(r Res) bool { _, ok := r.(*FileRes) if !ok { return false } // TODO: we might be able to group directory children into a single // recursive watcher in the future, thus saving fanotify watches return false // not possible atm } // Compare two resources and return if they are equivalent. func (obj *FileRes) Compare(res Res) bool { switch res.(type) { case *FileRes: res := res.(*FileRes) if !obj.BaseRes.Compare(res) { // call base Compare return false } if obj.Name != res.Name { return false } if obj.path != res.Path { return false } if (obj.Content == nil) != (res.Content == nil) { // xor return false } if obj.Content != nil && res.Content != nil { if *obj.Content != *res.Content { // compare the strings return false } } if obj.Source != res.Source { return false } if obj.State != res.State { return false } if obj.Recurse != res.Recurse { return false } if obj.Force != res.Force { return false } default: return false } return true } // CollectPattern applies the pattern for collection resources. func (obj *FileRes) CollectPattern(pattern string) { // XXX: currently the pattern for files can only override the Dirname variable :P obj.Dirname = pattern // XXX: simplistic for now }<|fim▁end|>
if obj.found {
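The file.go row above defines the full FileRes lifecycle: construction via NewFileRes (which calls Init and computes the path), then idempotent CheckApply passes. As a reading aid, here is a minimal hypothetical usage sketch; it is not part of the corpus row, it assumes it compiles inside the same resources package, and the name, path, content, and state values are invented for illustration.

// exampleFileResUsage is a hypothetical caller (not from the corpus): it
// exercises NewFileRes and CheckApply as defined above. It assumes the
// surrounding resources package context; the literal values are made up.
func exampleFileResUsage() error {
	content := "hello world\n" // assumed file payload
	// NewFileRes calls Init() for us, which computes the final path from
	// the name/path/dirname/basename values.
	res, err := NewFileRes("file1", "/tmp/hello", "", "", &content, "", "exists", false, false)
	if err != nil {
		return err
	}
	// First pass only checks state (apply=false)...
	checkOK, err := res.CheckApply(false)
	if err != nil {
		return err
	}
	if !checkOK {
		// ...and only a failed check triggers the apply pass.
		if _, err := res.CheckApply(true); err != nil {
			return err
		}
	}
	return nil
}

The two-pass pattern mirrors the resource contract documented above: a check-only pass reports convergence, and applying is skipped whenever the state already matches.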
<|file_name|>0009_auto_20160516_0649.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Generated by Django 1.9.5 on 2016-05-16 06:49 from __future__ import unicode_literals from django.db import migrations, models<|fim▁hole|> dependencies = [ ('voximplant', '0008_auto_20160514_0800'), ] operations = [ migrations.RemoveField( model_name='calllist', name='completed', ), migrations.AddField( model_name='calllist', name='downloaded', field=models.DateTimeField(blank=True, help_text='Last datetime of checking state from VoxImplant', null=True), ), migrations.AlterField( model_name='calllistphone', name='completed', field=models.DateTimeField(blank=True, null=True), ), ]<|fim▁end|>
class Migration(migrations.Migration):
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import url from . import views<|fim▁hole|> url(r'^$', views.IndexView.as_view(), name='index'), url(r'^logout/$', views.logout_view, name='logout'), ]<|fim▁end|>
from django.views.decorators.cache import cache_page app_name = 'webinter' urlpatterns = [
<|file_name|>modulemanager.js<|end_file_name|><|fim▁begin|>// Copyright 2008 The Closure Library Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @fileoverview A singleton object for managing Javascript code modules. * */ goog.provide('goog.module.ModuleManager'); goog.provide('goog.module.ModuleManager.CallbackType'); goog.provide('goog.module.ModuleManager.FailureType'); goog.require('goog.Disposable'); goog.require('goog.array'); goog.require('goog.asserts'); goog.require('goog.async.Deferred'); goog.require('goog.debug.Trace'); /** @suppress {extraRequire} */ goog.require('goog.dispose'); goog.require('goog.log'); /** @suppress {extraRequire} */ goog.require('goog.module'); /** @suppress {extraRequire} interface */ goog.require('goog.module.AbstractModuleLoader'); goog.require('goog.module.ModuleInfo'); goog.require('goog.module.ModuleLoadCallback'); goog.require('goog.object'); /** * The ModuleManager keeps track of all modules in the environment. * Since modules may not have their code loaded, we must keep track of them. * @constructor * @extends {goog.Disposable} * @struct */ goog.module.ModuleManager = function() { goog.module.ModuleManager.base(this, 'constructor'); /** * A mapping from module id to ModuleInfo object. * @private {Object<string, !goog.module.ModuleInfo>} */ this.moduleInfoMap_ = {}; // TODO (malteubl): Switch this to a reentrant design. /** * The ids of the currently loading modules. If batch mode is disabled, then * this array will never contain more than one element at a time. * @type {Array<string>} * @private */ this.loadingModuleIds_ = []; /** * The requested ids of the currently loading modules. This does not include * module dependencies that may also be loading. * @type {Array<string>} * @private */ this.requestedLoadingModuleIds_ = []; // TODO(user): Make these and other arrays that are used as sets be // actual sets. /** * All module ids that have ever been requested. In concurrent loading these * are the ones to subtract from future requests. * @type {!Array<string>} * @private */ this.requestedModuleIds_ = []; /** * A queue of the ids of requested but not-yet-loaded modules. The zero * position is the front of the queue. This is a 2-D array to group modules * together with other modules that should be batch loaded with them, if * batch loading is enabled. * @type {Array<Array<string>>} * @private */ this.requestedModuleIdsQueue_ = []; /** * The ids of the currently loading modules which have been initiated by user * actions. * @type {Array<string>} * @private */ this.userInitiatedLoadingModuleIds_ = []; /** * A map of callback types to the functions to call for the specified * callback type. * @type {Object<goog.module.ModuleManager.CallbackType, Array<Function>>} * @private */ this.callbackMap_ = {}; /** * Module info for the base module (the one that contains the module * manager code), which we set as the loading module so one can * register initialization callbacks in the base module. 
* * The base module is considered loaded when #setAllModuleInfo is called or * #setModuleContext is called, whichever comes first. * * @type {goog.module.ModuleInfo} * @private */ this.baseModuleInfo_ = new goog.module.ModuleInfo([], ''); /** * The module that is currently loading, or null if not loading anything. * @type {goog.module.ModuleInfo} * @private */ this.currentlyLoadingModule_ = this.baseModuleInfo_; /** * The id of the last requested initial module. When it loaded * the deferred in {@code this.initialModulesLoaded_} resolves. * @private {?string} */ this.lastInitialModuleId_ = null; /** * Deferred for when all initial modules have loaded. We currently block * sending additional module requests until this deferred resolves. In a * future optimization it may be possible to use the initial modules as * seeds for the module loader "requested module ids" and start making new * requests even sooner. * @private {!goog.async.Deferred} */ this.initialModulesLoaded_ = new goog.async.Deferred(); /** * A logger. * @private {goog.log.Logger} */ this.logger_ = goog.log.getLogger('goog.module.ModuleManager'); /** * Whether the batch mode (i.e. the loading of multiple modules with just one * request) has been enabled. * @private {boolean} */ this.batchModeEnabled_ = false; /** * Whether the module requests may be sent out of order. * @private {boolean} */ this.concurrentLoadingEnabled_ = false; /** * A loader for the modules that implements loadModules(ids, moduleInfoMap, * opt_successFn, opt_errorFn, opt_timeoutFn, opt_forceReload) method. * @private {goog.module.AbstractModuleLoader} */ this.loader_ = null; // TODO(user): Remove tracer. /** * Tracer that measures how long it takes to load a module. * @private {?number} */ this.loadTracer_ = null; /** * The number of consecutive failures that have happened upon module load * requests. * @private {number} */ this.consecutiveFailures_ = 0; /** * Determines if the module manager was just active before the processing of * the last data. * @private {boolean} */ this.lastActive_ = false; /** * Determines if the module manager was just user active before the processing * of the last data. The module manager is user active if any of the * user-initiated modules are loading or queued up to load. * @private {boolean} */ this.userLastActive_ = false; /** * The module context needed for module initialization. * @private {Object} */ this.moduleContext_ = null; }; goog.inherits(goog.module.ModuleManager, goog.Disposable); goog.addSingletonGetter(goog.module.ModuleManager); /** * The type of callbacks that can be registered with the module manager,. * @enum {string} */ goog.module.ModuleManager.CallbackType = { /** * Fired when an error has occurred. */ ERROR: 'error', /** * Fired when it becomes idle and has no more module loads to process. */ IDLE: 'idle', /** * Fired when it becomes active and has module loads to process. */ ACTIVE: 'active', /** * Fired when it becomes idle and has no more user-initiated module loads to * process. */ USER_IDLE: 'userIdle', /** * Fired when it becomes active and has user-initiated module loads to * process. */ USER_ACTIVE: 'userActive' }; /** * A non-HTTP status code indicating a corruption in loaded module. * This should be used by a ModuleLoader as a replacement for the HTTP code * given to the error handler function to indicated that the module was * corrupted. * This will set the forceReload flag on the loadModules method when retrying * module loading. 
* @type {number} */ goog.module.ModuleManager.CORRUPT_RESPONSE_STATUS_CODE = 8001; /** * Sets the batch mode as enabled or disabled for the module manager. * @param {boolean} enabled Whether the batch mode is to be enabled or not. */ goog.module.ModuleManager.prototype.setBatchModeEnabled = function(enabled) { this.batchModeEnabled_ = enabled; }; /** * Sets the concurrent loading mode as enabled or disabled for the module * manager. Requires a moduleloader implementation that supports concurrent * loads. The default {@see goog.module.ModuleLoader} does not. * @param {boolean} enabled */ goog.module.ModuleManager.prototype.setConcurrentLoadingEnabled = function( enabled) { this.concurrentLoadingEnabled_ = enabled; }; /** * Sets the module info for all modules. Should only be called once. * * @param {Object<Array<string>>} infoMap An object that contains a mapping * from module id (String) to list of required module ids (Array). */ goog.module.ModuleManager.prototype.setAllModuleInfo = function(infoMap) { for (var id in infoMap) { this.moduleInfoMap_[id] = new goog.module.ModuleInfo(infoMap[id], id); } if (!this.initialModulesLoaded_.hasFired()) { this.initialModulesLoaded_.callback(); } this.maybeFinishBaseLoad_(); }; /** * Sets the module info for all modules. Should only be called once. Also * marks modules that are currently being loaded. * * @param {string=} opt_info A string representation of the module dependency * graph, in the form: module1:dep1,dep2/module2:dep1,dep2 etc. * Where depX is the base-36 encoded position of the dep in the module list. * @param {Array<string>=} opt_loadingModuleIds A list of moduleIds that * are currently being loaded. */ goog.module.ModuleManager.prototype.setAllModuleInfoString = function( opt_info, opt_loadingModuleIds) { if (!goog.isString(opt_info)) { // The call to this method is generated in two steps, the argument is added // after some of the compilation passes. This means that the initial code // doesn't have any arguments and causes compiler errors. We make it // optional to satisfy this constraint. return; } var modules = opt_info.split('/'); var moduleIds = []; // Split the string into the infoMap of id->deps for (var i = 0; i < modules.length; i++) { var parts = modules[i].split(':'); var id = parts[0]; var deps; if (parts[1]) { deps = parts[1].split(','); for (var j = 0; j < deps.length; j++) { var index = parseInt(deps[j], 36); goog.asserts.assert( moduleIds[index], 'No module @ %s, dep of %s @ %s', index, id, i); deps[j] = moduleIds[index]; } } else { deps = []; } moduleIds.push(id); this.moduleInfoMap_[id] = new goog.module.ModuleInfo(deps, id); } if (opt_loadingModuleIds && opt_loadingModuleIds.length) { goog.array.extend(this.loadingModuleIds_, opt_loadingModuleIds); // The last module in the list of initial modules. When it has loaded all // initial modules have loaded. this.lastInitialModuleId_ = /** @type {?string} */ (goog.array.peek(opt_loadingModuleIds)); } else { if (!this.initialModulesLoaded_.hasFired()) { this.initialModulesLoaded_.callback(); } } this.maybeFinishBaseLoad_(); }; /** * Gets a module info object by id. * @param {string} id A module identifier. * @return {!goog.module.ModuleInfo} The module info. */ goog.module.ModuleManager.prototype.getModuleInfo = function(id) { return this.moduleInfoMap_[id]; }; /** * Sets the module uris. * * @param {Object} moduleUriMap The map of id/uris pairs for each module. 
*/ goog.module.ModuleManager.prototype.setModuleUris = function(moduleUriMap) { for (var id in moduleUriMap) { this.moduleInfoMap_[id].setUris(moduleUriMap[id]); } }; /** * Gets the application-specific module loader. * @return {goog.module.AbstractModuleLoader} An object that has a * loadModules(ids, moduleInfoMap, opt_successFn, opt_errFn, * opt_timeoutFn, opt_forceReload) method. */ goog.module.ModuleManager.prototype.getLoader = function() { return this.loader_; }; /** * Sets the application-specific module loader. * @param {goog.module.AbstractModuleLoader} loader An object that has a * loadModules(ids, moduleInfoMap, opt_successFn, opt_errFn, * opt_timeoutFn, opt_forceReload) method. */ goog.module.ModuleManager.prototype.setLoader = function(loader) { this.loader_ = loader; }; /** * Gets the module context to use to initialize the module. * @return {Object} The context. */ goog.module.ModuleManager.prototype.getModuleContext = function() { return this.moduleContext_; }; /** * Sets the module context to use to initialize the module. * @param {Object} context The context. */ goog.module.ModuleManager.prototype.setModuleContext = function(context) { this.moduleContext_ = context; this.maybeFinishBaseLoad_(); }; <|fim▁hole|> /** * Determines if the ModuleManager is active * @return {boolean} TRUE iff the ModuleManager is active (i.e., not idle). */ goog.module.ModuleManager.prototype.isActive = function() { return this.loadingModuleIds_.length > 0; }; /** * Determines if the ModuleManager is user active * @return {boolean} TRUE iff the ModuleManager is user active (i.e., not idle). */ goog.module.ModuleManager.prototype.isUserActive = function() { return this.userInitiatedLoadingModuleIds_.length > 0; }; /** * Dispatches an ACTIVE or IDLE event if necessary. * @private */ goog.module.ModuleManager.prototype.dispatchActiveIdleChangeIfNeeded_ = function() { var lastActive = this.lastActive_; var active = this.isActive(); if (active != lastActive) { this.executeCallbacks_( active ? goog.module.ModuleManager.CallbackType.ACTIVE : goog.module.ModuleManager.CallbackType.IDLE); // Flip the last active value. this.lastActive_ = active; } // Check if the module manager is user active i.e., there are user initiated // modules being loaded or queued up to be loaded. var userLastActive = this.userLastActive_; var userActive = this.isUserActive(); if (userActive != userLastActive) { this.executeCallbacks_( userActive ? goog.module.ModuleManager.CallbackType.USER_ACTIVE : goog.module.ModuleManager.CallbackType.USER_IDLE); // Flip the last user active value. this.userLastActive_ = userActive; } }; /** * Preloads a module after a short delay. * * @param {string} id The id of the module to preload. * @param {number=} opt_timeout The number of ms to wait before adding the * module id to the loading queue (defaults to 0 ms). Note that the module * will be loaded asynchronously regardless of the value of this parameter. * @return {!goog.async.Deferred} A deferred object. */ goog.module.ModuleManager.prototype.preloadModule = function(id, opt_timeout) { var d = new goog.async.Deferred(); window.setTimeout( goog.bind(this.addLoadModule_, this, id, d), opt_timeout || 0); return d; }; /** * Prefetches a JavaScript module and its dependencies, which means that the * module will be downloaded, but not evaluated. To complete the module load, * the caller should also call load or execOnLoad after prefetching the module. * * @param {string} id The id of the module to prefetch. 
 */
goog.module.ModuleManager.prototype.prefetchModule = function(id) {
  var moduleInfo = this.getModuleInfo(id);
  if (moduleInfo.isLoaded() || this.isModuleLoading(id)) {
    throw new Error('Module load already requested: ' + id);
  } else if (this.batchModeEnabled_) {
    throw new Error('Module prefetching is not supported in batch mode');
  } else {
    var idWithDeps = this.getNotYetLoadedTransitiveDepIds_(id);
    for (var i = 0; i < idWithDeps.length; i++) {
      this.loader_.prefetchModule(
          idWithDeps[i], this.moduleInfoMap_[idWithDeps[i]]);
    }
  }
};


/**
 * Loads a single module for use with a given deferred.
 *
 * @param {string} id The id of the module to load.
 * @param {goog.async.Deferred} d A deferred object.
 * @private
 */
goog.module.ModuleManager.prototype.addLoadModule_ = function(id, d) {
  var moduleInfo = this.getModuleInfo(id);
  if (moduleInfo.isLoaded()) {
    d.callback(this.moduleContext_);
    return;
  }

  this.registerModuleLoadCallbacks_(id, moduleInfo, false, d);
  if (!this.isModuleLoading(id)) {
    this.loadModulesOrEnqueue_([id]);
  }
};


/**
 * Loads a list of modules or, if some other module is currently being loaded,
 * appends the ids to the queue of requested module ids. Registers callbacks
 * for a module that is currently loading and returns a fired deferred for a
 * module that is already loaded.
 *
 * @param {Array<string>} ids The ids of the modules to load.
 * @param {boolean=} opt_userInitiated If the load is a result of a user action.
 * @return {!Object<string, !goog.async.Deferred>} A mapping from id (String)
 *     to deferred objects that will callback or errback when the load for that
 *     id is finished.
 * @private
 */
goog.module.ModuleManager.prototype.loadModulesOrEnqueueIfNotLoadedOrLoading_ =
    function(ids, opt_userInitiated) {
  var uniqueIds = [];
  goog.array.removeDuplicates(ids, uniqueIds);
  var idsToLoad = [];
  var deferredMap = {};
  for (var i = 0; i < uniqueIds.length; i++) {
    var id = uniqueIds[i];
    var moduleInfo = this.getModuleInfo(id);
    if (!moduleInfo) {
      throw new Error('Unknown module: ' + id);
    }
    var d = new goog.async.Deferred();
    deferredMap[id] = d;
    if (moduleInfo.isLoaded()) {
      d.callback(this.moduleContext_);
    } else {
      this.registerModuleLoadCallbacks_(id, moduleInfo, !!opt_userInitiated, d);
      if (!this.isModuleLoading(id)) {
        idsToLoad.push(id);
      }
    }
  }

  // If there are ids to load, load them, otherwise, they are all loading or
  // loaded.
  if (idsToLoad.length > 0) {
    this.loadModulesOrEnqueue_(idsToLoad);
  }
  return deferredMap;
};


/**
 * Registers the callbacks and handles logic if it is a user initiated module
 * load.
 *
 * @param {string} id The id of the module to possibly load.
 * @param {!goog.module.ModuleInfo} moduleInfo The module info for the
 *     given id.
 * @param {boolean} userInitiated If the load was user initiated.
 * @param {goog.async.Deferred} d A deferred object.
* @private */ goog.module.ModuleManager.prototype.registerModuleLoadCallbacks_ = function( id, moduleInfo, userInitiated, d) { moduleInfo.registerCallback(d.callback, d); moduleInfo.registerErrback(function(err) { d.errback(Error(err)); }); // If it's already loading, we don't have to do anything besides handle // if it was user initiated if (this.isModuleLoading(id)) { if (userInitiated) { goog.log.info( this.logger_, 'User initiated module already loading: ' + id); this.addUserInitiatedLoadingModule_(id); this.dispatchActiveIdleChangeIfNeeded_(); } } else { if (userInitiated) { goog.log.info(this.logger_, 'User initiated module load: ' + id); this.addUserInitiatedLoadingModule_(id); } else { goog.log.info(this.logger_, 'Initiating module load: ' + id); } } }; /** * Initiates loading of a list of modules or, if a module is currently being * loaded, appends the modules to the queue of requested module ids. * * The caller should verify that the requested modules are not already loaded or * loading. {@link #loadModulesOrEnqueueIfNotLoadedOrLoading_} is a more lenient * alternative to this method. * * @param {Array<string>} ids The ids of the modules to load. * @private */ goog.module.ModuleManager.prototype.loadModulesOrEnqueue_ = function(ids) { // With concurrent loading we always just send off the request. if (this.concurrentLoadingEnabled_) { // For now we wait for initial modules to have downloaded as this puts the // loader in a good state for calculating the needed deps of additional // loads. // TODO(user): Make this wait unnecessary. this.initialModulesLoaded_.addCallback( goog.bind(this.loadModules_, this, ids)); } else { if (goog.array.isEmpty(this.loadingModuleIds_)) { this.loadModules_(ids); } else { this.requestedModuleIdsQueue_.push(ids); this.dispatchActiveIdleChangeIfNeeded_(); } } }; /** * Gets the amount of delay to wait before sending a request for more modules. * If a certain module request fails, we backoff a little bit and try again. * @return {number} Delay, in ms. * @private */ goog.module.ModuleManager.prototype.getBackOff_ = function() { // 5 seconds after one error, 20 seconds after 2. return Math.pow(this.consecutiveFailures_, 2) * 5000; }; /** * Loads a list of modules and any of their not-yet-loaded prerequisites. * If batch mode is enabled, the prerequisites will be loaded together with the * requested modules and all requested modules will be loaded at the same time. * * The caller should verify that the requested modules are not already loaded * and that no modules are currently loading before calling this method. * * @param {Array<string>} ids The ids of the modules to load. * @param {boolean=} opt_isRetry If the load is a retry of a previous load * attempt. * @param {boolean=} opt_forceReload Whether to bypass cache while loading the * module. * @private */ goog.module.ModuleManager.prototype.loadModules_ = function( ids, opt_isRetry, opt_forceReload) { if (!opt_isRetry) { this.consecutiveFailures_ = 0; } // Not all modules may be loaded immediately if batch mode is not enabled. var idsToLoadImmediately = this.processModulesForLoad_(ids); goog.log.info(this.logger_, 'Loading module(s): ' + idsToLoadImmediately); this.loadingModuleIds_ = idsToLoadImmediately; if (this.batchModeEnabled_) { this.requestedLoadingModuleIds_ = ids; } else { // If batch mode is disabled, we treat each dependency load as a separate // load. this.requestedLoadingModuleIds_ = goog.array.clone(idsToLoadImmediately); } // Dispatch an active/idle change if needed. 
this.dispatchActiveIdleChangeIfNeeded_(); if (goog.array.isEmpty(idsToLoadImmediately)) { // All requested modules and deps have been either loaded already or have // already been requested. return; } this.requestedModuleIds_.push.apply( this.requestedModuleIds_, idsToLoadImmediately); var loadFn = goog.bind( this.loader_.loadModules, this.loader_, goog.array.clone(idsToLoadImmediately), this.moduleInfoMap_, null, goog.bind( this.handleLoadError_, this, this.requestedLoadingModuleIds_, idsToLoadImmediately), goog.bind(this.handleLoadTimeout_, this), !!opt_forceReload); var delay = this.getBackOff_(); if (delay) { window.setTimeout(loadFn, delay); } else { loadFn(); } }; /** * Processes a list of module ids for loading. Checks if any of the modules are * already loaded and then gets transitive deps. Queues any necessary modules * if batch mode is not enabled. Returns the list of ids that should be loaded. * * @param {Array<string>} ids The ids that need to be loaded. * @return {!Array<string>} The ids to load, including dependencies. * @throws {Error} If the module is already loaded. * @private */ goog.module.ModuleManager.prototype.processModulesForLoad_ = function(ids) { for (var i = 0; i < ids.length; i++) { var moduleInfo = this.moduleInfoMap_[ids[i]]; if (moduleInfo.isLoaded()) { throw new Error('Module already loaded: ' + ids[i]); } } // Build a list of the ids of this module and any of its not-yet-loaded // prerequisite modules in dependency order. var idsWithDeps = []; for (var i = 0; i < ids.length; i++) { idsWithDeps = idsWithDeps.concat(this.getNotYetLoadedTransitiveDepIds_(ids[i])); } goog.array.removeDuplicates(idsWithDeps); if (!this.batchModeEnabled_ && idsWithDeps.length > 1) { var idToLoad = idsWithDeps.shift(); goog.log.info( this.logger_, 'Must load ' + idToLoad + ' module before ' + ids); // Insert the requested module id and any other not-yet-loaded prereqs // that it has at the front of the queue. var queuedModules = goog.array.map(idsWithDeps, function(id) { return [id]; }); this.requestedModuleIdsQueue_ = queuedModules.concat(this.requestedModuleIdsQueue_); return [idToLoad]; } else { return idsWithDeps; } }; /** * Builds a list of the ids of the not-yet-loaded modules that a particular * module transitively depends on, including itself. * * @param {string} id The id of a not-yet-loaded module. * @return {!Array<string>} An array of module ids in dependency order that's * guaranteed to end with the provided module id. * @private */ goog.module.ModuleManager.prototype.getNotYetLoadedTransitiveDepIds_ = function( id) { // NOTE(user): We want the earliest occurrence of a module, not the first // dependency we find. Therefore we strip duplicates at the end rather than // during. See the tests for concrete examples. var ids = []; if (!goog.array.contains(this.requestedModuleIds_, id)) { ids.push(id); } var depIds = goog.array.clone(this.getModuleInfo(id).getDependencies()); while (depIds.length) { var depId = depIds.pop(); if (!this.getModuleInfo(depId).isLoaded() && !goog.array.contains(this.requestedModuleIds_, depId)) { ids.unshift(depId); // We need to process direct dependencies first. Array.prototype.unshift.apply( depIds, this.getModuleInfo(depId).getDependencies()); } } goog.array.removeDuplicates(ids); return ids; }; /** * If we are still loading the base module, consider the load complete. 
* @private */ goog.module.ModuleManager.prototype.maybeFinishBaseLoad_ = function() { if (this.currentlyLoadingModule_ == this.baseModuleInfo_) { this.currentlyLoadingModule_ = null; var error = this.baseModuleInfo_.onLoad(goog.bind(this.getModuleContext, this)); if (error) { this.dispatchModuleLoadFailed_( goog.module.ModuleManager.FailureType.INIT_ERROR); } this.dispatchActiveIdleChangeIfNeeded_(); } }; /** * Records that a module was loaded. Also initiates loading the next module if * any module requests are queued. This method is called by code that is * generated and appended to each dynamic module's code at compilation time. * * @param {string} id A module id. */ goog.module.ModuleManager.prototype.setLoaded = function(id) { if (this.isDisposed()) { goog.log.warning( this.logger_, 'Module loaded after module manager was disposed: ' + id); return; } goog.log.info(this.logger_, 'Module loaded: ' + id); var error = this.moduleInfoMap_[id].onLoad(goog.bind(this.getModuleContext, this)); if (error) { this.dispatchModuleLoadFailed_( goog.module.ModuleManager.FailureType.INIT_ERROR); } // Remove the module id from the user initiated set if it existed there. goog.array.remove(this.userInitiatedLoadingModuleIds_, id); // Remove the module id from the loading modules if it exists there. goog.array.remove(this.loadingModuleIds_, id); if (goog.array.isEmpty(this.loadingModuleIds_)) { // No more modules are currently being loaded (e.g. arriving later in the // same HTTP response), so proceed to load the next module in the queue. this.loadNextModules_(); } if (this.lastInitialModuleId_ && id == this.lastInitialModuleId_) { if (!this.initialModulesLoaded_.hasFired()) { this.initialModulesLoaded_.callback(); } } // Dispatch an active/idle change if needed. this.dispatchActiveIdleChangeIfNeeded_(); }; /** * Gets whether a module is currently loading or in the queue, waiting to be * loaded. * @param {string} id A module id. * @return {boolean} TRUE iff the module is loading. */ goog.module.ModuleManager.prototype.isModuleLoading = function(id) { if (goog.array.contains(this.loadingModuleIds_, id)) { return true; } for (var i = 0; i < this.requestedModuleIdsQueue_.length; i++) { if (goog.array.contains(this.requestedModuleIdsQueue_[i], id)) { return true; } } return false; }; /** * Requests that a function be called once a particular module is loaded. * Client code can use this method to safely call into modules that may not yet * be loaded. For consistency, this method always calls the function * asynchronously -- even if the module is already loaded. Initiates loading of * the module if necessary, unless opt_noLoad is true. * * @param {string} moduleId A module id. * @param {Function} fn Function to execute when the module has loaded. * @param {Object=} opt_handler Optional handler under whose scope to execute * the callback. * @param {boolean=} opt_noLoad TRUE iff not to initiate loading of the module. * @param {boolean=} opt_userInitiated TRUE iff the loading of the module was * user initiated. * @param {boolean=} opt_preferSynchronous TRUE iff the function should be * executed synchronously if the module has already been loaded. * @return {!goog.module.ModuleLoadCallback} A callback wrapper that exposes * an abort and execute method. 
*/ goog.module.ModuleManager.prototype.execOnLoad = function( moduleId, fn, opt_handler, opt_noLoad, opt_userInitiated, opt_preferSynchronous) { var moduleInfo = this.moduleInfoMap_[moduleId]; var callbackWrapper; if (moduleInfo.isLoaded()) { goog.log.info(this.logger_, moduleId + ' module already loaded'); // Call async so that code paths don't change between loaded and unloaded // cases. callbackWrapper = new goog.module.ModuleLoadCallback(fn, opt_handler); if (opt_preferSynchronous) { callbackWrapper.execute(this.moduleContext_); } else { window.setTimeout(goog.bind(callbackWrapper.execute, callbackWrapper), 0); } } else if (this.isModuleLoading(moduleId)) { goog.log.info(this.logger_, moduleId + ' module already loading'); callbackWrapper = moduleInfo.registerCallback(fn, opt_handler); if (opt_userInitiated) { goog.log.info( this.logger_, 'User initiated module already loading: ' + moduleId); this.addUserInitiatedLoadingModule_(moduleId); this.dispatchActiveIdleChangeIfNeeded_(); } } else { goog.log.info(this.logger_, 'Registering callback for module: ' + moduleId); callbackWrapper = moduleInfo.registerCallback(fn, opt_handler); if (!opt_noLoad) { if (opt_userInitiated) { goog.log.info(this.logger_, 'User initiated module load: ' + moduleId); this.addUserInitiatedLoadingModule_(moduleId); } goog.log.info(this.logger_, 'Initiating module load: ' + moduleId); this.loadModulesOrEnqueue_([moduleId]); } } return callbackWrapper; }; /** * Loads a module, returning a goog.async.Deferred for keeping track of the * result. * * @param {string} moduleId A module id. * @param {boolean=} opt_userInitiated If the load is a result of a user action. * @return {goog.async.Deferred} A deferred object. */ goog.module.ModuleManager.prototype.load = function( moduleId, opt_userInitiated) { return this.loadModulesOrEnqueueIfNotLoadedOrLoading_( [moduleId], opt_userInitiated)[moduleId]; }; /** * Loads a list of modules, returning a goog.async.Deferred for keeping track of * the result. * * @param {Array<string>} moduleIds A list of module ids. * @param {boolean=} opt_userInitiated If the load is a result of a user action. * @return {!Object<string, !goog.async.Deferred>} A mapping from id (String) * to deferred objects that will callback or errback when the load for that * id is finished. */ goog.module.ModuleManager.prototype.loadMultiple = function( moduleIds, opt_userInitiated) { return this.loadModulesOrEnqueueIfNotLoadedOrLoading_( moduleIds, opt_userInitiated); }; /** * Ensures that the module with the given id is listed as a user-initiated * module that is being loaded. This method guarantees that a module will never * get listed more than once. * @param {string} id Identifier of the module. * @private */ goog.module.ModuleManager.prototype.addUserInitiatedLoadingModule_ = function( id) { if (!goog.array.contains(this.userInitiatedLoadingModuleIds_, id)) { this.userInitiatedLoadingModuleIds_.push(id); } }; /** * Method called just before a module code is loaded. * @param {string} id Identifier of the module. 
*/ goog.module.ModuleManager.prototype.beforeLoadModuleCode = function(id) { this.loadTracer_ = goog.debug.Trace.startTracer('Module Load: ' + id, 'Module Load'); if (this.currentlyLoadingModule_) { goog.log.error( this.logger_, 'beforeLoadModuleCode called with module "' + id + '" while module "' + this.currentlyLoadingModule_.getId() + '" is loading'); } this.currentlyLoadingModule_ = this.getModuleInfo(id); }; /** * Method called just after module code is loaded * @param {string} id Identifier of the module. */ goog.module.ModuleManager.prototype.afterLoadModuleCode = function(id) { if (!this.currentlyLoadingModule_ || id != this.currentlyLoadingModule_.getId()) { goog.log.error( this.logger_, 'afterLoadModuleCode called with module "' + id + '" while loading module "' + (this.currentlyLoadingModule_ && this.currentlyLoadingModule_.getId()) + '"'); } this.currentlyLoadingModule_ = null; goog.debug.Trace.stopTracer(this.loadTracer_); }; /** * Register an initialization callback for the currently loading module. This * should only be called by script that is executed during the evaluation of * a module's javascript. This is almost equivalent to calling the function * inline, but ensures that all the code from the currently loading module * has been loaded. This makes it cleaner and more robust than calling the * function inline. * * If this function is called from the base module (the one that contains * the module manager code), the callback is held until #setAllModuleInfo * is called, or until #setModuleContext is called, whichever happens first. * * @param {Function} fn A callback function that takes a single argument * which is the module context. * @param {Object=} opt_handler Optional handler under whose scope to execute * the callback. */ goog.module.ModuleManager.prototype.registerInitializationCallback = function( fn, opt_handler) { if (!this.currentlyLoadingModule_) { goog.log.error(this.logger_, 'No module is currently loading'); } else { this.currentlyLoadingModule_.registerEarlyCallback(fn, opt_handler); } }; /** * Register a late initialization callback for the currently loading module. * Callbacks registered via this function are executed similar to * {@see registerInitializationCallback}, but they are fired after all * initialization callbacks are called. * * @param {Function} fn A callback function that takes a single argument * which is the module context. * @param {Object=} opt_handler Optional handler under whose scope to execute * the callback. */ goog.module.ModuleManager.prototype.registerLateInitializationCallback = function(fn, opt_handler) { if (!this.currentlyLoadingModule_) { goog.log.error(this.logger_, 'No module is currently loading'); } else { this.currentlyLoadingModule_.registerCallback(fn, opt_handler); } }; /** * Sets the constructor to use for the module object for the currently * loading module. The constructor should derive from * {@see goog.module.BaseModule}. * @param {Function} fn The constructor function. */ goog.module.ModuleManager.prototype.setModuleConstructor = function(fn) { if (!this.currentlyLoadingModule_) { goog.log.error(this.logger_, 'No module is currently loading'); return; } this.currentlyLoadingModule_.setModuleConstructor(fn); }; /** * The possible reasons for a module load failure callback being fired. * @enum {number} */ goog.module.ModuleManager.FailureType = { /** 401 Status. */ UNAUTHORIZED: 0, /** Error status (not 401) returned multiple times. */ CONSECUTIVE_FAILURES: 1, /** Request timeout. 
*/ TIMEOUT: 2, /** 410 status, old code gone. */ OLD_CODE_GONE: 3, /** The onLoad callbacks failed. */ INIT_ERROR: 4 }; /** * Handles a module load failure. * * @param {!Array<string>} requestedLoadingModuleIds Modules ids that were * requested in failed request. Does not included calculated dependencies. * @param {!Array<string>} requestedModuleIdsWithDeps All module ids requested * in the failed request including all dependencies. * @param {?number} status The error status. * @private */ goog.module.ModuleManager.prototype.handleLoadError_ = function( requestedLoadingModuleIds, requestedModuleIdsWithDeps, status) { this.consecutiveFailures_++; // Module manager was not designed to be reentrant. Reinstate the instance // var with actual value when request failed (Other requests may have // started already.) this.requestedLoadingModuleIds_ = requestedLoadingModuleIds; // Pretend we never requested the failed modules. goog.array.forEach( requestedModuleIdsWithDeps, goog.partial(goog.array.remove, this.requestedModuleIds_), this); if (status == 401) { // The user is not logged in. They've cleared their cookies or logged out // from another window. goog.log.info(this.logger_, 'Module loading unauthorized'); this.dispatchModuleLoadFailed_( goog.module.ModuleManager.FailureType.UNAUTHORIZED); // Drop any additional module requests. this.requestedModuleIdsQueue_.length = 0; } else if (status == 410) { // The requested module js is old and not available. this.requeueBatchOrDispatchFailure_( goog.module.ModuleManager.FailureType.OLD_CODE_GONE); this.loadNextModules_(); } else if (this.consecutiveFailures_ >= 3) { goog.log.info( this.logger_, 'Aborting after failure to load: ' + this.loadingModuleIds_); this.requeueBatchOrDispatchFailure_( goog.module.ModuleManager.FailureType.CONSECUTIVE_FAILURES); this.loadNextModules_(); } else { goog.log.info( this.logger_, 'Retrying after failure to load: ' + this.loadingModuleIds_); var forceReload = status == goog.module.ModuleManager.CORRUPT_RESPONSE_STATUS_CODE; this.loadModules_(this.requestedLoadingModuleIds_, true, forceReload); } }; /** * Handles a module load timeout. * @private */ goog.module.ModuleManager.prototype.handleLoadTimeout_ = function() { goog.log.info( this.logger_, 'Aborting after timeout: ' + this.loadingModuleIds_); this.requeueBatchOrDispatchFailure_( goog.module.ModuleManager.FailureType.TIMEOUT); this.loadNextModules_(); }; /** * Requeues batch loads that had more than one requested module * (i.e. modules that were not included as dependencies) as separate loads or * if there was only one requested module, fails that module with the received * cause. * @param {goog.module.ModuleManager.FailureType} cause The reason for the * failure. * @private */ goog.module.ModuleManager.prototype.requeueBatchOrDispatchFailure_ = function( cause) { // The load failed, so if there are more than one requested modules, then we // need to retry each one as a separate load. Otherwise, if there is only one // requested module, remove it and its dependencies from the queue. if (this.requestedLoadingModuleIds_.length > 1) { var queuedModules = goog.array.map( this.requestedLoadingModuleIds_, function(id) { return [id]; }); this.requestedModuleIdsQueue_ = queuedModules.concat(this.requestedModuleIdsQueue_); } else { this.dispatchModuleLoadFailed_(cause); } }; /** * Handles when a module load failed. * @param {goog.module.ModuleManager.FailureType} cause The reason for the * failure. 
* @private */ goog.module.ModuleManager.prototype.dispatchModuleLoadFailed_ = function( cause) { var failedIds = this.requestedLoadingModuleIds_; this.loadingModuleIds_.length = 0; // If any pending modules depend on the id that failed, // they need to be removed from the queue. var idsToCancel = []; for (var i = 0; i < this.requestedModuleIdsQueue_.length; i++) { var dependentModules = goog.array.filter( this.requestedModuleIdsQueue_[i], /** * Returns true if the requestedId has dependencies on the modules that * just failed to load. * @param {string} requestedId The module to check for dependencies. * @return {boolean} True if the module depends on failed modules. */ function(requestedId) { var requestedDeps = this.getNotYetLoadedTransitiveDepIds_(requestedId); return goog.array.some(failedIds, function(id) { return goog.array.contains(requestedDeps, id); }); }, this); goog.array.extend(idsToCancel, dependentModules); } // Also insert the ids that failed to load as ids to cancel. for (var i = 0; i < failedIds.length; i++) { goog.array.insert(idsToCancel, failedIds[i]); } // Remove ids to cancel from the queues. for (var i = 0; i < idsToCancel.length; i++) { for (var j = 0; j < this.requestedModuleIdsQueue_.length; j++) { goog.array.remove(this.requestedModuleIdsQueue_[j], idsToCancel[i]); } goog.array.remove(this.userInitiatedLoadingModuleIds_, idsToCancel[i]); } // Call the functions for error notification. var errorCallbacks = this.callbackMap_[goog.module.ModuleManager.CallbackType.ERROR]; if (errorCallbacks) { for (var i = 0; i < errorCallbacks.length; i++) { var callback = errorCallbacks[i]; for (var j = 0; j < idsToCancel.length; j++) { callback( goog.module.ModuleManager.CallbackType.ERROR, idsToCancel[j], cause); } } } // Call the errbacks on the module info. for (var i = 0; i < failedIds.length; i++) { if (this.moduleInfoMap_[failedIds[i]]) { this.moduleInfoMap_[failedIds[i]].onError(cause); } } // Clear the requested loading module ids. this.requestedLoadingModuleIds_.length = 0; this.dispatchActiveIdleChangeIfNeeded_(); }; /** * Loads the next modules on the queue. * @private */ goog.module.ModuleManager.prototype.loadNextModules_ = function() { while (this.requestedModuleIdsQueue_.length) { // Remove modules that are already loaded. var nextIds = goog.array.filter( this.requestedModuleIdsQueue_.shift(), function(id) { return !this.getModuleInfo(id).isLoaded(); }, this); if (nextIds.length > 0) { this.loadModules_(nextIds); return; } } // Dispatch an active/idle change if needed. this.dispatchActiveIdleChangeIfNeeded_(); }; /** * Registers a callback for the specified callback type(s). * @param * {goog.module.ModuleManager.CallbackType|Array<goog.module.ModuleManager.CallbackType>} * types * The callback type(s). * @param {Function} fn The function to register as a callback. */ goog.module.ModuleManager.prototype.registerCallback = function(types, fn) { if (!goog.isArray(types)) { types = [types]; } for (var i = 0; i < types.length; i++) { this.registerCallback_(types[i], fn); } }; /** * Register a callback for the specified callback type. * @param {goog.module.ModuleManager.CallbackType} type The callback type. * @param {Function} fn The callback function. * @private */ goog.module.ModuleManager.prototype.registerCallback_ = function(type, fn) { var callbackMap = this.callbackMap_; if (!callbackMap[type]) { callbackMap[type] = []; } callbackMap[type].push(fn); }; /** * Call the callback functions of the specified type.
* @param {goog.module.ModuleManager.CallbackType} type The callback type. * @private */ goog.module.ModuleManager.prototype.executeCallbacks_ = function(type) { var callbacks = this.callbackMap_[type]; for (var i = 0; callbacks && i < callbacks.length; i++) { callbacks[i](type); } }; /** @override */ goog.module.ModuleManager.prototype.disposeInternal = function() { goog.module.ModuleManager.base(this, 'disposeInternal'); // Dispose of each ModuleInfo object. goog.disposeAll( goog.object.getValues(this.moduleInfoMap_), this.baseModuleInfo_); this.moduleInfoMap_ = null; this.loadingModuleIds_ = null; this.requestedLoadingModuleIds_ = null; this.userInitiatedLoadingModuleIds_ = null; this.requestedModuleIdsQueue_ = null; this.callbackMap_ = null; };<|fim▁end|>
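// Illustrative usage sketch (not part of the original moduleManager.js): how
// a client might combine the callback and load APIs shown above. It assumes
// the standard Closure singleton accessor goog.module.ModuleManager
// .getInstance(), which is not shown in this excerpt, and the module id
// 'settings' is a placeholder.
var manager = goog.module.ModuleManager.getInstance();

// React to any load failure dispatched via dispatchModuleLoadFailed_.
manager.registerCallback(
    goog.module.ModuleManager.CallbackType.ERROR,
    function(type, moduleId, cause) {
      if (cause == goog.module.ModuleManager.FailureType.UNAUTHORIZED) {
        // The session expired mid-load; prompt the user to log in again.
      }
    });

// Run a function once 'settings' is loaded, fetching it on demand; as in
// execOnLoad above, the callback receives the module context.
manager.execOnLoad('settings', function(context) {
  // The module's early and late init callbacks have already fired here.
});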
<|file_name|>ProxyViewList.js<|end_file_name|><|fim▁begin|>import React from 'react'; import PropTypes from 'prop-types'; import { FormattedMessage } from 'react-intl'; import { Headline } from '@folio/stripes/components'; import css from './ProxyViewList.css'; const ProxyViewList = ({ records, name, label, itemComponent, stripes }) => { const ComponentToRender = itemComponent; const items = records.map((record, index) => ( <ComponentToRender key={`item-${index}`} record={record} stripes={stripes} /> )); const noSponsorsFound = <FormattedMessage id="ui-users.permissions.noSponsorsFound" />; const noProxiesFound = <FormattedMessage id="ui-users.permissions.noProxiesFound" />; const noneFoundMsg = name === 'sponsors' ? noSponsorsFound : noProxiesFound; return ( <div className={css.list} data-test={name}> <Headline tag="h4" size="small" margin="small">{label}</Headline> {items.length ? items : <p className={css.isEmptyMessage}>{noneFoundMsg}</p>} </div> ); };<|fim▁hole|> ProxyViewList.propTypes = { records: PropTypes.arrayOf(PropTypes.object), itemComponent: PropTypes.func.isRequired, name: PropTypes.string.isRequired, label: PropTypes.node.isRequired, stripes: PropTypes.object.isRequired, }; export default ProxyViewList;<|fim▁end|>
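// Illustrative usage sketch (not part of the original ProxyViewList.js):
// rendering the sponsor and proxy lists side by side. ProxyItem, the
// sponsors/proxies arrays, the stripes object, and the label message ids are
// hypothetical stand-ins for whatever the enclosing user-detail form supplies.
import React from 'react';
import { FormattedMessage } from 'react-intl';
import ProxyViewList from './ProxyViewList';
import ProxyItem from './ProxyItem'; // hypothetical item renderer

const ProxySection = ({ sponsors, proxies, stripes }) => (
  <div>
    <ProxyViewList
      name="sponsors"
      label={<FormattedMessage id="ui-users.permissions.sponsors" />}
      records={sponsors}
      itemComponent={ProxyItem}
      stripes={stripes}
    />
    <ProxyViewList
      name="proxies"
      label={<FormattedMessage id="ui-users.permissions.proxies" />}
      records={proxies}
      itemComponent={ProxyItem}
      stripes={stripes}
    />
  </div>
);

export default ProxySection;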
<|file_name|>error.go<|end_file_name|><|fim▁begin|>/* Copyright (c) 2019 the Octant contributors. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package component import ( "fmt" "github.com/vmware-tanzu/octant/internal/util/json" ) // Error is a component for displaying an error // // +octant:component type Error struct { Base Config ErrorConfig `json:"config"` } // ErrorConfig is the contents of Error type ErrorConfig struct { Data string `json:"data,omitempty"` } // NewError creates an error component func NewError(title []TitleComponent, err error) *Error { return &Error{ Base: newBase(TypeError, title), Config: ErrorConfig{ Data: fmt.Sprintf("%+v", err), }, } } // SupportsTitle denotes this is a TitleComponent. func (t *Error) SupportsTitle() {} type errorMarshal Error // MarshalJSON implements json.Marshaler func (t *Error) MarshalJSON() ([]byte, error) { m := errorMarshal(*t) m.Metadata.Type = TypeError return json.Marshal(&m) } // String returns the text content of the component. func (t *Error) String() string { return t.Config.Data } // LessThan returns true if this component's value is less than the argument supplied. func (t *Error) LessThan(i interface{}) bool { v, ok := i.(*Error) if !ok { return false }<|fim▁hole|> }<|fim▁end|>
return t.Config.Data < v.Config.Data
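// Illustrative usage sketch (not part of the original error.go): building an
// Error component from a Go error and serializing it for the UI. It assumes
// the package's TitleFromString helper for constructing []TitleComponent; if
// that helper differs, substitute the real title constructor.
func exampleErrorJSON() ([]byte, error) {
	cause := fmt.Errorf("deployment %q not found", "octant-demo")
	errComponent := NewError(TitleFromString("Deployment status"), cause)
	// Emits {"metadata": ..., "config": {"data": "..."}} via MarshalJSON above.
	return errComponent.MarshalJSON()
}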
<|file_name|>admin.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2010-2013 Código Sur Sociedad Civil. # All rights reserved. # # This file is part of Cyclope. # # Cyclope is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Cyclope is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os from django.conf import settings from django.contrib import admin from django import forms<|fim▁hole|>from cyclope.core.collections.admin import CollectibleAdmin from cyclope.admin import BaseContentAdmin from models import * from filebrowser.fields import FileBrowseField from filebrowser.base import FileObject from filebrowser.functions import handle_file_upload, convert_filename # This is a standard ClearableFileInput. # We just need to "translate" some data from the FileBrowseField class CustomFileInput(forms.widgets.ClearableFileInput): def render(self, name, value, attrs=None): # FileBrowseField has no url attribute so we set url to url_full if type(value) == FileObject: value.url = value.url_full return super(CustomFileInput, self).render(name, value, attrs) class MediaAdmin(CollectibleAdmin, BaseContentAdmin): inlines = CollectibleAdmin.inlines + BaseContentAdmin.inlines search_fields = ('name', 'description', ) list_filter = CollectibleAdmin.list_filter + ('creation_date',) def get_form(self, request, obj=None, **kwargs): media_file_field = self.model.media_file_field image_file_field = self.model.image_file_field form = super(MediaAdmin, self).get_form(request, obj, **kwargs) simple_widgets = False if not request.user.is_superuser: simple_widgets = True form.base_fields[media_file_field].widget = CustomFileInput() if image_file_field: form.base_fields[image_file_field].widget = CustomFileInput() form.simple = simple_widgets if obj: form.media_file_initial = getattr(obj, media_file_field) # This is a hack; if the field is required it will fail validation # when the user does not upload a file. 
# TODO(nicoechaniz): implement proper validation for this case form.base_fields[media_file_field].required = False if image_file_field: form.image_file_initial = getattr(obj, image_file_field) form.base_fields[image_file_field].required = False return form has_thumbnail = [Picture, MovieClip, FlashMovie] def media_admin_factory(media_model): class MediaLibraryForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(MediaLibraryForm, self).__init__(*args, **kwargs) author_choices = [('', '------')] for author in Author.objects.all(): if media_model in [ctype.model_class() for ctype in author.content_types.all()]: author_choices.append((author.id, author.name)) self.fields['author'].choices = author_choices def save(self, *args, **kwargs): # We override the standard behavior because we've overriden the FileBrowseField # with a simple ClearableFileInput if self.simple: abs_paths = {} instance = super(MediaLibraryForm, self).save(commit=False) image_file_field = instance.image_file_field file_fields = [ instance.media_file_field ] if image_file_field: file_fields.append(image_file_field) for f_field in file_fields: folder = media_model._meta.get_field_by_name(f_field)[0].directory abs_paths[f_field] = os.path.join( settings.MEDIA_ROOT, settings.FILEBROWSER_DIRECTORY, folder ) if f_field in self.files.keys(): f = self.files[f_field] f.name = convert_filename(f.name) name = handle_file_upload(abs_paths[f_field], f) setattr(instance, f_field, name) else: # TODO(nicoechaniz): this is ugly! refactor if f_field in ["image", "still"]: if hasattr(self, "image_file_initial"): setattr(instance, f_field, self.image_file_initial) else: if hasattr(self, "media_file_initial"): setattr(instance, f_field, self.media_file_initial) instance.save() return instance else: return super(MediaLibraryForm, self).save(*args, **kwargs) class Meta: model = media_model if media_model in has_thumbnail: list_display = ['name', 'published', 'thumbnail'] else: list_display = ['name', 'published'] list_display += CollectibleAdmin.list_display return type('%sAdmin' % media_model.__name__, (MediaAdmin,), {'form': MediaLibraryForm, 'list_display': list_display}) admin.site.register(Picture, media_admin_factory(Picture)) admin.site.register(SoundTrack, media_admin_factory(SoundTrack)) admin.site.register(MovieClip, media_admin_factory(MovieClip)) admin.site.register(Document, media_admin_factory(Document)) admin.site.register(FlashMovie, media_admin_factory(FlashMovie)) admin.site.register(RegularFile, media_admin_factory(RegularFile)) admin.site.register(ExternalContent, media_admin_factory(ExternalContent))<|fim▁end|>
from django.db import models

from cyclope import settings as cyc_settings
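# Illustrative sketch (not part of the original admin.py): what a
# media_admin_factory call produces for a model with a thumbnail. These
# assertions only restate behaviour already defined in the factory above;
# PictureAdmin is a hypothetical name for the dynamically built class.
PictureAdmin = media_admin_factory(Picture)  # a MediaAdmin subclass with a custom form
assert issubclass(PictureAdmin, MediaAdmin)
assert PictureAdmin.list_display[:3] == ['name', 'published', 'thumbnail']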
<|file_name|>test_split.py<|end_file_name|><|fim▁begin|>"""Test the split module""" from __future__ import division import warnings import pytest import numpy as np from scipy.sparse import coo_matrix, csc_matrix, csr_matrix from scipy import stats from itertools import combinations from itertools import combinations_with_replacement from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_no_warnings from sklearn.utils.validation import _num_samples from sklearn.utils.mocking import MockDataFrame from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import GroupKFold from sklearn.model_selection import TimeSeriesSplit from sklearn.model_selection import LeaveOneOut from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import ShuffleSplit from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import PredefinedSplit from sklearn.model_selection import check_cv from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RepeatedKFold from sklearn.model_selection import RepeatedStratifiedKFold from sklearn.linear_model import Ridge from sklearn.model_selection._split import _validate_shuffle_split from sklearn.model_selection._split import _CVIterableWrapper from sklearn.model_selection._split import _build_repr from sklearn.model_selection._split import CV_WARNING from sklearn.model_selection._split import NSPLIT_WARNING from sklearn.datasets import load_digits from sklearn.datasets import make_classification from sklearn.externals import six from sklearn.externals.six.moves import zip from sklearn.utils.fixes import comb from sklearn.svm import SVC X = np.ones(10) y = np.arange(10) // 2 P_sparse = coo_matrix(np.eye(5)) test_groups = ( np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]), np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3], ['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3']) digits = load_digits() class MockClassifier(object): """Dummy classifier to test the cross-validation""" def __init__(self, a=0, allow_nd=False): self.a = a self.allow_nd = allow_nd def fit(self, X, Y=None, sample_weight=None, class_prior=None, sparse_sample_weight=None, sparse_param=None, dummy_int=None, dummy_str=None, dummy_obj=None, callback=None): """The dummy arguments are to test that this fit 
function can accept non-array arguments through cross-validation, such as: - int - str (this is actually array-like) - object - function """ self.dummy_int = dummy_int self.dummy_str = dummy_str self.dummy_obj = dummy_obj if callback is not None: callback(self) if self.allow_nd: X = X.reshape(len(X), -1) if X.ndim >= 3 and not self.allow_nd: raise ValueError('X cannot be 3d') if sample_weight is not None: assert_true(sample_weight.shape[0] == X.shape[0], 'MockClassifier extra fit_param sample_weight.shape[0]' ' is {0}, should be {1}'.format(sample_weight.shape[0], X.shape[0])) if class_prior is not None: assert_true(class_prior.shape[0] == len(np.unique(y)), 'MockClassifier extra fit_param class_prior.shape[0]' ' is {0}, should be {1}'.format(class_prior.shape[0], len(np.unique(y)))) if sparse_sample_weight is not None: fmt = ('MockClassifier extra fit_param sparse_sample_weight' '.shape[0] is {0}, should be {1}') assert_true(sparse_sample_weight.shape[0] == X.shape[0], fmt.format(sparse_sample_weight.shape[0], X.shape[0])) if sparse_param is not None: fmt = ('MockClassifier extra fit_param sparse_param.shape ' 'is ({0}, {1}), should be ({2}, {3})') assert_true(sparse_param.shape == P_sparse.shape, fmt.format(sparse_param.shape[0], sparse_param.shape[1], P_sparse.shape[0], P_sparse.shape[1])) return self def predict(self, T): if self.allow_nd: T = T.reshape(len(T), -1) return T[:, 0] def score(self, X=None, Y=None): return 1. / (1 + np.abs(self.a)) def get_params(self, deep=False): return {'a': self.a, 'allow_nd': self.allow_nd} @ignore_warnings def test_cross_validator_with_default_params(): n_samples = 4 n_unique_groups = 4 n_splits = 2 p = 2 n_shuffle_splits = 10 # (the default value) X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) X_1d = np.array([1, 2, 3, 4]) y = np.array([1, 1, 2, 2]) groups = np.array([1, 2, 3, 4]) loo = LeaveOneOut() lpo = LeavePOut(p) kf = KFold(n_splits) skf = StratifiedKFold(n_splits) lolo = LeaveOneGroupOut() lopo = LeavePGroupsOut(p) ss = ShuffleSplit(random_state=0) ps = PredefinedSplit([1, 1, 2, 2]) # n_splits = no. of unique folds = 2 loo_repr = "LeaveOneOut()" lpo_repr = "LeavePOut(p=2)" kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)" skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)" lolo_repr = "LeaveOneGroupOut()" lopo_repr = "LeavePGroupsOut(n_groups=2)" ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, " "test_size='default',\n train_size=None)") ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))" n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits, n_unique_groups, comb(n_unique_groups, p), n_shuffle_splits, 2] for i, (cv, cv_repr) in enumerate(zip( [loo, lpo, kf, skf, lolo, lopo, ss, ps], [loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr, ss_repr, ps_repr])): # Test if get_n_splits works correctly assert_equal(n_splits_expected[i], cv.get_n_splits(X, y, groups)) # Test if the cross-validator works as expected even if # the data is 1d np.testing.assert_equal(list(cv.split(X, y, groups)), list(cv.split(X_1d, y, groups))) # Test that train, test indices returned are integers for train, test in cv.split(X, y, groups): assert_equal(np.asarray(train).dtype.kind, 'i') assert_equal(np.asarray(test).dtype.kind, 'i') # Test if the repr works without any errors assert_equal(cv_repr, repr(cv)) # ValueError for get_n_splits methods msg = "The 'X' parameter should not be None."
assert_raise_message(ValueError, msg, loo.get_n_splits, None, y, groups) assert_raise_message(ValueError, msg, lpo.get_n_splits, None, y, groups) @pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22 def test_2d_y(): # smoke test for 2d y and multi-label n_samples = 30 rng = np.random.RandomState(1) X = rng.randint(0, 3, size=(n_samples, 2)) y = rng.randint(0, 3, size=(n_samples,)) y_2d = y.reshape(-1, 1) y_multilabel = rng.randint(0, 2, size=(n_samples, 3)) groups = rng.randint(0, 3, size=(n_samples,)) splitters = [LeaveOneOut(), LeavePOut(p=2), KFold(), StratifiedKFold(), RepeatedKFold(), RepeatedStratifiedKFold(), ShuffleSplit(), StratifiedShuffleSplit(test_size=.5), GroupShuffleSplit(), LeaveOneGroupOut(), LeavePGroupsOut(n_groups=2), GroupKFold(), TimeSeriesSplit(), PredefinedSplit(test_fold=groups)] for splitter in splitters: list(splitter.split(X, y, groups)) list(splitter.split(X, y_2d, groups)) try: list(splitter.split(X, y_multilabel, groups)) except ValueError as e: allowed_target_types = ('binary', 'multiclass') msg = "Supported target types are: {}. Got 'multilabel".format( allowed_target_types) assert msg in str(e) def check_valid_split(train, test, n_samples=None): # Use Python sets to get more informative assertion failure messages train, test = set(train), set(test) # Train and test split should not overlap assert_equal(train.intersection(test), set()) if n_samples is not None: # Check that the union of train and test split covers all the indices assert_equal(train.union(test), set(range(n_samples))) def check_cv_coverage(cv, X, y, groups, expected_n_splits=None): n_samples = _num_samples(X) # Check that all the samples appear at least once in a test fold if expected_n_splits is not None: assert_equal(cv.get_n_splits(X, y, groups), expected_n_splits) else: expected_n_splits = cv.get_n_splits(X, y, groups) collected_test_samples = set() iterations = 0 for train, test in cv.split(X, y, groups): check_valid_split(train, test, n_samples=n_samples) iterations += 1 collected_test_samples.update(test) # Check that the accumulated test samples cover the whole dataset assert_equal(iterations, expected_n_splits) if n_samples is not None: assert_equal(collected_test_samples, set(range(n_samples))) def test_kfold_valueerrors(): X1 = np.array([[1, 2], [3, 4], [5, 6]]) X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]) # Check that errors are raised if there are not enough samples assert_raises(ValueError, next, KFold(4).split(X1)) # Check that a warning is raised if the least populated class has too few # members. y = np.array([3, 3, -1, -1, 3]) skf_3 = StratifiedKFold(3) assert_warns_message(Warning, "The least populated class", next, skf_3.split(X2, y)) # Check that despite the warning the folds are still computed even # though all the classes are not necessarily represented on each # side of the split at each split with warnings.catch_warnings(): warnings.simplefilter("ignore") check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3) # Check that errors are raised if all n_groups for individual # classes are less than n_splits.
y = np.array([3, 3, -1, -1, 2]) assert_raises(ValueError, next, skf_3.split(X2, y)) # Error when number of folds is <= 1 assert_raises(ValueError, KFold, 0) assert_raises(ValueError, KFold, 1) error_string = ("k-fold cross-validation requires at least one" " train/test split") assert_raise_message(ValueError, error_string, StratifiedKFold, 0) assert_raise_message(ValueError, error_string, StratifiedKFold, 1) # When n_splits is not integer: assert_raises(ValueError, KFold, 1.5) assert_raises(ValueError, KFold, 2.0) assert_raises(ValueError, StratifiedKFold, 1.5) assert_raises(ValueError, StratifiedKFold, 2.0) # When shuffle is not a bool: assert_raises(TypeError, KFold, n_splits=4, shuffle=None) def test_kfold_indices(): # Check all indices are returned in the test folds X1 = np.ones(18) kf = KFold(3) check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3) # Check all indices are returned in the test folds even when equal-sized # folds are not possible X2 = np.ones(17) kf = KFold(3) check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3) # Check if get_n_splits returns the number of folds assert_equal(5, KFold(5).get_n_splits(X2)) def test_kfold_no_shuffle(): # Manually check that KFold preserves the data ordering on toy datasets X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] splits = KFold(2).split(X2[:-1]) train, test = next(splits) assert_array_equal(test, [0, 1]) assert_array_equal(train, [2, 3]) train, test = next(splits) assert_array_equal(test, [2, 3]) assert_array_equal(train, [0, 1]) splits = KFold(2).split(X2) train, test = next(splits) assert_array_equal(test, [0, 1, 2]) assert_array_equal(train, [3, 4]) train, test = next(splits) assert_array_equal(test, [3, 4]) assert_array_equal(train, [0, 1, 2]) def test_stratified_kfold_no_shuffle(): # Manually check that StratifiedKFold preserves the data ordering as much # as possible on toy datasets in order to avoid hiding sample dependencies # when possible X, y = np.ones(4), [1, 1, 0, 0] splits = StratifiedKFold(2).split(X, y) train, test = next(splits) assert_array_equal(test, [0, 2]) assert_array_equal(train, [1, 3]) train, test = next(splits) assert_array_equal(test, [1, 3]) assert_array_equal(train, [0, 2]) X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0] splits = StratifiedKFold(2).split(X, y) train, test = next(splits) assert_array_equal(test, [0, 1, 3, 4]) assert_array_equal(train, [2, 5, 6]) train, test = next(splits) assert_array_equal(test, [2, 5, 6]) assert_array_equal(train, [0, 1, 3, 4]) # Check if get_n_splits returns the number of folds assert_equal(5, StratifiedKFold(5).get_n_splits(X, y)) # Make sure string labels are also supported X = np.ones(7) y1 = ['1', '1', '1', '0', '0', '0', '0'] y2 = [1, 1, 1, 0, 0, 0, 0] np.testing.assert_equal( list(StratifiedKFold(2).split(X, y1)), list(StratifiedKFold(2).split(X, y2))) def test_stratified_kfold_ratios(): # Check that stratified kfold preserves class ratios in individual splits # Repeat with shuffling turned off and on n_samples = 1000 X = np.ones(n_samples) y = np.array([4] * int(0.10 * n_samples) + [0] * int(0.89 * n_samples) + [1] * int(0.01 * n_samples)) for shuffle in (False, True): for train, test in StratifiedKFold(5, shuffle=shuffle).split(X, y): assert_almost_equal(np.sum(y[train] == 4) / len(train), 0.10, 2) assert_almost_equal(np.sum(y[train] == 0) / len(train), 0.89, 2) assert_almost_equal(np.sum(y[train] == 1) / len(train), 0.01, 2) assert_almost_equal(np.sum(y[test] == 4) / len(test), 0.10, 2) assert_almost_equal(np.sum(y[test] == 0) / 
len(test), 0.89, 2) assert_almost_equal(np.sum(y[test] == 1) / len(test), 0.01, 2) def test_kfold_balance(): # Check that KFold returns folds with balanced sizes for i in range(11, 17): kf = KFold(5).split(X=np.ones(i)) sizes = [] for _, test in kf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), i) def test_stratifiedkfold_balance(): # Check that KFold returns folds with balanced sizes (only when # stratification is possible) # Repeat with shuffling turned off and on X = np.ones(17) y = [0] * 3 + [1] * 14 for shuffle in (True, False): cv = StratifiedKFold(3, shuffle=shuffle) for i in range(11, 17): skf = cv.split(X[:i], y[:i]) sizes = [] for _, test in skf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), i) def test_shuffle_kfold(): # Check the indices are shuffled properly kf = KFold(3) kf2 = KFold(3, shuffle=True, random_state=0) kf3 = KFold(3, shuffle=True, random_state=1) X = np.ones(300) all_folds = np.zeros(300) for (tr1, te1), (tr2, te2), (tr3, te3) in zip( kf.split(X), kf2.split(X), kf3.split(X)): for tr_a, tr_b in combinations((tr1, tr2, tr3), 2): # Assert that there is no complete overlap assert_not_equal(len(np.intersect1d(tr_a, tr_b)), len(tr1)) # Set all test indices in successive iterations of kf2 to 1 all_folds[te2] = 1 # Check that all indices are returned in the different test folds assert_equal(sum(all_folds), 300) def test_shuffle_kfold_stratifiedkfold_reproducibility(): # Check that when the shuffle is True multiple split calls produce the # same split when random_state is set X = np.ones(15) # Divisible by 3 y = [0] * 7 + [1] * 8 X2 = np.ones(16) # Not divisible by 3 y2 = [0] * 8 + [1] * 8 kf = KFold(3, shuffle=True, random_state=0) skf = StratifiedKFold(3, shuffle=True, random_state=0) for cv in (kf, skf): np.testing.assert_equal(list(cv.split(X, y)), list(cv.split(X, y))) np.testing.assert_equal(list(cv.split(X2, y2)), list(cv.split(X2, y2))) kf = KFold(3, shuffle=True) skf = StratifiedKFold(3, shuffle=True) for cv in (kf, skf): for data in zip((X, X2), (y, y2)): # Test if the two splits are different cv for (_, test_a), (_, test_b) in zip(cv.split(*data), cv.split(*data)): # cv.split(...) returns an array of tuples, each tuple # consisting of an array with train indices and test indices with pytest.raises(AssertionError, message="The splits for data, are same even" " when random state is not set"): np.testing.assert_array_equal(test_a, test_b) def test_shuffle_stratifiedkfold(): # Check that shuffling is happening when requested, and for proper # sample coverage X_40 = np.ones(40) y = [0] * 20 + [1] * 20 kf0 = StratifiedKFold(5, shuffle=True, random_state=0) kf1 = StratifiedKFold(5, shuffle=True, random_state=1) for (_, test0), (_, test1) in zip(kf0.split(X_40, y), kf1.split(X_40, y)): assert_not_equal(set(test0), set(test1)) check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5) def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372 # The digits samples are dependent: they are apparently grouped by authors # although we don't have any information on the groups segment locations # for this data. We can highlight this fact by computing k-fold cross- # validation with and without shuffling: we observe that the shuffling case # wrongly makes the IID assumption and is therefore too optimistic: it # estimates a much higher accuracy (around 0.93) than that the non # shuffling variant (around 0.81). 
X, y = digits.data[:600], digits.target[:600] model = SVC(C=10, gamma=0.005) n_splits = 3 cv = KFold(n_splits=n_splits, shuffle=False) mean_score = cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.92, mean_score) assert_greater(mean_score, 0.80) # Shuffling the data artificially breaks the dependency and hides the # overfitting of the model with regards to the writing style of the authors # by yielding a seriously overestimated score: cv = KFold(n_splits, shuffle=True, random_state=0) mean_score = cross_val_score(model, X, y, cv=cv).mean() assert_greater(mean_score, 0.92) cv = KFold(n_splits, shuffle=True, random_state=1) mean_score = cross_val_score(model, X, y, cv=cv).mean() assert_greater(mean_score, 0.92) # Similarly, StratifiedKFold should try to shuffle the data as little # as possible (while respecting the balanced class constraints) # and thus be able to detect the dependency by not overestimating # the CV score either. As the digits dataset is approximately balanced # the estimated mean score is close to the score measured with # non-shuffled KFold cv = StratifiedKFold(n_splits) mean_score = cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.93, mean_score) assert_greater(mean_score, 0.80) def test_shuffle_split(): ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X) ss2 = ShuffleSplit(test_size=2, random_state=0).split(X) ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X) for typ in six.integer_types: ss4 = ShuffleSplit(test_size=typ(2), random_state=0).split(X) for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4): assert_array_equal(t1[0], t2[0]) assert_array_equal(t2[0], t3[0]) assert_array_equal(t3[0], t4[0]) assert_array_equal(t1[1], t2[1]) assert_array_equal(t2[1], t3[1]) assert_array_equal(t3[1], t4[1]) @ignore_warnings def test_stratified_shuffle_split_init(): X = np.arange(7) y = np.asarray([0, 1, 1, 1, 2, 2, 2]) # Check that error is raised if there is a class with only one sample assert_raises(ValueError, next, StratifiedShuffleSplit(3, 0.2).split(X, y)) # Check that error is raised if the test set size is smaller than n_classes assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y)) # Check that error is raised if the train set size is smaller than # n_classes assert_raises(ValueError, next, StratifiedShuffleSplit(3, 3, 2).split(X, y)) X = np.arange(9) y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2]) # Check that errors are raised if there is not enough samples assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6) assert_raises(ValueError, next, StratifiedShuffleSplit(3, 8, 0.6).split(X, y)) assert_raises(ValueError, next, StratifiedShuffleSplit(3, 0.6, 8).split(X, y)) # Train size or test size too small assert_raises(ValueError, next, StratifiedShuffleSplit(train_size=2).split(X, y)) assert_raises(ValueError, next, StratifiedShuffleSplit(test_size=2).split(X, y)) def test_stratified_shuffle_split_respects_test_size(): y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]) test_size = 5 train_size = 10 sss = StratifiedShuffleSplit(6, test_size=test_size, train_size=train_size, random_state=0).split(np.ones(len(y)), y) for train, test in sss: assert_equal(len(train), train_size) assert_equal(len(test), test_size) def test_stratified_shuffle_split_iter(): ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2), np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), np.array([-1] * 800 + [1] * 50), 
np.concatenate([[i] * (100 + i) for i in range(11)]), [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3], ['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'], ] for y in ys: sss = StratifiedShuffleSplit(6, test_size=0.33, random_state=0).split(np.ones(len(y)), y) y = np.asanyarray(y) # To make it indexable for y[train] # this is how test-size is computed internally # in _validate_shuffle_split test_size = np.ceil(0.33 * len(y)) train_size = len(y) - test_size for train, test in sss: assert_array_equal(np.unique(y[train]), np.unique(y[test])) # Checks if folds keep classes proportions p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1]) / float(len(y[train]))) p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1]) / float(len(y[test]))) assert_array_almost_equal(p_train, p_test, 1) assert_equal(len(train) + len(test), y.size) assert_equal(len(train), train_size) assert_equal(len(test), test_size) assert_array_equal(np.lib.arraysetops.intersect1d(train, test), []) def test_stratified_shuffle_split_even(): # Test the StratifiedShuffleSplit, indices are drawn with a # equal chance n_folds = 5 n_splits = 1000 def assert_counts_are_ok(idx_counts, p): # Here we test that the distribution of the counts # per index is close enough to a binomial threshold = 0.05 / n_splits bf = stats.binom(n_splits, p) for count in idx_counts: prob = bf.pmf(count) assert_true(prob > threshold, "An index is not drawn with chance corresponding " "to even draws") for n_samples in (6, 22): groups = np.array((n_samples // 2) * [0, 1]) splits = StratifiedShuffleSplit(n_splits=n_splits, test_size=1. / n_folds, random_state=0) train_counts = [0] * n_samples test_counts = [0] * n_samples n_splits_actual = 0 for train, test in splits.split(X=np.ones(n_samples), y=groups): n_splits_actual += 1 for counter, ids in [(train_counts, train), (test_counts, test)]: for id in ids: counter[id] += 1 assert_equal(n_splits_actual, n_splits) n_train, n_test = _validate_shuffle_split( n_samples, test_size=1. / n_folds, train_size=1. - (1. 
/ n_folds)) assert_equal(len(train), n_train) assert_equal(len(test), n_test) assert_equal(len(set(train).intersection(test)), 0) group_counts = np.unique(groups) assert_equal(splits.test_size, 1.0 / n_folds) assert_equal(n_train + n_test, len(groups)) assert_equal(len(group_counts), 2) ex_test_p = float(n_test) / n_samples ex_train_p = float(n_train) / n_samples assert_counts_are_ok(train_counts, ex_train_p) assert_counts_are_ok(test_counts, ex_test_p) def test_stratified_shuffle_split_overlap_train_test_bug(): # See https://github.com/scikit-learn/scikit-learn/issues/6121 for # the original bug report y = [0, 1, 2, 3] * 3 + [4, 5] * 5 X = np.ones_like(y) sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) train, test = next(sss.split(X=X, y=y)) # no overlap assert_array_equal(np.intersect1d(train, test), []) # complete partition assert_array_equal(np.union1d(train, test), np.arange(len(y))) def test_stratified_shuffle_split_multilabel(): # fix for issue 9037 for y in [np.array([[0, 1], [1, 0], [1, 0], [0, 1]]), np.array([[0, 1], [1, 1], [1, 1], [0, 1]])]: X = np.ones_like(y) sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) train, test = next(sss.split(X=X, y=y)) y_train = y[train] y_test = y[test] # no overlap assert_array_equal(np.intersect1d(train, test), []) # complete partition assert_array_equal(np.union1d(train, test), np.arange(len(y))) # correct stratification of entire rows # (by design, here y[:, 0] uniquely determines the entire row of y) expected_ratio = np.mean(y[:, 0]) assert_equal(expected_ratio, np.mean(y_train[:, 0])) assert_equal(expected_ratio, np.mean(y_test[:, 0])) def test_stratified_shuffle_split_multilabel_many_labels(): # fix in PR #9922: for multilabel data with > 1000 labels, str(row) # truncates with an ellipsis for elements in positions 4 through # len(row) - 4, so labels were not being correctly split using the powerset # method for transforming a multilabel problem to a multiclass one; this # test checks that this problem is fixed. row_with_many_zeros = [1, 0, 1] + [0] * 1000 + [1, 0, 1] row_with_many_ones = [1, 0, 1] + [1] * 1000 + [1, 0, 1] y = np.array([row_with_many_zeros] * 10 + [row_with_many_ones] * 100) X = np.ones_like(y) sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) train, test = next(sss.split(X=X, y=y)) y_train = y[train] y_test = y[test] # correct stratification of entire rows # (by design, here y[:, 4] uniquely determines the entire row of y) expected_ratio = np.mean(y[:, 4]) assert_equal(expected_ratio, np.mean(y_train[:, 4])) assert_equal(expected_ratio, np.mean(y_test[:, 4])) def test_predefinedsplit_with_kfold_split(): # Check that PredefinedSplit can reproduce a split generated by Kfold. folds = np.full(10, -1.) kf_train = [] kf_test = [] for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)): kf_train.append(train_ind) kf_test.append(test_ind) folds[test_ind] = i ps_train = [] ps_test = [] ps = PredefinedSplit(folds) # n_splits is simply the no of unique folds assert_equal(len(np.unique(folds)), ps.get_n_splits()) for train_ind, test_ind in ps.split(): ps_train.append(train_ind) ps_test.append(test_ind) assert_array_equal(ps_train, kf_train) assert_array_equal(ps_test, kf_test) def test_group_shuffle_split(): for groups_i in test_groups: X = y = np.ones(len(groups_i)) n_splits = 6 test_size = 1. 
/ 3 slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0) # Make sure the repr works repr(slo) # Test that the length is correct assert_equal(slo.get_n_splits(X, y, groups=groups_i), n_splits) l_unique = np.unique(groups_i) l = np.asarray(groups_i) for train, test in slo.split(X, y, groups=groups_i): # First test: no train group is in the test set and vice versa l_train_unique = np.unique(l[train]) l_test_unique = np.unique(l[test]) assert_false(np.any(np.in1d(l[train], l_test_unique))) assert_false(np.any(np.in1d(l[test], l_train_unique))) # Second test: train and test add up to all the data assert_equal(l[train].size + l[test].size, l.size) # Third test: train and test are disjoint assert_array_equal(np.intersect1d(train, test), []) # Fourth test: # unique train and test groups are correct, +- 1 for rounding error assert_true(abs(len(l_test_unique) - round(test_size * len(l_unique))) <= 1) assert_true(abs(len(l_train_unique) - round((1.0 - test_size) * len(l_unique))) <= 1) def test_leave_one_p_group_out(): logo = LeaveOneGroupOut() lpgo_1 = LeavePGroupsOut(n_groups=1) lpgo_2 = LeavePGroupsOut(n_groups=2) # Make sure the repr works assert_equal(repr(logo), 'LeaveOneGroupOut()') assert_equal(repr(lpgo_1), 'LeavePGroupsOut(n_groups=1)') assert_equal(repr(lpgo_2), 'LeavePGroupsOut(n_groups=2)') assert_equal(repr(LeavePGroupsOut(n_groups=3)), 'LeavePGroupsOut(n_groups=3)') for j, (cv, p_groups_out) in enumerate(((logo, 1), (lpgo_1, 1), (lpgo_2, 2))): for i, groups_i in enumerate(test_groups): n_groups = len(np.unique(groups_i)) n_splits = (n_groups if p_groups_out == 1 else n_groups * (n_groups - 1) / 2) X = y = np.ones(len(groups_i)) # Test that the length is correct assert_equal(cv.get_n_splits(X, y, groups=groups_i), n_splits) groups_arr = np.asarray(groups_i) # Split using the original list / array / list of string groups_i for train, test in cv.split(X, y, groups=groups_i): # First test: no train group is in the test set and vice versa assert_array_equal(np.intersect1d(groups_arr[train], groups_arr[test]).tolist(), []) # Second test: train and test add up to all the data assert_equal(len(train) + len(test), len(groups_i)) # Third test: # The number of groups in test must be equal to p_groups_out assert_true(np.unique(groups_arr[test]).shape[0], p_groups_out) # check get_n_splits() with dummy parameters assert_equal(logo.get_n_splits(None, None, ['a', 'b', 'c', 'b', 'c']), 3) assert_equal(logo.get_n_splits(groups=[1.0, 1.1, 1.0, 1.2]), 3) assert_equal(lpgo_2.get_n_splits(None, None, np.arange(4)), 6) assert_equal(lpgo_1.get_n_splits(groups=np.arange(4)), 4) # raise ValueError if a `groups` parameter is illegal with assert_raises(ValueError): logo.get_n_splits(None, None, [0.0, np.nan, 0.0]) with assert_raises(ValueError): lpgo_2.get_n_splits(None, None, [0.0, np.inf, 0.0]) msg = "The 'groups' parameter should not be None." 
assert_raise_message(ValueError, msg, logo.get_n_splits, None, None, None) assert_raise_message(ValueError, msg, lpgo_1.get_n_splits, None, None, None) def test_leave_group_out_changing_groups(): # Check that LeaveOneGroupOut and LeavePGroupsOut work normally if # the groups variable is changed before calling split groups = np.array([0, 1, 2, 1, 1, 2, 0, 0]) X = np.ones(len(groups)) groups_changing = np.array(groups, copy=True) lolo = LeaveOneGroupOut().split(X, groups=groups) lolo_changing = LeaveOneGroupOut().split(X, groups=groups) lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups) lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups) groups_changing[:] = 0 for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]: for (train, test), (train_chan, test_chan) in zip(llo, llo_changing): assert_array_equal(train, train_chan) assert_array_equal(test, test_chan) # n_splits = no of 2 (p) group combinations of the unique groups = 3C2 = 3 assert_equal( 3, LeavePGroupsOut(n_groups=2).get_n_splits(X, y=X, groups=groups)) # n_splits = no of unique groups (C(uniq_lbls, 1) = n_unique_groups) assert_equal(3, LeaveOneGroupOut().get_n_splits(X, y=X, groups=groups)) def test_leave_one_p_group_out_error_on_fewer_number_of_groups(): X = y = groups = np.ones(0) assert_raise_message(ValueError, "Found array with 0 sample(s)", next, LeaveOneGroupOut().split(X, y, groups)) X = y = groups = np.ones(1) msg = ("The groups parameter contains fewer than 2 unique groups ({}). " "LeaveOneGroupOut expects at least 2.").format(groups) assert_raise_message(ValueError, msg, next, LeaveOneGroupOut().split(X, y, groups)) X = y = groups = np.ones(1) msg = ("The groups parameter contains fewer than (or equal to) n_groups " "(3) numbers of unique groups ({}). LeavePGroupsOut expects " "that at least n_groups + 1 (4) unique groups " "be present").format(groups) assert_raise_message(ValueError, msg, next, LeavePGroupsOut(n_groups=3).split(X, y, groups)) X = y = groups = np.arange(3) msg = ("The groups parameter contains fewer than (or equal to) n_groups " "(3) numbers of unique groups ({}). 
LeavePGroupsOut expects " "that at least n_groups + 1 (4) unique groups " "be present").format(groups) assert_raise_message(ValueError, msg, next, LeavePGroupsOut(n_groups=3).split(X, y, groups)) @ignore_warnings def test_repeated_cv_value_errors(): # n_repeats is not integer or <= 0 for cv in (RepeatedKFold, RepeatedStratifiedKFold): assert_raises(ValueError, cv, n_repeats=0) assert_raises(ValueError, cv, n_repeats=1.5) def test_repeated_kfold_determinstic_split(): X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] random_state = 258173307 rkf = RepeatedKFold( n_splits=2, n_repeats=2, random_state=random_state) # split should produce same and deterministic splits on # each call for _ in range(3): splits = rkf.split(X) train, test = next(splits) assert_array_equal(train, [2, 4]) assert_array_equal(test, [0, 1, 3]) train, test = next(splits) assert_array_equal(train, [0, 1, 3]) assert_array_equal(test, [2, 4]) train, test = next(splits) assert_array_equal(train, [0, 1]) assert_array_equal(test, [2, 3, 4]) train, test = next(splits) assert_array_equal(train, [2, 3, 4]) assert_array_equal(test, [0, 1]) assert_raises(StopIteration, next, splits) def test_get_n_splits_for_repeated_kfold(): n_splits = 3 n_repeats = 4 rkf = RepeatedKFold(n_splits, n_repeats) expected_n_splits = n_splits * n_repeats assert_equal(expected_n_splits, rkf.get_n_splits()) def test_get_n_splits_for_repeated_stratified_kfold(): n_splits = 3 n_repeats = 4 rskf = RepeatedStratifiedKFold(n_splits, n_repeats) expected_n_splits = n_splits * n_repeats assert_equal(expected_n_splits, rskf.get_n_splits()) def test_repeated_stratified_kfold_determinstic_split(): X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] y = [1, 1, 1, 0, 0] random_state = 1944695409 rskf = RepeatedStratifiedKFold( n_splits=2, n_repeats=2, random_state=random_state) # split should produce same and deterministic splits on # each call for _ in range(3): splits = rskf.split(X, y) train, test = next(splits) assert_array_equal(train, [1, 4]) assert_array_equal(test, [0, 2, 3]) train, test = next(splits) assert_array_equal(train, [0, 2, 3]) assert_array_equal(test, [1, 4]) train, test = next(splits) assert_array_equal(train, [2, 3]) assert_array_equal(test, [0, 1, 4]) train, test = next(splits) assert_array_equal(train, [0, 1, 4]) assert_array_equal(test, [2, 3]) assert_raises(StopIteration, next, splits) def test_train_test_split_errors(): assert_raises(ValueError, train_test_split) with warnings.catch_warnings(): # JvR: Currently, a future warning is raised if test_size is not # given. 
As that is the point of this test, ignore the future warning warnings.filterwarnings("ignore", category=FutureWarning) assert_raises(ValueError, train_test_split, range(3), train_size=1.1) assert_raises(ValueError, train_test_split, range(3), test_size=0.6, train_size=0.6) assert_raises(ValueError, train_test_split, range(3), test_size=np.float32(0.6), train_size=np.float32(0.6)) assert_raises(ValueError, train_test_split, range(3), test_size="wrong_type") assert_raises(ValueError, train_test_split, range(3), test_size=2, train_size=4) assert_raises(TypeError, train_test_split, range(3), some_argument=1.1) assert_raises(ValueError, train_test_split, range(3), range(42)) assert_raises(ValueError, train_test_split, range(10), shuffle=False, stratify=True) def test_train_test_split(): X = np.arange(100).reshape((10, 10)) X_s = coo_matrix(X) y = np.arange(10) # simple test split = train_test_split(X, y, test_size=None, train_size=.5) X_train, X_test, y_train, y_test = split assert_equal(len(y_test), len(y_train)) # test correspondence of X and y assert_array_equal(X_train[:, 0], y_train * 10) assert_array_equal(X_test[:, 0], y_test * 10) # don't convert lists to anything else by default split = train_test_split(X, X_s, y.tolist()) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert_true(isinstance(y_train, list)) assert_true(isinstance(y_test, list)) # allow nd-arrays X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) split = train_test_split(X_4d, y_3d) assert_equal(split[0].shape, (7, 5, 3, 2)) assert_equal(split[1].shape, (3, 5, 3, 2)) assert_equal(split[2].shape, (7, 7, 11)) assert_equal(split[3].shape, (3, 7, 11)) # test stratification option y = np.array([1, 1, 1, 1, 2, 2, 2, 2]) for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75], [2, 4, 2, 4, 6]): train, test = train_test_split(y, test_size=test_size, stratify=y, random_state=0) assert_equal(len(test), exp_test_size) assert_equal(len(test) + len(train), len(y)) # check the 1:1 ratio of ones and twos in the data is preserved assert_equal(np.sum(train == 1), np.sum(train == 2)) # test unshuffled split y = np.arange(10) for test_size in [2, 0.2]: train, test = train_test_split(y, shuffle=False, test_size=test_size) assert_array_equal(test, [8, 9]) assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6, 7]) @ignore_warnings def train_test_split_pandas(): # check train_test_split doesn't destroy pandas dataframe types = [MockDataFrame] try: from pandas import DataFrame types.append(DataFrame) except ImportError: pass for InputFeatureType in types: # X dataframe X_df = InputFeatureType(X) X_train, X_test = train_test_split(X_df) assert_true(isinstance(X_train, InputFeatureType)) assert_true(isinstance(X_test, InputFeatureType)) def train_test_split_sparse(): # check that train_test_split converts scipy sparse matrices # to csr, as stated in the documentation X = np.arange(100).reshape((10, 10)) sparse_types = [csr_matrix, csc_matrix, coo_matrix] for InputFeatureType in sparse_types: X_s = InputFeatureType(X) X_train, X_test = train_test_split(X_s) assert_true(isinstance(X_train, csr_matrix)) assert_true(isinstance(X_test, csr_matrix)) def train_test_split_mock_pandas(): # X mock dataframe X_df = MockDataFrame(X) X_train, X_test = train_test_split(X_df) assert_true(isinstance(X_train, MockDataFrame)) assert_true(isinstance(X_test, MockDataFrame)) X_train_arr, X_test_arr = train_test_split(X_df) def train_test_split_list_input(): # Check that when y is a list / list of string 
labels, it works. X = np.ones(7) y1 = ['1'] * 4 + ['0'] * 3 y2 = np.hstack((np.ones(4), np.zeros(3))) y3 = y2.tolist() for stratify in (True, False): X_train1, X_test1, y_train1, y_test1 = train_test_split( X, y1, stratify=y1 if stratify else None, random_state=0) X_train2, X_test2, y_train2, y_test2 = train_test_split( X, y2, stratify=y2 if stratify else None, random_state=0) X_train3, X_test3, y_train3, y_test3 = train_test_split( X, y3, stratify=y3 if stratify else None, random_state=0) np.testing.assert_equal(X_train1, X_train2) np.testing.assert_equal(y_train2, y_train3) np.testing.assert_equal(X_test1, X_test3) np.testing.assert_equal(y_test3, y_test2) @ignore_warnings def test_shufflesplit_errors(): # When the {test|train}_size is a float/invalid, error is raised at init assert_raises(ValueError, ShuffleSplit, test_size=None, train_size=None) assert_raises(ValueError, ShuffleSplit, test_size=2.0) assert_raises(ValueError, ShuffleSplit, test_size=1.0) assert_raises(ValueError, ShuffleSplit, test_size=0.1, train_size=0.95) assert_raises(ValueError, ShuffleSplit, train_size=1j) # When the {test|train}_size is an int, validation is based on the input X # and happens at split(...) assert_raises(ValueError, next, ShuffleSplit(test_size=11).split(X)) assert_raises(ValueError, next, ShuffleSplit(test_size=10).split(X)) assert_raises(ValueError, next, ShuffleSplit(test_size=8, train_size=3).split(X)) def test_shufflesplit_reproducible(): # Check that iterating twice on the ShuffleSplit gives the same # sequence of train-test when the random_state is given ss = ShuffleSplit(random_state=21) assert_array_equal(list(a for a, b in ss.split(X)), list(a for a, b in ss.split(X))) def test_stratifiedshufflesplit_list_input(): # Check that when y is a list / list of string labels, it works. 
    sss = StratifiedShuffleSplit(test_size=2, random_state=42)
    X = np.ones(7)
    y1 = ['1'] * 4 + ['0'] * 3
    y2 = np.hstack((np.ones(4), np.zeros(3)))
    y3 = y2.tolist()

    np.testing.assert_equal(list(sss.split(X, y1)),
                            list(sss.split(X, y2)))
    np.testing.assert_equal(list(sss.split(X, y3)),
                            list(sss.split(X, y2)))


def test_train_test_split_allow_nans():
    # Check that train_test_split allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
    train_test_split(X, y, test_size=0.2, random_state=42)


def test_check_cv():
    X = np.ones(9)
    cv = check_cv(3, classifier=False)

    # Use numpy.testing.assert_equal which recursively compares
    # lists of lists
    np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))

    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = check_cv(3, y_binary, classifier=True)
    np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_binary)),
                            list(cv.split(X, y_binary)))

    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])<|fim▁hole|>
    cv = check_cv(3, y_multiclass, classifier=True)
    np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass)),
                            list(cv.split(X, y_multiclass)))
    # also works with 2d multiclass
    y_multiclass_2d = y_multiclass.reshape(-1, 1)
    cv = check_cv(3, y_multiclass_2d, classifier=True)
    np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass_2d)),
                            list(cv.split(X, y_multiclass_2d)))

    assert_false(np.all(
        next(StratifiedKFold(3).split(X, y_multiclass_2d))[0] ==
        next(KFold(3).split(X, y_multiclass_2d))[0]))

    X = np.ones(5)
    y_multilabel = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1],
                             [1, 1, 0, 1], [0, 0, 1, 0]])
    cv = check_cv(3, y_multilabel, classifier=True)
    np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))

    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    cv = check_cv(3, y_multioutput, classifier=True)
    np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))

    assert_raises(ValueError, check_cv, cv="lolo")


def test_cv_iterable_wrapper():
    kf_iter = KFold(n_splits=5).split(X, y)
    kf_iter_wrapped = check_cv(kf_iter)
    # Since the wrapped iterable is enlisted and stored,
    # split can be called any number of times to produce
    # consistent results.
    np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)),
                            list(kf_iter_wrapped.split(X, y)))
    # If the splits are randomized, successive calls to split yield different
    # results
    kf_randomized_iter = KFold(n_splits=5, shuffle=True).split(X, y)
    kf_randomized_iter_wrapped = check_cv(kf_randomized_iter)
    # numpy's assert_equal properly compares nested lists
    np.testing.assert_equal(list(kf_randomized_iter_wrapped.split(X, y)),
                            list(kf_randomized_iter_wrapped.split(X, y)))

    try:
        np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)),
                                list(kf_randomized_iter_wrapped.split(X, y)))
        splits_are_equal = True
    except AssertionError:
        splits_are_equal = False
    assert_false(splits_are_equal, "If the splits are randomized, "
                 "successive calls to split should yield different results")


def test_group_kfold():
    rng = np.random.RandomState(0)

    # Parameters of the test
    n_groups = 15
    n_samples = 1000
    n_splits = 5

    X = y = np.ones(n_samples)

    # Construct the test data
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    groups = rng.randint(0, n_groups, n_samples)

    ideal_n_groups_per_fold = n_samples // n_splits

    # Get the test fold indices from the test set indices of each fold
    folds = np.zeros(n_samples)
    lkf = GroupKFold(n_splits=n_splits)
    for i, (_, test) in enumerate(lkf.split(X, y, groups)):
        folds[test] = i

    # Check that folds have approximately the same size
    assert_equal(len(folds), len(groups))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_groups_per_fold))

    # Check that each group appears only in 1 fold
    for group in np.unique(groups):
        assert_equal(len(np.unique(folds[groups == group])), 1)

    # Check that no group is on both sides of the split
    groups = np.asarray(groups, dtype=object)
    for train, test in lkf.split(X, y, groups):
        assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)

    # Construct the test data
    groups = np.array(['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
                       'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
                       'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
                       'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
                       'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
                       'Madmood', 'Cary', 'Mary', 'Alexandre', 'David',
                       'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia'])

    n_groups = len(np.unique(groups))
    n_samples = len(groups)
    n_splits = 5
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    ideal_n_groups_per_fold = n_samples // n_splits

    X = y = np.ones(n_samples)

    # Get the test fold indices from the test set indices of each fold
    folds = np.zeros(n_samples)
    for i, (_, test) in enumerate(lkf.split(X, y, groups)):
        folds[test] = i

    # Check that folds have approximately the same size
    assert_equal(len(folds), len(groups))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_groups_per_fold))

    # Check that each group appears only in 1 fold
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        for group in np.unique(groups):
            assert_equal(len(np.unique(folds[groups == group])), 1)

    # Check that no group is on both sides of the split
    groups = np.asarray(groups, dtype=object)
    for train, test in lkf.split(X, y, groups):
        assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)

    # groups can also be a list
    cv_iter = list(lkf.split(X, y, groups.tolist()))
    for (train1, test1), (train2, test2) in zip(lkf.split(X, y, groups),
                                                cv_iter):
        assert_array_equal(train1, train2)
        assert_array_equal(test1, test2)

    # Should fail if there are more folds than groups
    groups = np.array([1, 1, 1, 2, 2])
    X = y = np.ones(len(groups))
    assert_raises_regexp(ValueError, "Cannot have number of splits.*greater",
                         next, GroupKFold(n_splits=3).split(X, y, groups))


def test_time_series_cv():
    X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]

    # Should fail if there are more folds than samples
    assert_raises_regexp(ValueError, "Cannot have number of folds.*greater",
                         next,
                         TimeSeriesSplit(n_splits=7).split(X))

    tscv = TimeSeriesSplit(2)

    # Manually check that Time Series CV preserves the data
    # ordering on toy datasets
    splits = tscv.split(X[:-1])
    train, test = next(splits)
    assert_array_equal(train, [0, 1])
    assert_array_equal(test, [2, 3])

    train, test = next(splits)
    assert_array_equal(train, [0, 1, 2, 3])
    assert_array_equal(test, [4, 5])

    splits = TimeSeriesSplit(2).split(X)

    train, test = next(splits)
    assert_array_equal(train, [0, 1, 2])
    assert_array_equal(test, [3, 4])

    train, test = next(splits)
    assert_array_equal(train, [0, 1, 2, 3, 4])
    assert_array_equal(test, [5, 6])

    # Check get_n_splits returns the correct number of splits
    splits = TimeSeriesSplit(2).split(X)
    n_splits_actual = len(list(splits))
    assert_equal(n_splits_actual, tscv.get_n_splits())
    assert_equal(n_splits_actual, 2)


def _check_time_series_max_train_size(splits, check_splits, max_train_size):
    for (train, test), (check_train, check_test) in zip(splits, check_splits):
        assert_array_equal(test, check_test)
        assert_true(len(check_train) <= max_train_size)
        suffix_start = max(len(train) - max_train_size, 0)
        assert_array_equal(check_train, train[suffix_start:])


def test_time_series_max_train_size():
    X = np.zeros((6, 1))
    splits = TimeSeriesSplit(n_splits=3).split(X)
    check_splits = TimeSeriesSplit(n_splits=3, max_train_size=3).split(X)
    _check_time_series_max_train_size(splits, check_splits, max_train_size=3)

    # Test for the case where the size of a fold is greater than max_train_size
    splits = TimeSeriesSplit(n_splits=3).split(X)
    check_splits = TimeSeriesSplit(n_splits=3, max_train_size=2).split(X)
    _check_time_series_max_train_size(splits, check_splits, max_train_size=2)

    # Test for the case where the size of each fold is less than max_train_size
    splits = TimeSeriesSplit(n_splits=3).split(X)
    check_splits = TimeSeriesSplit(n_splits=3, max_train_size=5).split(X)
    _check_time_series_max_train_size(splits, check_splits, max_train_size=5)


@pytest.mark.filterwarnings('ignore: You should specify a value')  # 0.22
def test_nested_cv():
    # Test if nested cross validation works with different combinations of cv
    rng = np.random.RandomState(0)

    X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
    groups = rng.randint(0, 5, 15)

    cvs = [LeaveOneGroupOut(), LeaveOneOut(), GroupKFold(), StratifiedKFold(),
           StratifiedShuffleSplit(n_splits=3, random_state=0)]

    for inner_cv, outer_cv in combinations_with_replacement(cvs, 2):
        gs = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]},
                          cv=inner_cv, error_score='raise', iid=False)
        cross_val_score(gs, X=X, y=y, groups=groups, cv=outer_cv,
                        fit_params={'groups': groups})


def test_train_test_default_warning():
    assert_warns(FutureWarning, ShuffleSplit, train_size=0.75)
    assert_warns(FutureWarning, GroupShuffleSplit, train_size=0.75)
    assert_warns(FutureWarning, StratifiedShuffleSplit, train_size=0.75)
    assert_warns(FutureWarning, train_test_split, range(3), train_size=0.75)


def test_nsplit_default_warn():
    # Test that warnings are raised. Will be removed in 0.22
    assert_warns_message(FutureWarning, NSPLIT_WARNING, KFold)
    assert_warns_message(FutureWarning, NSPLIT_WARNING, GroupKFold)
    assert_warns_message(FutureWarning, NSPLIT_WARNING, StratifiedKFold)
    assert_warns_message(FutureWarning, NSPLIT_WARNING, TimeSeriesSplit)

    assert_no_warnings(KFold, n_splits=5)
    assert_no_warnings(GroupKFold, n_splits=5)
    assert_no_warnings(StratifiedKFold, n_splits=5)
    assert_no_warnings(TimeSeriesSplit, n_splits=5)


def test_check_cv_default_warn():
    # Test that warnings are raised. Will be removed in 0.22
    assert_warns_message(FutureWarning, CV_WARNING, check_cv)
    assert_warns_message(FutureWarning, CV_WARNING, check_cv, None)
    assert_no_warnings(check_cv, cv=5)


def test_build_repr():
    class MockSplitter:
        def __init__(self, a, b=0, c=None):
            self.a = a
            self.b = b
            self.c = c

        def __repr__(self):
            return _build_repr(self)

    assert_equal(repr(MockSplitter(5, 6)), "MockSplitter(a=5, b=6, c=None)")<|fim▁end|>
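Editorial sketch, not part of the corpus: the `test_check_cv` assertions above all follow from one dispatch rule — with `classifier=True`, `check_cv` stratifies only when the target is binary or multiclass, while multilabel and multioutput targets fall back to plain `KFold`. A minimal, hedged rendering of that rule (names mirror scikit-learn's public API; this is not the library's actual implementation):

from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.utils.multiclass import type_of_target

def pick_cv(n_splits, y=None, classifier=False):
    # Stratify only for binary/multiclass targets, as the assertions above expect.
    if classifier and y is not None and type_of_target(y) in ("binary", "multiclass"):
        return StratifiedKFold(n_splits)
    return KFold(n_splits)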
<|file_name|>show.js<|end_file_name|><|fim▁begin|>import PlanRequiredRoute from "../plan-required";
import Notify from 'ember-notify';

export default PlanRequiredRoute.extend({
  model: function(params) {
    var _this = this;
    return this.store.find('entry', params.entry_id).then(function(entry) {<|fim▁hole|>
      if (meta.current_entry !== entry.get('entryDate')) {
        return entry.reload();
      } else {
        return entry;
      }
    }, function(data) {
      if (data.status === 404) {
        // Set the meta data
        var meta = data.responseJSON.meta;
        _this.store.metaForType("entry", meta);

        // Build the dummy record, for use in the new form
        var entry = _this.store.createRecord('entry', {
          entryDate: params.entry_id
        });
        return entry;
      } else {
        Notify.error(data.responseText, {closeAfter: 5000});
      }
    });
  },

  setupController: function(controller, model) {
    this._super(controller, model);
    var meta = this.store.metadataFor("entry");
    controller.setProperties({
      nextEntry: meta.next_entry,
      randomEntry: meta.random_entry,
      prevEntry: meta.prev_entry,
      entryDatePretty: moment(model.get('entryDate')).format("MMMM Do, YYYY")
    });
  }
});<|fim▁end|>
      // Force a reload if the meta data is out of date
      var meta = _this.store.metadataFor("entry");
<|file_name|>Player.java<|end_file_name|><|fim▁begin|>/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */
package grupp07.model;

import java.util.ArrayList;
import javafx.scene.paint.Color;
import javafx.util.Pair;

/**
 *
 * @author gast
 */
public abstract class Player {

    private String name;
    private Color playerColor;

    /** This function should return the player's desired next move.
     *
     * @return a Pair of two Integers describing the player's desired move.
     */
    public abstract Pair nextMove();

    /** This function may use the supplied list of possible moves for
     * the actual game. The player returns a desired move, which may be one of
     * the supplied ones.
     *
     * @param coords is a list of possible moves for the current player to
     * choose from. This could guide a computer player, for example.
     * @return the chosen move as a Pair of two Integers.
     */
    public abstract Pair nextMove(ArrayList<Pair<Integer, Integer>> coords);

    /** This function gets the player's color. <|fim▁hole|>
     *
     * @return the player's color.
     */
    public Color getColor(){
        return playerColor;
    }

    /** This function sets the player's color.
     *
     * @param color the color the player should use.
     */
    public void setColor(Color color){
        playerColor = color;
    }

    /** This function gets the name of the player.
     *
     * @return the name of the player.
     */
    public String getName(){
        return name;
    }

    /** This function sets the player's name.
     *
     * @param name the player's name.
     */
    public void setName(String name){
        this.name = name;
    }

}<|fim▁end|>
<|file_name|>column.mako.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

<%namespace name="helpers" file="/helpers.mako.rs" />

<%helpers:shorthand name="columns" sub_properties="column-count column-width" experimental="True"
                    extra_prefixes="moz" spec="https://drafts.csswg.org/css-multicol/#propdef-columns">
    use properties::longhands::{column_count, column_width};

    pub fn parse_value<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                               -> Result<Longhands, ParseError<'i>> {
        let mut column_count = None;
        let mut column_width = None;
        let mut autos = 0;

        loop {
            if input.try(|input| input.expect_ident_matching("auto")).is_ok() {
                // Leave the options to None, 'auto' is the initial value.
                autos += 1;
                continue
            }

            if column_count.is_none() {
                if let Ok(value) = input.try(|input| column_count::parse(context, input)) {
                    column_count = Some(value);
                    continue
                }
            }

            if column_width.is_none() {
                if let Ok(value) = input.try(|input| column_width::parse(context, input)) {
                    column_width = Some(value);
                    continue
                }
            }

            break
        }

        let values = autos + column_count.iter().len() + column_width.iter().len();
        if values == 0 || values > 2 {
            Err(StyleParseError::UnspecifiedError.into())
        } else {
            Ok(expanded! {
                column_count: unwrap_or_initial!(column_count),
                column_width: unwrap_or_initial!(column_width),
            })
        }
    }

    impl<'a> ToCss for LonghandsToSerialize<'a> {
        fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
            try!(self.column_width.to_css(dest));
            try!(write!(dest, " "));
            self.column_count.to_css(dest)
        }
    }
</%helpers:shorthand>

<%helpers:shorthand name="column-rule" products="gecko" extra_prefixes="moz"
                    sub_properties="column-rule-width column-rule-style column-rule-color"
                    spec="https://drafts.csswg.org/css-multicol/#propdef-column-rule">
    use properties::longhands::{column_rule_width, column_rule_style};
    use properties::longhands::column_rule_color;

    pub fn parse_value<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                               -> Result<Longhands, ParseError<'i>> {
        % for name in "width style color".split():
        let mut column_rule_${name} = None;
        % endfor
        let mut any = false;

        loop {
            % for name in "width style color".split():
            if column_rule_${name}.is_none() {
                if let Ok(value) = input.try(|input| column_rule_${name}::parse(context, input)) {
                    column_rule_${name} = Some(value);
                    any = true;
                    continue
                }<|fim▁hole|>
            }
            % endfor
            break
        }
        if any {
            Ok(expanded! {
                column_rule_width: unwrap_or_initial!(column_rule_width),
                column_rule_style: unwrap_or_initial!(column_rule_style),
                column_rule_color: unwrap_or_initial!(column_rule_color),
            })
        } else {
            Err(StyleParseError::UnspecifiedError.into())
        }
    }

    impl<'a> ToCss for LonghandsToSerialize<'a> {
        fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
            self.column_rule_width.to_css(dest)?;
            dest.write_str(" ")?;
            self.column_rule_style.to_css(dest)?;
            dest.write_str(" ")?;
            self.column_rule_color.to_css(dest)
        }
    }
</%helpers:shorthand>
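Editorial sketch, not part of the corpus: the `column-rule` template above uses the standard CSS-shorthand parsing pattern — loop over the input, let each not-yet-seen sub-property try to parse the next component in any order, and fail unless at least one matched. The same pattern reduced to plain Python, with `parsers` as hypothetical stand-ins for the generated `column_rule_*::parse` functions:

def parse_shorthand(tokens, parsers):
    # parsers: dict mapping sub-property name -> callable(token) -> value or None
    values = {}
    any_matched = False
    for tok in tokens:
        for name, parse in parsers.items():
            if name in values:
                continue  # each sub-property may appear at most once
            value = parse(tok)
            if value is not None:
                values[name] = value
                any_matched = True
                break
        else:
            break  # no remaining sub-property accepted this token
    if not any_matched:
        # mirrors returning StyleParseError::UnspecifiedError above
        raise ValueError("shorthand did not match any component")
    return values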
<|file_name|>EditablePanel.java<|end_file_name|><|fim▁begin|>/**
 * Licensed to the Austrian Association for Software Tool Integration (AASTI)
 * under one or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information regarding copyright
 * ownership. The AASTI licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.openengsb.ui.admin.tree.editablePanel;

import org.apache.wicket.ajax.AjaxRequestTarget;
import org.apache.wicket.ajax.form.AjaxFormComponentUpdatingBehavior;
import org.apache.wicket.markup.html.form.TextField;
import org.apache.wicket.markup.html.panel.Panel;
import org.apache.wicket.model.IModel;

@SuppressWarnings("serial")<|fim▁hole|>

    public EditablePanel(String id, IModel<String> inputModel) {
        super(id);
        TextField<String> field = new TextField<String>("textfield", inputModel);
        add(field);
        field.add(new AjaxFormComponentUpdatingBehavior("onblur") {
            @Override
            protected void onUpdate(AjaxRequestTarget target) {
            }
        });
    }

}<|fim▁end|>
public class EditablePanel extends Panel {
<|file_name|>stringUtils.spec.ts<|end_file_name|><|fim▁begin|>/**
 * @copyright 2009-2019 Vanilla Forums Inc.
 * @license GPL-2.0-only
 */

import { hashString, splitStringLoosely, matchAtMention, labelize } from "./stringUtils";

describe("hashString()", () => {
    it("the same string always results in the same value", () => {
        const str =
            "a; lksdjfl;aska;lskd fjaskl;dfj al;skdjfalsjkdfa;lksdjfl;kasdjflksaf;kbfjal;skdfbjanv;slkdfjbals;dkjfslkadfj;alsdjf;oiawjef;oiawbejvf;ioawbevf;aoiwebfjaov;wifebvl";
        expect(hashString(str)).toEqual(hashString(str));
    });

    it("different strings hash to different values", () => {
        const str1 = "a;slkdfjl;askdjfkl;asdjfkl;asjdfl;";
        const str2 =
            "a;sldkfjal;skdfjl;kasjdfl;k;laksjdf;laksjdf;laksjdf;lkajsd;lkfjaskl;dfjals;kdfjnal;skdjbfl;kasbdjfv;laskjbdfal;skdjfalv;skdjfalskdbjnfav;bslkdfjnalv;ksdfjbalskdfbjalvsk.dfjbalsv;kdbfjalsv;kdfjbadklsfjals";
        expect(hashString(str1)).not.toEqual(hashString(str2));
    });
});

type ParamsResultTuple = [string, string, string[]];
describe("splitStringLoosely()", () => {
    const paramsAndResults: ParamsResultTuple[] = [
        ["Test", "te", ["", "Te", "st"]],
        ["Stéphane", "Stéph", ["", "Stéph", "ane"]],
        ["Stéphane", "Stëph", ["", "Stéph", "ane"]],
        ["Stéphane", "St", ["", "St", "éphane"]],
        ["TestTest", "Te", ["", "Te", "st", "Te", "st"]],
        ["Tæst", "T", ["", "T", "æs", "t", ""]],
        ["Tæst", "Tæ", ["", "Tæ", "st"]],
        ["Tææst", "Tææ", ["", "Tææ", "st"]],
    ];

    paramsAndResults.forEach(([fullString, subString, result], index) => {
        it(`Case ${index}`, () => {
            expect(splitStringLoosely(fullString, subString)).toEqual(result);
        });
    });
});

function testSubjectsAndMatches(subjectsAndMatches: object) {
    Object.entries(subjectsAndMatches).map(([subject, match]) => {
        it(subject, () => {
            const result = matchAtMention(subject, true);
            if (result === null) {
                expect(result).toEqual(match);
            } else {
                expect(result.match).toEqual(match);
            }
        });
    });
}<|fim▁hole|>
describe("matching @mentions", () => {
    describe("simple mentions", () => {
        const goodSubjects = {
            "@System": "System",
            "Sometext @System": "System",
            "asdfasdf @joe": "joe",
        };
        testSubjectsAndMatches(goodSubjects);
    });

    describe("special characters", () => {
        const goodSubjects = {
            [`@"Séche"`]: "Séche",
            [`Something @"Séche"`]: "Séche",
            [`@"Umuüûū"`]: "Umuüûū",
            [`@Séche`]: "Séche", // Unquoted accent character
            [`@Umuüûū"`]: 'Umuüûū"',
        };
        testSubjectsAndMatches(goodSubjects);
    });

    describe("names with spaces", () => {
        const goodSubjects = {
            [`@"Someon asdf `]: "Someon asdf ",
            [`@"someone with a closed space"`]: "someone with a closed space",
            [`@"What about multiple spaces? `]: "What about multiple spaces? ",
        };
        const badSubjects = {
            "@someone with non-wrapped spaces": null,
            "@Some ": null,
        };
        testSubjectsAndMatches(goodSubjects);
        testSubjectsAndMatches(badSubjects);
    });

    describe("Closing characters", () => {
        const goodSubjects = {
            [`@Other Mention at end after linebreak
@System`]: "System",
            [`
Newline with special char @"Umuüûū"`]: "Umuüûū",
        };

        const badSubjects = {
            [`@"Close on quote" other thing`]: null,
        };
        testSubjectsAndMatches(goodSubjects);
        testSubjectsAndMatches(badSubjects);
    });
});

describe("labelize()", () => {
    const tests = [
        ["fooBar", "Foo Bar"],
        ["foo bar", "Foo Bar"],
        ["fooID", "Foo ID"],
        ["fooURL", "Foo URL"],
        ["foo_bar", "Foo Bar"],
        ["foo-bar", "Foo Bar"],
        ["foo-bar-baz", "Foo Bar Baz"],
        ["foo bar baz", "Foo Bar Baz"],
        [`foo bar baz`, "Foo Bar Baz"],
    ];

    tests.forEach(([str, expected]) => {
        it(str, () => {
            expect(labelize(str)).toBe(expected);
        });
    });
});<|fim▁end|>
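Editorial sketch, not part of the corpus: the `hashString` properties asserted above (the same input always hashes to the same value; distinct inputs hash to distinct values in practice) are exactly what the classic Java-style 31-based rolling hash provides. The Vanilla implementation itself is not shown in this row, so the following Python port is an assumption about the algorithm, not a transcription of it:

def hash_string(s: str) -> int:
    # Java-style rolling hash: h = 31*h + code point, kept within 32 bits.
    h = 0
    for ch in s:
        h = (31 * h + ord(ch)) & 0xFFFFFFFF
    return h

# Mirrors the two spec cases above.
assert hash_string("abc") == hash_string("abc")
assert hash_string("abc") != hash_string("abd")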
<|file_name|>unwind-unique.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// error-pattern:fail

#![allow(unknown_features)]
#![feature(box_syntax)]

fn failfn() {
    panic!();
}<|fim▁hole|>
fn main() {
    box 0;
    failfn();
}<|fim▁end|>
<|file_name|>apps.py<|end_file_name|><|fim▁begin|><|fim▁hole|>class AutodoappConfig(AppConfig):
    name = 'AutoDoApp'<|fim▁end|>
from django.apps import AppConfig
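Editorial sketch, not part of the corpus: for the Django `AppConfig` above to take effect, the app (or the explicit config path) has to be registered in the project settings. A hypothetical settings.py excerpt:

# settings.py (hypothetical project settings)
INSTALLED_APPS = [
    "django.contrib.contenttypes",
    "django.contrib.auth",
    "AutoDoApp.apps.AutodoappConfig",  # or simply "AutoDoApp"
]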
<|file_name|>algorithm.py<|end_file_name|><|fim▁begin|>import hashlib


def hash_list():
    return str(hashlib.algorithms_guaranteed)


def hash_text(algorithm_array, text, pass_count):
    result_dict = {}

    # Type checking
    if type(pass_count) is not int:
        return [False, {"error": "Pass count should be of 'integer' type."}]
    elif type(text) is not str:<|fim▁hole|>

    # Bounds checking
    avail_alg_set = set(algorithm_array) & set(hashlib.algorithms_guaranteed)
    if pass_count > 1000000 or pass_count <= 0:
        return [False, {"error": "Pass count should be larger than 0 and smaller than 1000000."}]
    elif len(avail_alg_set) == 0:
        return [False, {"error": "None of these hash algorithms are available."}]

    # There is no error case; do the hash computations for every function
    for function in avail_alg_set:
        hash_val = text
        for _ in range(pass_count):
            hash_val = getattr(hashlib, function)(hash_val.encode()).hexdigest()
        result_dict[function] = hash_val

    return [True, result_dict]<|fim▁end|>
        return [False, {"error": "Text should be of 'string' type."}]
    elif type(algorithm_array) is not list:
        return [False, {"error": "Algorithm list should be of 'list' type."}]
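Editorial sketch, not part of the corpus: how the `hash_text` helper above is meant to be called. Per the source, it returns a two-element list `[ok, payload]`, where `payload` is a dict of algorithm name to hex digest on success, or `{"error": ...}` on invalid input:

ok, payload = hash_text(["sha256", "md5"], "hello", 3)
if ok:
    for name, digest in payload.items():
        print(name, digest)  # each digest is the result of 3 chained passes
else:
    print("error:", payload["error"])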
<|file_name|>idna.go<|end_file_name|><|fim▁begin|>// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:generate go run gen.go gen_trieval.go gen_common.go

// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
// deal with the transition from IDNA2003.
//
// IDNA2008 (Internationalized Domain Names for Applications) is defined in RFC
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
// UTS #46 is defined in http://www.unicode.org/reports/tr46.
// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the
// differences between these two standards.
package idna // import "golang.org/x/text/internal/export/idna"

import (
    "fmt"
    "strings"
    "unicode/utf8"

    "golang.org/x/text/secure/bidirule"
    "golang.org/x/text/unicode/bidi"
    "golang.org/x/text/unicode/norm"
)

// NOTE: Unlike common practice in Go APIs, the functions will return a
// sanitized domain name in case of errors. Browsers sometimes use a partially
// evaluated string as lookup.
// TODO: the current error handling is, in my opinion, the least opinionated.
// Other strategies are also viable, though:
// Option 1) Return an empty string in case of error, but allow the user to
//    specify explicitly which errors to ignore.
// Option 2) Return the partially evaluated string if it is itself a valid
//    string, otherwise return the empty string in case of error.
// Option 3) Option 1 and 2.
// Option 4) Always return an empty string for now and implement Option 1 as
//    needed, and document that the return string may not be empty in case of
//    error in the future.
// I think Option 1 is best, but it is quite opinionated.

// ToASCII is a wrapper for Punycode.ToASCII.
func ToASCII(s string) (string, error) {
    return Punycode.process(s, true)
}

// ToUnicode is a wrapper for Punycode.ToUnicode.
func ToUnicode(s string) (string, error) {
    return Punycode.process(s, false)
}

// An Option configures a Profile at creation time.
type Option func(*options)

// Transitional sets a Profile to use the Transitional mapping as defined in UTS
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
// compatibility. It is used by most browsers when resolving domain names. This
// option is only meaningful if combined with MapForLookup.
func Transitional(transitional bool) Option {
    return func(o *options) { o.transitional = transitional }
}

// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
// are longer than allowed by the RFC.
func VerifyDNSLength(verify bool) Option {
    return func(o *options) { o.verifyDNSLength = verify }
}

// RemoveLeadingDots removes leading label separators. Leading runes that map to
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
//
// This is the behavior suggested by the UTS #46 and is adopted by some
// browsers.
func RemoveLeadingDots(remove bool) Option {
    return func(o *options) { o.removeLeadingDots = remove }
}

// ValidateLabels sets whether to check the mandatory label validation criteria
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
// of hyphens ('-'), normalization, validity of runes, and the context rules.
func ValidateLabels(enable bool) Option {
    return func(o *options) {
        // Don't override existing mappings, but set one that at least checks
        // normalization if it is not set.
        if o.mapping == nil && enable {
            o.mapping = normalize
        }
        o.trie = trie
        o.validateLabels = enable
        o.fromPuny = validateFromPunycode
    }
}

// StrictDomainName limits the set of permissible ASCII characters to those
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
//
// This option is useful, for instance, for browsers that allow characters
// outside this range, for example a '_' (U+005F LOW LINE). See
// http://www.rfc-editor.org/std/std3.txt for more details. This option
// corresponds to the UseSTD3ASCIIRules option in UTS #46.
func StrictDomainName(use bool) Option {
    return func(o *options) {
        o.trie = trie
        o.useSTD3Rules = use
        o.fromPuny = validateFromPunycode
    }
}

// NOTE: the following options pull in tables. The tables should not be linked
// in as long as the options are not used.

// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
// that relies on proper validation of labels should include this rule.
func BidiRule() Option {
    return func(o *options) { o.bidirule = bidirule.ValidString }
}

// ValidateForRegistration sets validation options to verify that a given IDN is
// properly formatted for registration as defined by Section 4 of RFC 5891.
func ValidateForRegistration() Option {
    return func(o *options) {
        o.mapping = validateRegistration
        StrictDomainName(true)(o)
        ValidateLabels(true)(o)
        VerifyDNSLength(true)(o)
        BidiRule()(o)
    }
}

// MapForLookup sets validation and mapping options such that a given IDN is
// transformed for domain name lookup according to the requirements set out in
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
// to add this check.
//
// The mappings include normalization and mapping case, width and other
// compatibility mappings.
func MapForLookup() Option {
    return func(o *options) {
        o.mapping = validateAndMap
        StrictDomainName(true)(o)
        ValidateLabels(true)(o)
    }
}

type options struct {
    transitional      bool
    useSTD3Rules      bool
    validateLabels    bool
    verifyDNSLength   bool
    removeLeadingDots bool

    trie *idnaTrie

    // fromPuny calls validation rules when converting A-labels to U-labels.
    fromPuny func(p *Profile, s string) error

    // mapping implements a validation and mapping step as defined in RFC 5895
    // or UTS 46, tailored to, for example, domain registration or lookup.
    mapping func(p *Profile, s string) (mapped string, isBidi bool, err error)

    // bidirule, if specified, checks whether s conforms to the Bidi Rule
    // defined in RFC 5893.
    bidirule func(s string) bool
}

// A Profile defines the configuration of an IDNA mapper.
type Profile struct {
    options
}

func apply(o *options, opts []Option) {
    for _, f := range opts {
        f(o)
    }
}

// New creates a new Profile.
//
// With no options, the returned Profile is the most permissive and equals the
// Punycode Profile. Options can be passed to further restrict the Profile. The
// MapForLookup and ValidateForRegistration options set a collection of options,
// for lookup and registration purposes respectively, which can be tailored by
// adding more fine-grained options, where later options override earlier
// options.
func New(o ...Option) *Profile {
    p := &Profile{}
    apply(&p.options, o)
    return p
}

// ToASCII converts a domain or domain label to its ASCII form. For example,
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
// ToASCII("golang") is "golang". If an error is encountered it will return
// an error and a (partially) processed result.
func (p *Profile) ToASCII(s string) (string, error) {
    return p.process(s, true)
}

// ToUnicode converts a domain or domain label to its Unicode form. For example,
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
// ToUnicode("golang") is "golang". If an error is encountered it will return
// an error and a (partially) processed result.
func (p *Profile) ToUnicode(s string) (string, error) {
    pp := *p
    pp.transitional = false
    return pp.process(s, false)
}

// String reports a string with a description of the profile for debugging
// purposes. The string format may change with different versions.
func (p *Profile) String() string {
    s := ""
    if p.transitional {
        s = "Transitional"
    } else {
        s = "NonTransitional"
    }
    if p.useSTD3Rules {
        s += ":UseSTD3Rules"
    }
    if p.validateLabels {
        s += ":ValidateLabels"
    }
    if p.verifyDNSLength {
        s += ":VerifyDNSLength"
    }
    return s
}

var (
    // Punycode is a Profile that does raw punycode processing with a minimum
    // of validation.
    Punycode *Profile = punycode

    // Lookup is the recommended profile for looking up domain names, according
    // to Section 5 of RFC 5891. The exact configuration of this profile may
    // change over time.
    Lookup *Profile = lookup

    // Display is the recommended profile for displaying domain names.
    // The configuration of this profile may change over time.
    Display *Profile = display

    // Registration is the recommended profile for checking whether a given
    // IDN is valid for registration, according to Section 4 of RFC 5891.
    Registration *Profile = registration

    punycode = &Profile{}
    lookup   = &Profile{options{
        transitional:   true,
        useSTD3Rules:   true,
        validateLabels: true,
        trie:           trie,
        fromPuny:       validateFromPunycode,
        mapping:        validateAndMap,
        bidirule:       bidirule.ValidString,
    }}<|fim▁hole|>
        validateLabels: true,
        trie:           trie,
        fromPuny:       validateFromPunycode,
        mapping:        validateAndMap,
        bidirule:       bidirule.ValidString,
    }}
    registration = &Profile{options{
        useSTD3Rules:    true,
        validateLabels:  true,
        verifyDNSLength: true,
        trie:            trie,
        fromPuny:        validateFromPunycode,
        mapping:         validateRegistration,
        bidirule:        bidirule.ValidString,
    }}

    // TODO: profiles
    // Register: recommended for approving domain names: don't do any mappings
    // but rather reject on invalid input. Bundle or block deviation characters.
)

type labelError struct{ label, code_ string }

func (e labelError) code() string { return e.code_ }
func (e labelError) Error() string {
    return fmt.Sprintf("idna: invalid label %q", e.label)
}

type runeError rune

func (e runeError) code() string { return "P1" }
func (e runeError) Error() string {
    return fmt.Sprintf("idna: disallowed rune %U", e)
}

// process implements the algorithm described in section 4 of UTS #46,
// see http://www.unicode.org/reports/tr46.
func (p *Profile) process(s string, toASCII bool) (string, error) {
    var err error
    var isBidi bool
    if p.mapping != nil {
        s, isBidi, err = p.mapping(p, s)
    }
    // Remove leading empty labels.
    if p.removeLeadingDots {
        for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
        }
    }
    // TODO: allow for a quick check of the tables data.
    // It seems like we should only create this error on ToASCII, but the
    // UTS 46 conformance tests suggest we should always check this.
    if err == nil && p.verifyDNSLength && s == "" {
        err = &labelError{s, "A4"}
    }
    labels := labelIter{orig: s}
    for ; !labels.done(); labels.next() {
        label := labels.label()
        if label == "" {
            // Empty labels are not okay. The label iterator skips the last
            // label if it is empty.
            if err == nil && p.verifyDNSLength {
                err = &labelError{s, "A4"}
            }
            continue
        }
        if strings.HasPrefix(label, acePrefix) {
            u, err2 := decode(label[len(acePrefix):])
            if err2 != nil {
                if err == nil {
                    err = err2
                }
                // Spec says keep the old label.
                continue
            }
            isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
            labels.set(u)
            if err == nil && p.validateLabels {
                err = p.fromPuny(p, u)
            }
            if err == nil {
                // This should be called on NonTransitional, according to the
                // spec, but that currently does not have any effect. Use the
                // original profile to preserve options.
                err = p.validateLabel(u)
            }
        } else if err == nil {
            err = p.validateLabel(label)
        }
    }
    if isBidi && p.bidirule != nil && err == nil {
        for labels.reset(); !labels.done(); labels.next() {
            if !p.bidirule(labels.label()) {
                err = &labelError{s, "B"}
                break
            }
        }
    }
    if toASCII {
        for labels.reset(); !labels.done(); labels.next() {
            label := labels.label()
            if !ascii(label) {
                a, err2 := encode(acePrefix, label)
                if err == nil {
                    err = err2
                }
                label = a
                labels.set(a)
            }
            n := len(label)
            if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
                err = &labelError{label, "A4"}
            }
        }
    }
    s = labels.result()
    if toASCII && p.verifyDNSLength && err == nil {
        // Compute the length of the domain name minus the root label and its dot.
        n := len(s)
        if n > 0 && s[n-1] == '.' {
            n--
        }
        if len(s) < 1 || n > 253 {
            err = &labelError{s, "A4"}
        }
    }
    return s, err
}

func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) {
    // TODO: consider first doing a quick check to see if any of these checks
    // need to be done. This will make it slower in the general case, but
    // faster in the common case.
    mapped = norm.NFC.String(s)
    isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft
    return mapped, isBidi, nil
}

func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) {
    // TODO: filter need for normalization in loop below.
    if !norm.NFC.IsNormalString(s) {
        return s, false, &labelError{s, "V1"}
    }
    for i := 0; i < len(s); {
        v, sz := trie.lookupString(s[i:])
        if sz == 0 {
            return s, bidi, runeError(utf8.RuneError)
        }
        bidi = bidi || info(v).isBidi(s[i:])
        // Copy bytes not copied so far.
        switch p.simplify(info(v).category()) {
        // TODO: handle the NV8 defined in the Unicode idna data set to allow
        // for strict conformance to IDNA2008.
        case valid, deviation:
        case disallowed, mapped, unknown, ignored:
            r, _ := utf8.DecodeRuneInString(s[i:])
            return s, bidi, runeError(r)
        }
        i += sz
    }
    return s, bidi, nil
}

func (c info) isBidi(s string) bool {
    if !c.isMapped() {
        return c&attributesMask == rtl
    }
    // TODO: also store bidi info for mapped data. This is possible, but a bit
    // cumbersome and not for the common case.
    p, _ := bidi.LookupString(s)
    switch p.Class() {
    case bidi.R, bidi.AL, bidi.AN:
        return true
    }
    return false
}

func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) {
    var (
        b []byte
        k int
    )
    // combinedInfoBits contains the or-ed bits of all runes. We use this
    // to derive the mayNeedNorm bit later. This may trigger normalization
    // overeagerly, but it will not do so in the common case. The end result
    // is another 10% saving on BenchmarkProfile for the common case.
    var combinedInfoBits info
    for i := 0; i < len(s); {
        v, sz := trie.lookupString(s[i:])
        if sz == 0 {
            b = append(b, s[k:i]...)
            b = append(b, "\ufffd"...)
            k = len(s)
            if err == nil {
                err = runeError(utf8.RuneError)
            }
            break
        }
        combinedInfoBits |= info(v)
        bidi = bidi || info(v).isBidi(s[i:])
        start := i
        i += sz
        // Copy bytes not copied so far.
        switch p.simplify(info(v).category()) {
        case valid:
            continue
        case disallowed:
            if err == nil {
                r, _ := utf8.DecodeRuneInString(s[start:])
                err = runeError(r)
            }
            continue
        case mapped, deviation:
            b = append(b, s[k:start]...)
            b = info(v).appendMapping(b, s[start:i])
        case ignored:
            b = append(b, s[k:start]...)
            // drop the rune
        case unknown:
            b = append(b, s[k:start]...)
            b = append(b, "\ufffd"...)
        }
        k = i
    }
    if k == 0 {
        // No changes so far.
        if combinedInfoBits&mayNeedNorm != 0 {
            s = norm.NFC.String(s)
        }
    } else {
        b = append(b, s[k:]...)
        if norm.NFC.QuickSpan(b) != len(b) {
            b = norm.NFC.Bytes(b)
        }
        // TODO: the punycode converters require strings as input.
        s = string(b)
    }
    return s, bidi, err
}

// A labelIter allows iterating over domain name labels.
type labelIter struct {
    orig     string
    slice    []string
    curStart int
    curEnd   int
    i        int
}

func (l *labelIter) reset() {
    l.curStart = 0
    l.curEnd = 0
    l.i = 0
}

func (l *labelIter) done() bool {
    return l.curStart >= len(l.orig)
}

func (l *labelIter) result() string {
    if l.slice != nil {
        return strings.Join(l.slice, ".")
    }
    return l.orig
}

func (l *labelIter) label() string {
    if l.slice != nil {
        return l.slice[l.i]
    }
    p := strings.IndexByte(l.orig[l.curStart:], '.')
    l.curEnd = l.curStart + p
    if p == -1 {
        l.curEnd = len(l.orig)
    }
    return l.orig[l.curStart:l.curEnd]
}

// next sets the value to the next label. It skips the last label if it is empty.
func (l *labelIter) next() {
    l.i++
    if l.slice != nil {
        if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
            l.curStart = len(l.orig)
        }
    } else {
        l.curStart = l.curEnd + 1
        if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
            l.curStart = len(l.orig)
        }
    }
}

func (l *labelIter) set(s string) {
    if l.slice == nil {
        l.slice = strings.Split(l.orig, ".")
    }
    l.slice[l.i] = s
}

// acePrefix is the ASCII Compatible Encoding prefix.
const acePrefix = "xn--"

func (p *Profile) simplify(cat category) category {
    switch cat {
    case disallowedSTD3Mapped:
        if p.useSTD3Rules {
            cat = disallowed
        } else {
            cat = mapped
        }
    case disallowedSTD3Valid:
        if p.useSTD3Rules {
            cat = disallowed
        } else {
            cat = valid
        }
    case deviation:
        if !p.transitional {
            cat = valid
        }
    case validNV8, validXV8:
        // TODO: handle V2008
        cat = valid
    }
    return cat
}

func validateFromPunycode(p *Profile, s string) error {
    if !norm.NFC.IsNormalString(s) {
        return &labelError{s, "V1"}
    }
    // TODO: detect whether string may have to be normalized in the following
    // loop.
    for i := 0; i < len(s); {
        v, sz := trie.lookupString(s[i:])
        if sz == 0 {
            return runeError(utf8.RuneError)
        }
        if c := p.simplify(info(v).category()); c != valid && c != deviation {
            return &labelError{s, "V6"}
        }
        i += sz
    }
    return nil
}

const (
    zwnj = "\u200c"
    zwj  = "\u200d"
)

type joinState int8

const (
    stateStart joinState = iota
    stateVirama
    stateBefore
    stateBeforeVirama
    stateAfter
    stateFAIL
)

var joinStates = [][numJoinTypes]joinState{
    stateStart: {
        joiningL:   stateBefore,
        joiningD:   stateBefore,
        joinZWNJ:   stateFAIL,
        joinZWJ:    stateFAIL,
        joinVirama: stateVirama,
    },
    stateVirama: {
        joiningL: stateBefore,
        joiningD: stateBefore,
    },
    stateBefore: {
        joiningL:   stateBefore,
        joiningD:   stateBefore,
        joiningT:   stateBefore,
        joinZWNJ:   stateAfter,
        joinZWJ:    stateFAIL,
        joinVirama: stateBeforeVirama,
    },
    stateBeforeVirama: {
        joiningL: stateBefore,
        joiningD: stateBefore,
        joiningT: stateBefore,
    },
    stateAfter: {
        joiningL:   stateFAIL,
        joiningD:   stateBefore,
        joiningT:   stateAfter,
        joiningR:   stateStart,
        joinZWNJ:   stateFAIL,
        joinZWJ:    stateFAIL,
        joinVirama: stateAfter, // no-op as we can't accept joiners here
    },
    stateFAIL: {
        0:          stateFAIL,
        joiningL:   stateFAIL,
        joiningD:   stateFAIL,
        joiningT:   stateFAIL,
        joiningR:   stateFAIL,
        joinZWNJ:   stateFAIL,
        joinZWJ:    stateFAIL,
        joinVirama: stateFAIL,
    },
}

// validateLabel validates the criteria from Section 4.1. Items 1, 4, and 6 are
// already implicitly satisfied by the overall implementation.
func (p *Profile) validateLabel(s string) (err error) {
    if s == "" {
        if p.verifyDNSLength {
            return &labelError{s, "A4"}
        }
        return nil
    }
    if !p.validateLabels {
        return nil
    }
    trie := p.trie // p.validateLabels is only set if trie is set.
    if len(s) > 4 && s[2] == '-' && s[3] == '-' {
        return &labelError{s, "V2"}
    }
    if s[0] == '-' || s[len(s)-1] == '-' {
        return &labelError{s, "V3"}
    }
    // TODO: merge the use of this in the trie.
    v, sz := trie.lookupString(s)
    x := info(v)
    if x.isModifier() {
        return &labelError{s, "V5"}
    }
    // Quickly return in the absence of zero-width (non) joiners.
    if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
        return nil
    }
    st := stateStart
    for i := 0; ; {
        jt := x.joinType()
        if s[i:i+sz] == zwj {
            jt = joinZWJ
        } else if s[i:i+sz] == zwnj {
            jt = joinZWNJ
        }
        st = joinStates[st][jt]
        if x.isViramaModifier() {
            st = joinStates[st][joinVirama]
        }
        if i += sz; i == len(s) {
            break
        }
        v, sz = trie.lookupString(s[i:])
        x = info(v)
    }
    if st == stateFAIL || st == stateAfter {
        return &labelError{s, "C"}
    }
    return nil
}

func ascii(s string) bool {
    for i := 0; i < len(s); i++ {
        if s[i] >= utf8.RuneSelf {
            return false
        }
    }
    return true
}<|fim▁end|>
    display = &Profile{options{
        useSTD3Rules:   true,
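Editorial sketch, not part of the corpus: the Go profiles above implement UTS #46; for comparison, Python's standard library ships an older IDNA-2003-era codec that covers the common round trip. Results can differ from the Go package for deviation characters such as 'ß', so treat this only as a rough equivalent:

ascii_form = "bücher.example.com".encode("idna").decode("ascii")
print(ascii_form)    # xn--bcher-kva.example.com
unicode_form = b"xn--bcher-kva.example.com".decode("idna")
print(unicode_form)  # bücher.example.com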
<|file_name|>2015-edition-warning.rs<|end_file_name|><|fim▁begin|>// run-rustfix

#![allow(non_camel_case_types)]<|fim▁hole|>
        //~^ ERROR `await` is a keyword
        //~| WARN was previously accepted
        pub struct await;
        //~^ ERROR `await` is a keyword
        //~| WARN was previously accepted
    }
}

use outer_mod::await::await;
//~^ ERROR `await` is a keyword
//~| ERROR `await` is a keyword
//~| WARN was previously accepted
//~| WARN was previously accepted

fn main() {
    match await { await => {} }
    //~^ ERROR `await` is a keyword
    //~| ERROR `await` is a keyword
    //~| WARN was previously accepted
    //~| WARN was previously accepted
}<|fim▁end|>
#![deny(keyword_idents)]

mod outer_mod {
    pub mod await {