| repo_name (string, length 5-92) | path (string, length 4-232) | copies (string, 19 classes) | size (string, length 4-7) | content (string, length 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
JulyKikuAkita/PythonPrac | cs15211/LongestWordInDictionaryThroughDeleting.py | 1 | 4177 | __source__ = 'https://leetcode.com/problems/longest-word-in-dictionary-through-deleting/'
# Time: O(nlogn + n*x), Sorting takes O(nlogn) and isSubsequence takes O(x)
# Space: O(logn) Sorting takes O(logn) space in average case.
#
# Description: 524. Longest Word in Dictionary through Deleting
#
# Given a string and a string dictionary, find the longest string in the dictionary
# that can be formed by deleting some characters of the given string.
# If there is more than one possible result, return the longest word with the smallest lexicographical order.
# If there is no possible result, return the empty string.
#
# Example 1:
# Input:
# s = "abpcplea", d = ["ale","apple","monkey","plea"]
#
# Output:
# "apple"
# Example 2:
# Input:
# s = "abpcplea", d = ["a","b","c"]
#
# Output:
# "a"
#
# Note:
# All the strings in the input will only contain lower-case letters.
# The size of the dictionary won't exceed 1,000.
# The length of all the strings in the input won't exceed 1,000.
# Hide Company Tags Google
# Hide Tags Two Pointers Sort
#
import unittest
# Let's check whether each word is a subsequence of S individually by "best" order
# (largest size, then lexicographically smallest.) Then if we find a match,
# we know the word being considered must be the best possible answer,
# since better answers were already considered beforehand.
#
# Let's figure out how to check if a needle (word) is a subsequence of a haystack (S).
# This is a classic problem with the following solution: walk through S,
# keeping track of the position (i) of the needle that indicates that word[i:]
# still remains to be matched to S at this point in time. Whenever word[i] matches the current character in S,
# we only have to match word[i+1:], so we increment i. At the end of this process, i == len(word)
# if and only if we've matched every character in word to some character in S in order of our walk.
#
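# A worked illustration of that walk on Example 1 (word = "apple", s = "abpcplea"):
#
#   c from s:  a  b  p  c  p  l  e  a
#   i after:   1  1  2  2  3  4  5  5
#
# i reaches len("apple") == 5, so "apple" is a subsequence of s; because the
# dictionary is checked in "best" order, it can be returned immediately.
#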
# 584ms 19.60%
class Solution(object):
def findLongestWord(self, s, d):
"""
:type s: str
:type d: List[str]
:rtype: str
"""
d.sort(key = lambda x: (-len(x), x))
for word in d:
i = 0
for c in s:
if i < len(word) and word[i] == c:
i += 1
if i == len(word):
return word
return ""
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/longest-word-in-dictionary-through-deleting/solution/
1.
An alternate, more efficient solution which avoids sorting the dictionary:
# 33ms 65.40%
class Solution {
public String findLongestWord(String s, List<String> d) {
String res = "";
for (String key : d) {
int i = 0;
for ( char c : s.toCharArray()) {
if ( i < key.length() && c == key.charAt(i)) i++;
}
if (i == key.length() && key.length() >= res.length()) {
                if (key.length() > res.length() || key.compareTo(res) < 0) { //asc
res = key;
}
}
}
return res;
}
}
2.
Idea is sort the dictionary d first by length DESC then lexicographical ASC
and test if p is SubSequence of s. The first match is the answer.
# 106ms 4.57%
class Solution {
public String findLongestWord(String s, List<String> d) {
if (s.length() == 0 || d.size() == 0) return "";
//sort dict:
        Collections.sort(d, (s1, s2) -> {
            return s2.length() != s1.length() ?
                   s2.length() - s1.length() : //desc by length
                   s1.compareTo(s2); //asc lexicographically
        });
for (String key : d) {
if (s.length() < key.length()) continue;
if (isSubSeq(key, s)) return key;
}
return "";
}
public boolean isSubSeq(String needle, String Hay) {
int i = 0;
for (char c : Hay.toCharArray()) {
if (i < needle.length() && c == needle.charAt(i)) {
i++;
}
}
return i == needle.length();
}
}
'''
| apache-2.0 | -5,894,769,081,862,545,000 | 31.889764 | 111 | 0.594925 | false |
uclouvain/osis_louvain | assessments/forms/score_file.py | 1 | 1880 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import forms
from django.utils.translation import ugettext_lazy as _
class ScoreFileForm(forms.Form):
file = forms.FileField(error_messages={'required': _('no_file_submitted')})
def clean_file(self):
file = self.cleaned_data['file']
content_type = file.content_type.split('/')[1]
valid_content_type = 'vnd.openxmlformats-officedocument.spreadsheetml.sheet' in content_type
if ".xlsx" not in file.name or not valid_content_type:
self.add_error('file', forms.ValidationError(_('file_must_be_xlsx'), code='invalid'))
return file
| agpl-3.0 | -502,586,721,529,759,000 | 47.179487 | 100 | 0.654604 | false |
hsharsha/perfrunner | perfrunner/tests/functional.py | 1 | 1852 | import unittest
from perfrunner.__main__ import get_options
from perfrunner.helpers.memcached import MemcachedHelper
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.helpers.rest import RestHelper
from perfrunner.settings import ClusterSpec, TestConfig
from perfrunner.tests import TargetIterator
class FunctionalTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
options, _args = get_options()
override = \
_args and (arg.split('.') for arg in ' '.join(_args).split(','))
self.cluster_spec = ClusterSpec()
self.cluster_spec.parse(options.cluster_spec_fname)
self.test_config = TestConfig()
self.test_config.parse(options.test_config_fname, override)
self.target_iterator = TargetIterator(self.cluster_spec,
self.test_config)
self.memcached = MemcachedHelper(self.test_config)
self.remote = RemoteHelper(self.cluster_spec, self.test_config)
self.rest = RestHelper(self.cluster_spec)
super(FunctionalTest, self).__init__(*args, **kwargs)
class MemcachedTests(FunctionalTest):
def test_num_threads(self):
expected_threads = self.test_config.cluster.num_cpus
if expected_threads is None:
cores = self.remote.detect_number_cores()
expected_threads = int(0.75 * cores)
for target in self.target_iterator:
host = target.node.split(':')[0]
port = self.rest.get_memcached_port(target.node)
stats = self.memcached.get_stats(host, port, target.bucket,
stats='')
num_threads = int(stats['threads'])
self.assertEqual(num_threads, expected_threads)
if __name__ == '__main__':
unittest.main(argv=['functional.py'])
| apache-2.0 | 919,988,435,475,458,000 | 37.583333 | 76 | 0.636609 | false |
mfasq1Monash/FIT3140 | interpreter.py | 1 | 6491 | '''
Author: Michael Asquith, Aaron Gruneklee
Created: 2014.12.08
Last Modified: 2014.12.23
Interpreter for a simple functional programming language.
Access with interpret(command)
Based on Peter Norvig's Lispy interpreter, http://norvig.com/lispy.html
'''
import math, operator as op
from robotio import RobotIO
Symbol = str
class VariableAlreadyPresentException(Exception):
pass
class FunctionAlreadyDefinedException(Exception):
pass
class VariableAlreadySetException(Exception):
pass
class VariableNotFoundException(Exception):
pass
class InterpretedList(list):
pass
class Procedure(object):
"""A user-defined method for the interpreter"""
def __init__(self, parms, stats, env, inter):
self.parameters = parms
self.statements = stats
self.environment = env
self.interpreter = inter
def __call__(self, *args):
localVariables = Environment(self.parameters, args, self.environment)
return self.interpreter.evaluate(self.statements, localVariables)
class Environment(dict):
"""A set of variables for the interpreter or a method within it."""
def __init__(self, parms=(), expressions=(), outer=None):
"""When evaluating, procedures will pass in their parameters"""
self.update(zip(parms, expressions))
self.outer = outer
def find(self, variable):
"""Returns the lowest level Environment which has variable"""
if variable in self:
return self
try:
return self.outer.find(variable)
except AttributeError:
raise VariableNotFoundException
def add_new(self, variable, value):
"""Adds a new definition to the environment. If the variable is already present, raises a KeyAlreadyPresentError"""
if variable in self:
raise(VariableAlreadyPresentException)
self[variable] = value
class Interpreter:
"""After initialising an interpreter, run expressions by calling interpret.
"""
def __init__(self, newRobotIO):
"""Creates an interpreter with standard math operations and variables.
Can send input/output to newRobotIO
"""
self.global_environment = self.standard_environment()
self.robotio = newRobotIO
def interpret(self, code):
"""Parses and executes code a string in the form of:
(method_name argument1 argument2)
Arguments which are expressions must be placed in brackets.
Arguments which are not expressions must not be placed in brackets.
"""
return self.evaluate(self.parse(code))
def parse(self, code):
"Read an expression from a string."
return self.read_from_tokens(self.tokenize(code))
def tokenize(self, s):
"Convert a string into a list of tokens."
return s.replace('(',' ( ').replace(')',' ) ').split()
def read_from_tokens(self, tokens):
"Read an expression from a sequence of tokens."
if len(tokens) == 0:
raise SyntaxError('unexpected EOF while reading')
token = tokens.pop(0)
if '(' == token:
L = []
while tokens[0] != ')':
L.append(self.read_from_tokens(tokens))
tokens.pop(0) # pop off ')'
return L
elif ')' == token:
raise SyntaxError('unexpected )')
else:
return self.atom(token)
def atom(self, token):
"Numbers become numbers, booleans become booleans, everything else become symbols."
try:
return int(token)
except ValueError:
if token.lower() == 'true':
return True
elif token.lower() == 'false':
return False
else:
return Symbol(token)
def standard_environment(self):
"Creates the base variable environment"
env = Environment()
env.update(vars(math))
env.update({
'+':op.add, '-':op.sub, '*':op.mul, '/':op.div,
'>':op.gt, '<':op.lt, '>=':op.ge, '<=':op.le, '=':op.eq,
'define':None, 'if':None, 'set':None, 'comment':None,
'%': lambda x,y: abs(x % y),
'and': lambda x,y: x and y,
'or': lambda x,y: x or y,
'not': lambda x: not x,
'move': lambda x: self.robotio.move(x),
'turn': lambda x: self.robotio.turn(x),
'detect-wall': lambda x: self.robotio.detect_wall(x),
'detect-goal': lambda x: self.robotio.detect_goal(x),
'[]': InterpretedList(),
'build': lambda x,y: InterpretedList([x] + y),
'head': lambda x: x[0],
'tail': lambda x: InterpretedList(x[1:])
})
return env
def evaluate(self, x, env=None):
if env == None:
env = self.global_environment
# If x is a list, must be evaluating a method
if isinstance(x, list):
if isinstance(x, InterpretedList):
return x
method = x.pop(0)
# Defines a function
if method == 'define':
try:
self.global_environment.add_new(x[0], Procedure(x[1], x[2], env, self))
except VariableAlreadyPresentException:
raise FunctionAlreadyDefinedException
# If statement. [Test, consequences, alternative]
elif method == 'if':
if self.evaluate(x[0]):
return self.evaluate(x[1])
return self.evaluate(x[2])
# Sets a variable
elif method == 'set':
try:
env.add_new(x[0], self.evaluate(x[1],env))
except VariableAlreadyPresentException:
raise VariableAlreadySetException
return
elif method == 'comment':
return
# Executes all other functions
else:
method = self.evaluate(method, self.global_environment)
args = [self.evaluate(variable, env) for variable in x]
return method(*args)
elif isinstance(x, Symbol):
return self.evaluate(env.find(x)[x])
else:
return x
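

# Example-only usage sketch (an illustrative helper, not used by the
# interpreter itself); RobotIO is only needed for the robot commands, so
# plain arithmetic works with None passed in its place.
def _example_usage():
    interp = Interpreter(None)
    interp.interpret("(set x 4)")
    assert interp.interpret("(+ x 2)") == 6
    assert interp.interpret("(if (> x 3) 1 0)") == 1
    return interp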
| mit | 5,197,515,918,788,058,000 | 33.343915 | 123 | 0.558312 | false |
scottrice/Ice | ice/tasks/engine.py | 1 | 1118 | # encoding: utf-8
import os
from pysteam import paths as steam_paths
from pysteam import shortcuts
from pysteam import steam as steam_module
from ice import backups
from ice import configuration
from ice import consoles
from ice import emulators
from ice import paths
from ice import settings
from ice.logs import logger
from ice.persistence.config_file_backing_store import ConfigFileBackingStore
class TaskEngine(object):
def __init__(self, steam):
self.steam = steam
logger.debug("Initializing Ice")
# We want to ignore the anonymous context, cause theres no reason to sync
# ROMs for it since you cant log in as said user.
is_user_context = lambda context: context.user_id != 'anonymous'
self.users = filter(is_user_context, steam_module.local_user_contexts(self.steam))
def run(self, tasks, app_settings, dry_run=False):
if self.steam is None:
logger.error("Cannot run Ice because Steam doesn't appear to be installed")
return
logger.info("=========== Starting Ice ===========")
for task in tasks:
task(app_settings, self.users, dry_run=dry_run)
| mit | 4,875,944,689,318,500,000 | 29.216216 | 86 | 0.723614 | false |
glidernet/python-ogn-client | tests/client/test_AprsClient.py | 1 | 6185 | import unittest
import unittest.mock as mock
from ogn.parser import parse
from ogn.client.client import create_aprs_login, AprsClient
from ogn.client.settings import APRS_APP_NAME, APRS_APP_VER, APRS_KEEPALIVE_TIME
class AprsClientTest(unittest.TestCase):
def test_create_aprs_login(self):
basic_login = create_aprs_login('klaus', -1, 'myApp', '0.1')
self.assertEqual('user klaus pass -1 vers myApp 0.1\n', basic_login)
login_with_filter = create_aprs_login('klaus', -1, 'myApp', '0.1', 'r/48.0/11.0/100')
self.assertEqual('user klaus pass -1 vers myApp 0.1 filter r/48.0/11.0/100\n', login_with_filter)
def test_initialisation(self):
client = AprsClient(aprs_user='testuser', aprs_filter='')
self.assertEqual(client.aprs_user, 'testuser')
self.assertEqual(client.aprs_filter, '')
@mock.patch('ogn.client.client.socket')
def test_connect_full_feed(self, mock_socket):
client = AprsClient(aprs_user='testuser', aprs_filter='')
client.connect()
client.sock.send.assert_called_once_with('user testuser pass -1 vers {} {}\n'.format(
APRS_APP_NAME, APRS_APP_VER).encode('ascii'))
client.sock.makefile.assert_called_once_with('rb')
@mock.patch('ogn.client.client.socket')
def test_connect_client_defined_filter(self, mock_socket):
client = AprsClient(aprs_user='testuser', aprs_filter='r/50.4976/9.9495/100')
client.connect()
client.sock.send.assert_called_once_with('user testuser pass -1 vers {} {} filter r/50.4976/9.9495/100\n'.format(
APRS_APP_NAME, APRS_APP_VER).encode('ascii'))
client.sock.makefile.assert_called_once_with('rb')
@mock.patch('ogn.client.client.socket')
def test_disconnect(self, mock_socket):
client = AprsClient(aprs_user='testuser', aprs_filter='')
client.connect()
client.disconnect()
client.sock.shutdown.assert_called_once_with(0)
client.sock.close.assert_called_once_with()
self.assertTrue(client._kill)
@mock.patch('ogn.client.client.socket')
def test_run(self, mock_socket):
import socket
mock_socket.error = socket.error
client = AprsClient(aprs_user='testuser', aprs_filter='')
client.connect()
client.sock_file.readline = mock.MagicMock()
client.sock_file.readline.side_effect = [b'Normal text blabla',
b'my weird character \xc2\xa5',
UnicodeDecodeError('funnycodec', b'\x00\x00', 1, 2, 'This is just a fake reason!'),
b'... show must go on',
BrokenPipeError(),
b'... and on',
ConnectionResetError(),
b'... and on',
socket.error(),
b'... and on',
b'',
b'... and on',
KeyboardInterrupt()]
try:
client.run(callback=lambda msg: print("got: {}".format(msg)), autoreconnect=True)
except KeyboardInterrupt:
pass
finally:
client.disconnect()
@mock.patch('ogn.client.client.time')
@mock.patch('ogn.client.client.socket')
def test_run_keepalive(self, mock_socket, mock_time):
import socket
mock_socket.error = socket.error
client = AprsClient(aprs_user='testuser', aprs_filter='')
client.connect()
client.sock_file.readline = mock.MagicMock()
client.sock_file.readline.side_effect = [b'Normal text blabla',
KeyboardInterrupt()]
mock_time.side_effect = [0, 0, APRS_KEEPALIVE_TIME + 1, APRS_KEEPALIVE_TIME + 1]
timed_callback = mock.MagicMock()
try:
client.run(callback=lambda msg: print("got: {}".format(msg)), timed_callback=timed_callback)
except KeyboardInterrupt:
pass
finally:
client.disconnect()
timed_callback.assert_called_with(client)
def test_reset_kill_reconnect(self):
client = AprsClient(aprs_user='testuser', aprs_filter='')
client.connect()
# .run() should be allowed to execute after .connect()
mock_callback = mock.MagicMock(
side_effect=lambda raw_msg: client.disconnect())
self.assertFalse(client._kill)
client.run(callback=mock_callback, autoreconnect=True)
# After .disconnect(), client._kill should be True
self.assertTrue(client._kill)
self.assertEqual(mock_callback.call_count, 1)
# After we reconnect, .run() should be able to run again
mock_callback.reset_mock()
client.connect()
client.run(callback=mock_callback, autoreconnect=True)
self.assertEqual(mock_callback.call_count, 1)
def test_50_live_messages(self):
print("Enter")
self.remaining_messages = 50
def process_message(raw_message):
if raw_message[0] == '#':
return
try:
message = parse(raw_message)
print("{}: {}".format(message['aprs_type'], raw_message))
except NotImplementedError as e:
print("{}: {}".format(e, raw_message))
return
if self.remaining_messages > 0:
self.remaining_messages -= 1
else:
raise KeyboardInterrupt
client = AprsClient(aprs_user='testuser', aprs_filter='')
client.connect()
try:
client.run(callback=process_message, autoreconnect=True)
except KeyboardInterrupt:
pass
finally:
client.disconnect()
self.assertTrue(True)
| agpl-3.0 | -3,735,583,712,359,570,000 | 40.233333 | 132 | 0.551657 | false |
TheWitchers/Team | TestingArea/TESTZONE_methods.py | 1 | 1111 | __author__ = 'dvir'
import tkFileDialog
import sqlite3
conn = sqlite3.connect(tkFileDialog.askopenfilename())
c = conn.cursor()
# using example db
def ex_show_purch(price):
l = []
for row in c.execute("SELECT symbol FROM stocks WHERE price > " + str(price) + ""):
print row
l.append(row)
print l
return l
ex_show_purch(raw_input("Enter Price: "))
# for project db
def show_purch(name):
l = []
for row in c.execute("SELECT * FROM Purchaseses WHERE nickname = '" + name + "'"):
print row
l.append(row)
print l
return l
def correct_user(id, pas):
if len(c.execute("SELECT * FROM Users WHERE username = '" + id + "' AND password = '" + pas + "'")) > 0:
print "user exists"
else:
print "user does not exist"
def has_inf(col, tbl, info):
    if len(c.execute(
            "SELECT '" + col + "' FROM Users WHERE username = '" + id + "' AND '" + col + "' = '" + info + "'").fetchall()) > 0:
        print col + " already exists"
else:
print col + " is OK"
| gpl-2.0 | 6,712,637,197,793,229,000 | 24.25 | 181 | 0.531953 | false |
MagicStack/asyncpg | asyncpg/transaction.py | 1 | 8297 | # Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
import enum
from . import connresource
from . import exceptions as apg_errors
class TransactionState(enum.Enum):
NEW = 0
STARTED = 1
COMMITTED = 2
ROLLEDBACK = 3
FAILED = 4
ISOLATION_LEVELS = {'read_committed', 'serializable', 'repeatable_read'}
ISOLATION_LEVELS_BY_VALUE = {
'read committed': 'read_committed',
'serializable': 'serializable',
'repeatable read': 'repeatable_read',
}
class Transaction(connresource.ConnectionResource):
"""Represents a transaction or savepoint block.
Transactions are created by calling the
:meth:`Connection.transaction() <connection.Connection.transaction>`
function.
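
    Example (an illustrative sketch; ``con`` is assumed to be an open
    :class:`Connection <connection.Connection>`)::

        async with con.transaction():
            await con.execute('INSERT INTO mytab (a) VALUES (100)')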
"""
__slots__ = ('_connection', '_isolation', '_readonly', '_deferrable',
'_state', '_nested', '_id', '_managed')
def __init__(self, connection, isolation, readonly, deferrable):
super().__init__(connection)
if isolation and isolation not in ISOLATION_LEVELS:
raise ValueError(
'isolation is expected to be either of {}, '
'got {!r}'.format(ISOLATION_LEVELS, isolation))
self._isolation = isolation
self._readonly = readonly
self._deferrable = deferrable
self._state = TransactionState.NEW
self._nested = False
self._id = None
self._managed = False
async def __aenter__(self):
if self._managed:
raise apg_errors.InterfaceError(
'cannot enter context: already in an `async with` block')
self._managed = True
await self.start()
async def __aexit__(self, extype, ex, tb):
try:
self._check_conn_validity('__aexit__')
except apg_errors.InterfaceError:
if extype is GeneratorExit:
# When a PoolAcquireContext is being exited, and there
# is an open transaction in an async generator that has
# not been iterated fully, there is a possibility that
# Pool.release() would race with this __aexit__(), since
# both would be in concurrent tasks. In such case we
# yield to Pool.release() to do the ROLLBACK for us.
# See https://github.com/MagicStack/asyncpg/issues/232
# for an example.
return
else:
raise
try:
if extype is not None:
await self.__rollback()
else:
await self.__commit()
finally:
self._managed = False
@connresource.guarded
async def start(self):
"""Enter the transaction or savepoint block."""
self.__check_state_base('start')
if self._state is TransactionState.STARTED:
raise apg_errors.InterfaceError(
'cannot start; the transaction is already started')
con = self._connection
if con._top_xact is None:
if con._protocol.is_in_transaction():
raise apg_errors.InterfaceError(
'cannot use Connection.transaction() in '
'a manually started transaction')
con._top_xact = self
else:
# Nested transaction block
if self._isolation:
top_xact_isolation = con._top_xact._isolation
if top_xact_isolation is None:
top_xact_isolation = ISOLATION_LEVELS_BY_VALUE[
await self._connection.fetchval(
'SHOW transaction_isolation;')]
if self._isolation != top_xact_isolation:
raise apg_errors.InterfaceError(
'nested transaction has a different isolation level: '
'current {!r} != outer {!r}'.format(
self._isolation, top_xact_isolation))
self._nested = True
if self._nested:
self._id = con._get_unique_id('savepoint')
query = 'SAVEPOINT {};'.format(self._id)
else:
query = 'BEGIN'
if self._isolation == 'read_committed':
query += ' ISOLATION LEVEL READ COMMITTED'
elif self._isolation == 'repeatable_read':
query += ' ISOLATION LEVEL REPEATABLE READ'
elif self._isolation == 'serializable':
query += ' ISOLATION LEVEL SERIALIZABLE'
if self._readonly:
query += ' READ ONLY'
if self._deferrable:
query += ' DEFERRABLE'
query += ';'
try:
await self._connection.execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.STARTED
def __check_state_base(self, opname):
if self._state is TransactionState.COMMITTED:
raise apg_errors.InterfaceError(
'cannot {}; the transaction is already committed'.format(
opname))
if self._state is TransactionState.ROLLEDBACK:
raise apg_errors.InterfaceError(
'cannot {}; the transaction is already rolled back'.format(
opname))
if self._state is TransactionState.FAILED:
raise apg_errors.InterfaceError(
'cannot {}; the transaction is in error state'.format(
opname))
def __check_state(self, opname):
if self._state is not TransactionState.STARTED:
if self._state is TransactionState.NEW:
raise apg_errors.InterfaceError(
'cannot {}; the transaction is not yet started'.format(
opname))
self.__check_state_base(opname)
async def __commit(self):
self.__check_state('commit')
if self._connection._top_xact is self:
self._connection._top_xact = None
if self._nested:
query = 'RELEASE SAVEPOINT {};'.format(self._id)
else:
query = 'COMMIT;'
try:
await self._connection.execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.COMMITTED
async def __rollback(self):
self.__check_state('rollback')
if self._connection._top_xact is self:
self._connection._top_xact = None
if self._nested:
query = 'ROLLBACK TO {};'.format(self._id)
else:
query = 'ROLLBACK;'
try:
await self._connection.execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.ROLLEDBACK
@connresource.guarded
async def commit(self):
"""Exit the transaction or savepoint block and commit changes."""
if self._managed:
raise apg_errors.InterfaceError(
'cannot manually commit from within an `async with` block')
await self.__commit()
@connresource.guarded
async def rollback(self):
"""Exit the transaction or savepoint block and rollback changes."""
if self._managed:
raise apg_errors.InterfaceError(
'cannot manually rollback from within an `async with` block')
await self.__rollback()
def __repr__(self):
attrs = []
attrs.append('state:{}'.format(self._state.name.lower()))
if self._isolation is not None:
attrs.append(self._isolation)
if self._readonly:
attrs.append('readonly')
if self._deferrable:
attrs.append('deferrable')
if self.__class__.__module__.startswith('asyncpg.'):
mod = 'asyncpg'
else:
mod = self.__class__.__module__
return '<{}.{} {} {:#x}>'.format(
mod, self.__class__.__name__, ' '.join(attrs), id(self))
| apache-2.0 | -3,851,821,235,338,135,000 | 33.861345 | 78 | 0.556105 | false |
turtledb/0install | zeroinstall/injector/qdom.py | 1 | 3485 | """A quick DOM implementation.
Python's xml.dom is very slow. The xml.sax module is also slow (as it imports urllib2).
This is our light-weight version.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from xml.parsers import expat
import zeroinstall
from zeroinstall.injector import versions
_parsed_version = versions.parse_version(zeroinstall.version)
class Element(object):
"""An XML element.
@ivar uri: the element's namespace
@type uri: str
@ivar name: the element's localName
@type name: str
@ivar attrs: the element's attributes (key is in the form [namespace " "] localName)
@type attrs: {str: str}
@ivar childNodes: children
@type childNodes: [L{Element}]
@ivar content: the text content
@type content: str"""
__slots__ = ['uri', 'name', 'attrs', 'childNodes', 'content']
def __init__(self, uri, name, attrs):
"""@type uri: str
@type name: str
@type attrs: {str: str}"""
self.uri = uri
self.name = name
self.attrs = attrs.copy()
self.content = None
self.childNodes = []
def __str__(self):
"""@rtype: str"""
attrs = [n + '=' + self.attrs[n] for n in self.attrs]
start = '<{%s}%s %s' % (self.uri, self.name, ' '.join(attrs))
if self.childNodes:
return start + '>' + '\n'.join(map(str, self.childNodes)) + ('</%s>' % (self.name))
elif self.content:
return start + '>' + self.content + ('</%s>' % (self.name))
else:
return start + '/>'
def getAttribute(self, name):
"""@type name: str
@rtype: str"""
return self.attrs.get(name, None)
class QSAXhandler(object):
"""SAXHandler that builds a tree of L{Element}s"""
def __init__(self, filter_for_version = False):
"""@param filter_for_version: skip elements if their if-0install-version attribute doesn't match L{zeroinstall.version} (since 1.13).
@type filter_for_version: bool
@rtype: bool"""
self.stack = []
if filter_for_version:
self.filter_range = lambda expr: versions.parse_version_expression(expr)(_parsed_version)
else:
self.filter_range = lambda x: True
def startElementNS(self, fullname, attrs):
"""@type fullname: str
@type attrs: {str: str}"""
split = fullname.split(' ', 1)
if len(split) == 2:
self.stack.append(Element(split[0], split[1], attrs))
else:
self.stack.append(Element(None, fullname, attrs))
self.contents = ''
def characters(self, data):
"""@type data: str"""
self.contents += data
def endElementNS(self, name):
"""@type name: str"""
contents = self.contents.strip()
self.stack[-1].content = contents
self.contents = ''
new = self.stack.pop()
if self.stack:
target_versions = new.attrs.get('if-0install-version')
if target_versions and not self.filter_range(target_versions):
return
self.stack[-1].childNodes.append(new)
else:
self.doc = new
def parse(source, filter_for_version = False):
"""Parse an XML stream into a tree of L{Element}s.
@param source: data to parse
@type source: file
@param filter_for_version: skip elements if their if-0install-version attribute doesn't match L{zeroinstall.version} (since 1.13).
@type filter_for_version: bool
@return: the root
@rtype: L{Element}"""
handler = QSAXhandler(filter_for_version)
parser = expat.ParserCreate(namespace_separator = ' ')
parser.StartElementHandler = handler.startElementNS
parser.EndElementHandler = handler.endElementNS
parser.CharacterDataHandler = handler.characters
parser.ParseFile(source)
return handler.doc
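

# Illustrative usage sketch (the helper and file name are examples only, not
# part of the module's API): parse an XML file into an Element tree and
# inspect the root element.
def _example_parse(xml_path):
    """@type xml_path: str
    @rtype: (str, [str])"""
    with open(xml_path, 'rb') as stream:
        root = parse(stream)
        return root.name, [child.name for child in root.childNodes]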
| lgpl-2.1 | -237,805,872,924,645,440 | 29.840708 | 135 | 0.682927 | false |
bandit145/ans-between | src/dictops.py | 1 | 1886 | #TODO: allow missing params and args lists to pass tests
from src import logging
class dict_mgm:
#creates ansible command to run
def make_play(data,db_data,location):
if dict_mgm.data_check(data, db_data) == 'OK':
command = 'ansible-playbook {location}'.format(location=location)
            # did an incredibly bad if/else thing
logging.debug(data.keys())
command+=data['name']+' '
if 'params' in data.keys():
command+= dict_mgm.sort_params(data['params'])
if 'args' in data.keys():
command+= dict_mgm.sort_args(data['args'])
if 'password' in data.keys():
password = data['password']
else:
password = None
logging.debug(command)
logging.debug(password)
return command, password
else:
return 'Error', None
#check integrity of submitted data compared to its schema model
def data_check(data,db_data):
logging.debug(data)
logging.debug(db_data)
if len(data) != len(db_data):
logging.debug('triggered 1')
return 'Error'
if data.keys() != db_data.keys():
logging.debug('triggered 2')
return 'Error'
if len(data.values()) != len(db_data.values()):
logging.debug('triggered 3')
return 'Error'
#for playbooks that have no params/args
try:
if len(data['params']) != len(db_data['params']):
logging.debug('triggered 4')
return 'Error'
except KeyError:
pass
try:
if len(data['args']) != len(db_data['args']):
logging.debug('triggered 5')
return 'Error'
except KeyError:
pass
logging.debug('OK')
return 'OK'
def sort_params(params):#deals with param dics
command = ''
for item in params:
keys= list(item.keys())
values= list(item.values())
logging.debug(keys)
logging.debug(values)
command+=keys[0]+' '+values[0]+' '
return command
def sort_args(args): #deals with args list
command = ''
for arg in args:
command+= arg+' '
return command | mit | -4,789,335,474,906,966,000 | 25.208333 | 68 | 0.656416 | false |
phense/check_duplicate_files | check_duplicate_files.py | 1 | 21660 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""check_duplicate_files.py
Finds all duplicate files in given directories using a hash-algorithm.
After scanning the filesystem for possible duplicate files (all files with a unique
filesize are dismissed, except for Images when selecting the perceptual hash
algorithm). All possible candidate duplicate files are hashed. With pre-filtering,
this module is extremely fast on large file-sets since only a handful of files
need to actually hbe ashed.
Standard use: python3 check_duplicate_files -i /some/folder ./out.txt
"""
# FEATURE(zyrkon): ignore/include win/linux/mac hidden file
# FEATURE(zyrkon): implement multiprocessor for hashing
# FEATURE(zyrkon): find broken symbolic links
# FEATURE(zyrkon): find empty files and directories
# FEATURE(zyrkon): --size 20M-1G to find files between 20mb and 1gb (example)
# FEATURE(zyrkon): maybe a GUI
__author__ = 'Peter Hense ([email protected])'
__copyright__ = 'Copyright (c) 2015, Peter Hense'
__license__ = 'Apache License Version 2.0'
__credits__ = '' # ['List', 'of', 'programmers']
__status__ = 'Development' # Prototype / Development / Production
__version__ = '0.8'
import os
import sys
if sys.version_info < (3, 0):
sys.stdout.write("Sorry, requires Python 3.x, not Python 2.x\n")
sys.exit(1)
import codecs
import datetime
import hashlib
import json
import operator
import signal
from argparse import ArgumentParser
from argparse import ArgumentTypeError
from collections import defaultdict
from tqdm import *
from stat import *
try:
from PIL import Image # Pillow (modern PIL fork)
except ImportError:
IMG_LIB_ERROR = True
else:
IMG_LIB_ERROR = False
FILEREADERROR = 255
def generate_hashes(filelist, image_list, hashtype, pHash):
""" Main-Module for handling all File-Hashing and saving the hash-results
Args:
filelist: List of file-paths to REGULAR FILES to run a normal hash-algorithm on
image_list: List of file-paths of images to run a perceptual hash-algorithm on
hashtype: hash-algorithm to use for normal files (default=md5)
pHash: boolean switch to activate perceptual image-hashing
Returns:
d_list_hash: dictionary with lists of files sorted by hash-value (key)
errorlist: list of files that could not be accessed / read
"""
d_list_hash = defaultdict(list)
errorlist = []
for file_path in tqdm(filelist, 'hashing', None, True):
hash = _hash(file_path, hashtype)
if hash != FILEREADERROR:
d_list_hash[hash].append(file_path)
else:
errorlist.append(file_path)
if pHash: # perceptual image hashing
d_list_hash_img = defaultdict(list)
for file_path in tqdm(image_list, 'hashing images:', None, True):
hash = _perceptive_hash(file_path)
if hash != FILEREADERROR:
d_list_hash_img[hash].append(file_path)
else:
errorlist.append(file_path)
# calculate hamming-distance between all image-hashes to find
# outliners (hamming distance of two perceptual hashes < 4 means the images
# are basically the same)
index_list = [key for key in d_list_hash_img]
deleted_index_keys = []
for hash1 in tqdm(index_list, 'calculating', None, True):
if hash1 in deleted_index_keys:
continue
for hash2 in index_list:
if hash1 == hash2:
continue # same entry in list
if hash2 in deleted_index_keys:
continue
if _hamming_distance(hash1, hash2) < 4:
d_list_hash_img[hash1] += d_list_hash_img[hash2]
del d_list_hash_img[hash2]
deleted_index_keys.append(hash2)
# Filter out all unique entries from our resultset
_delete_unique_entries(d_list_hash)
if pHash:
_delete_unique_entries(d_list_hash_img)
d_list_hash.update(d_list_hash_img)
return d_list_hash, errorlist
def _perceptive_hash(file_path, hash_size = 8):
"""Calculates a hash-value from an image
Conversion uses a resized, grayscaled pixel-array of the image, converting
the pixel-array to a number-array (differences between neighboring pixels)
and finally converting these values to a hex-string of length hash_size
Args:
file_path: Path to an Image File
hash_size: Size of the generated hash string
Returns:
hash_string: generated hash string
"""
    # if memory consumption is too high for many images, it is possible to use
# with open (file_path, 'rb') as f:
# image = Image.open(f)
# ...
# del image
try:
image = Image.open(file_path)
except:
return FILEREADERROR
# Grayscale and shrink the image in one step
image = image.convert('L').resize((hash_size + 1, hash_size), Image.ANTIALIAS)
pixels = list(image.getdata())
    # Compare adjacent pixels
difference = []
for row in range(hash_size):
for col in range(hash_size):
pixel_left = image.getpixel((col, row))
pixel_right = image.getpixel((col +1, row))
difference.append(pixel_left > pixel_right)
# Convert binary array to hexadecimal string
decimal_value = 0
hex_string = []
for index, value in enumerate(difference):
if value:
decimal_value += 2**(index % 8)
if (index % 8) == 7:
hex_string.append(hex(decimal_value)[2:].rjust(2, '0'))
decimal_value = 0
return ''.join(hex_string)
def _hash(file_path, hashtype):
"""Uses a specified standard hash-algorithm to hash a regular file
Args:
file_path: file_path to a regular file that can be hashed
hashtype: version of hash-algorithm, default = md5
Returns:
hash: hash-string of the hashed file
Raises:
Returns global const FILEREADERROR on IOError
"""
try:
with open(file_path, 'rb') as f:
contents = f.read()
except:
return FILEREADERROR
hasher = getattr(hashlib, hashtype.lower(), hashlib.md5)
return hasher(contents).hexdigest()
def _hamming_distance(string1, string2):
""" Calculates the Hamming Distance of two strings, fast version
Args:
string1, string2: two strings of the same length
Returns:
Integer describing the Hamming Distance of the input strings
"""
assert len(string1) == len(string2)
ne = operator.ne # faster than '!=' and 'str.__ne__'
return sum(map(ne, string1, string2))
def scan_directories(directories, pHash):
""" creates file-lists from given directories
Recursively walks the given directories and their subdirectories, checking
all including files and their file-sizes. These are saved inside a dictionary
and pre-filtered by filesize. Optional separate handling of image-files.
Args:
directories: List of directories to crawl
pHash: boolean switch to active separate handling of image-files
Returns:
prefiltered_files: List of files with their file-paths
images: List of image-files if pHash is set, else an empty list
errorlist: List of files that could not be accessed
"""
extensions = ('.jpg', '.jpeg', '.png', '.bmp')
d_list_filesize = defaultdict(list)
images = []
errorlist = []
count = 0
print('Scanning directories...')
# code could be a lot smaller with `if pHash` inside the innermost loop
    # it would also lead to a LOT of unnecessary checking
if not pHash: # use normal hash on all files
for root_dir in directories:
for path, subdirList, fileList in os.walk(root_dir):
for fname in fileList:
qualified_filename = os.path.join(path, fname)
try: # denied permission for os.stat
st = os.stat(qualified_filename)
if S_ISREG(st.st_mode):
d_list_filesize[st.st_size].append(qualified_filename)
count += 1
except:
errorlist.append(qualified_filename)
count += 1
else: # split list of normal- and image-files
for root_dir in directories:
for path, subdirList, fileList in os.walk(root_dir):
for fname in fileList:
qualified_filename = os.path.join(path, fname)
if fname.endswith(extensions):
images.append(qualified_filename)
count += 1
else:
try:
st = os.stat(qualified_filename)
if S_ISREG(st.st_mode):
d_list_filesize[st.st_size].append(qualified_filename)
count += 1
except:
errorlist.append(qualified_filename)
count += 1
# Statistic
print('\nFiles found: %s' % count)
# pre-filter all files with unique filesize
# this is where we need the dictionary
_delete_unique_entries(d_list_filesize)
# put all filtered files in a list for easier handling
prefiltered_files = [path for paths in d_list_filesize.values() for path in paths]
# Statistic
print('Possible candidates: %s\n' % (prefiltered_files.__len__() + images.__len__()))
return prefiltered_files, images, errorlist
def _delete_unique_entries(dictionary):
""" removes all Lists from a dictionary that contain a single element
Args:
dictionary a dictionary of type defaultdict(set) or defaultdict(list)
"""
mark_for_delete = []
for key in dictionary:
if dictionary[key].__len__() == 1:
mark_for_delete.append(key)
for i in mark_for_delete:
del dictionary[i]
return
def write_output_text(d_list_hash, errorlist, outfile):
""" Writes result of this module in textform to a file
Args:
d_list_hash: found duplicates in form of a dictionary (key = hash-value)
outfile: the path and filename to write the output into (needs write-permission)
errorlist: list of files that could not be accessed
"""
write_errorlist = []
try:
with codecs.open(outfile, 'w', encoding='utf-8') as f:
f.write('\nThe Following File-Duplicates where found:')
f.write('\n==========================================\n')
for key in d_list_hash:
f.write('Hash: %s\n' %key)
for file_path in d_list_hash[key]:
try:
f.write('%s \n' % os.path.normcase(file_path))
except:
write_errorlist.append(file_path)
f.write('-------------------\n')
if errorlist.__len__() > 0:
f.write('\nThe Following Files could not be accessed:')
f.write('\n==========================================\n')
for error in errorlist:
try:
f.write('%s\n' % os.path.normcase(error))
except:
write_errorlist.append(error)
f.flush()
except: #IOError, UnicodeEncodeError
print('\n- Error - Could not open Output File.\n')
if write_errorlist.__len__() > 0:
print('- Error - These files could not be written to output file:\n')
for write_error in write_errorlist:
print('%s\n' % os.path.normcase(write_error))
print('(Please check your filesystem encoding)\n')
return
def write_output_bash(d_list_hash, outfile, create_link):
""" Writes result of this module as a bash script to a file
Args:
d_list_hash: found duplicates in form of a dictionary (key = hash-value)
outfile: the path and filename to write the output into (needs write-permission)
create_link: boolean switch to select, if a deleted file should be
replaced by a hardlink
"""
write_errorlist = []
try:
with codecs.open(outfile, 'w', encoding='utf-8') as f:
f.write('#!/bin/bash\n\n')
f.write('# This script is machine generated and might do harm to your\n')
f.write('# running system.\n')
f.write('# Please check this script carefully before running\n')
if create_link:
f.write('printf "replacing duplicates with hardlinks..."\n')
else:
f.write('printf "deleting duplicates..."\n')
for key in d_list_hash:
try:
original = os.path.normcase(d_list_hash[key][0])
f.write('# ------------------\n')
f.write('# Original: %s\n' % original)
for copy in d_list_hash[key][1:]:
f.write('rm %s\n' % copy)
if create_link:
f.write('ln %s %s\n' % (original, os.path.normcase(copy)))
except:
write_errorlist.append(file_path)
f.flush()
except: #IOError, UnicodeEncodeError
print('\n- Error - Could not open Output File.\n')
if write_errorlist.__len__() > 0:
print('- Error - These files could not be written to output file:\n')
for write_error in write_errorlist:
print('%s\n' % write_error)
print('(Please check your filesystem encoding)\n')
return
def write_output_win(d_list_hash, outfile, create_link):
""" Writes result of this module as a batch script to a file
Args:
d_list_hash: found duplicates in form of a dictionary (key = hash-value)
outfile: the path and filename to write the output into (needs write-permission)
create_link: boolean switch to select, if a deleted file should be
replaced by a hardlink
"""
write_errorlist = []
try:
with codecs.open(outfile, 'w', encoding='utf-8') as f:
f.write('@ECHO OFF\n\n')
f.write('REM This script is machine generated and might do harm to your\n')
f.write('REM running system.\n')
f.write('REM Please check this script carefully before running\n')
if create_link:
f.write('ECHO "replacing duplicates with hardlinks..."\n')
else:
f.write('ECHO "deleting duplicates..."\n')
for key in d_list_hash:
try:
original = os.path.normcase(d_list_hash[key][0])
f.write('REM ------------------\n')
f.write('REM Original: %s\n' % original)
for copy in d_list_hash[key][1:]:
f.write('DEL %s\n' % copy)
if create_link:
f.write('mklink /H %s %s\n' % (os.path.normcase(copy), original))
except:
write_errorlist.append(file_path)
f.flush()
except: #IOError, UnicodeEncodeError
print('\n- Error - Could not open Output File.\n')
if write_errorlist.__len__() > 0:
print('- Error - These files could not be written to output file:\n')
for write_error in write_errorlist:
print('%s\n' % write_error)
print('(Please check your filesystem encoding)\n')
return
def write_output_json(d_list_hash, outfile):
""" Writes result of this module as JSON to a file
Args:
d_list_hash: found duplicates in form of a dictionary (key = hash-value)
outfile: the path and filename to write the output into (needs write-permission)
"""
try:
with codecs.open(outfile, 'w', encoding='utf-8') as f:
json.dump(d_list_hash, f, ensure_ascii=False, indent=4)
except:
print('\n- Error - Could not write JSON Data to file')
return
def _query_yes_no(question, default="yes"):
"""User Console Interaction for Y/N Questions.
Args:
question: String containing a Question that needs User input
default: select the default answer of the question
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def _signal_handler(signal, frame):
sys.exit('Aborting...')
def _readable_dir(prospective_dir):
""" Checks if a given string is a valid path on the file-system
Args:
prospective_dir: file-path as String
Returns:
prospective_dir if checks are passed
Raises:
ArgumentTypeError if checks fail
"""
if not os.path.isdir(prospective_dir):
raise ArgumentTypeError('readable_dir:{0} is not a valid path'.format(prospective_dir))
if os.access(prospective_dir, os.R_OK):
return prospective_dir
else:
raise ArgumentTypeError('readable_dir:{0} is not a readable dir'.format(prospective_dir))
def main():
signal.signal(signal.SIGINT, _signal_handler)
signal.signal(signal.SIGTERM, _signal_handler)
start_time = datetime.datetime.now()
parser = ArgumentParser(description = 'Check Duplicate Files')
parser.add_argument('-i', action = 'append', dest = 'dir',
type = _readable_dir,
help = 'add directory to list for duplicate search'
)
parser.add_argument('--hash', action = 'store', dest = 'hashtype',
default = 'md5',
help = 'select hash-type (md5 (default), sha1, sha224, sha256, sha384, sha512)'
)
parser.add_argument('-p', '--perceptual-hashing', action = 'store_true',
dest = 'pHash', default = False,
help = 'enables perceptual hashing of images'
)
parser.add_argument('-o', '--output-format', action = 'store', dest = 'outformat',
default = 'text',
help = 'select output format (text, json, bash_rm, bash_link, win_del, win_link)'
)
parser.add_argument('outfile', #nargs='?',
help = 'output file for found duplicates'
)
parser.add_argument('--version', action='version',
version='%(prog)s {version}'.format(version=__version__))
args = parser.parse_args()
    # disable perceptual hashing (normal hashes on all files) when the PIL lib
    # could not be loaded or it is not enabled
pHash = ((not IMG_LIB_ERROR) and args.pHash)
if not pHash:
print('(Perceptual Image Scan disabled)')
# Scan all directories and find duplicates by filesize
prefiltered_filelist, images, read_errors = scan_directories(args.dir, pHash)
# Ask the user if he wants to continue, now that he knows how
# many files need to be hashed. Exclude the query-time from
# execution time
time_query = datetime.datetime.now()
if not _query_yes_no('Do you want to continue?', 'yes'):
sys.exit(0)
timedelta_query = datetime.datetime.now() - time_query # timedelta
    # generate the hashes and calculate the execution time
# append possible new read-errors to the general error-list
d_list_hash = defaultdict(list)
d_list_hash, read_errors2 = generate_hashes(prefiltered_filelist, images, args.hashtype, pHash)
read_errors += read_errors2
execution_time = datetime.datetime.now() - start_time # timedelta
execution_time -= timedelta_query # timedelta
# write output
output = ['text', 'json', 'bash_rm', 'bash_link', 'win_del', 'win_link']
if args.outformat in output:
if args.outformat == 'text':
write_output_text(d_list_hash, read_errors, args.outfile)
elif args.outformat == 'json':
write_output_json(d_list_hash, args.outfile)
elif args.outformat == 'bash_rm':
write_output_bash(d_list_hash, args.outfile, False)
elif args.outformat == 'bash_link':
write_output_bash(d_list_hash, args.outfile, True)
elif args.outformat == 'win_del':
write_output_win(d_list_hash, args.outfile, False)
elif args.outformat == 'win_link':
write_output_win(d_list_hash, args.outfile, True)
else:
write_output_text(d_list_hash, read_errors, args.outfile)
print('\nExecution Time: %s.%s seconds' % (execution_time.seconds,
execution_time.microseconds))
# done
sys.exit(0)
if __name__ == '__main__':
main() | apache-2.0 | -369,360,307,060,061,250 | 34.862583 | 105 | 0.580979 | false |
petrpulc/git-cmp | checkers/references.py | 1 | 1113 | """
Reference level checker (existence of given references or all refs/heads and refs/tags).
"""
from common import Common
from utils import check_diff
def __filter(reference_list):
return set(reference for reference in
reference_list if reference.split('/')[1] in ('heads', 'tags'))
def check():
"""
Run the checker on references.
"""
print("=== References")
if Common.args.references is None:
o_refs = __filter(Common.original.listall_references())
n_refs = __filter(Common.new.listall_references())
check_diff(o_refs, n_refs, "References", 2)
else:
o_refs = set()
for reference in Common.args.references:
if reference not in Common.original.listall_references():
print(" {} does not exist, please report".format(reference))
exit(1)
if reference not in Common.new.listall_references():
print(" {} expected, but not found".format(reference))
exit(1)
o_refs.add(reference)
print(" OK")
Common.references = o_refs
| mit | 2,314,850,003,653,533,000 | 30.8 | 88 | 0.602875 | false |
kubeflow/kfp-tekton | backend/api/python_http_client/kfp_server_api/__init__.py | 1 | 3915 | # coding: utf-8
# flake8: noqa
"""
Kubeflow Pipelines API
This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition.
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
__version__ = "1.5.0"
# import apis into sdk package
from kfp_server_api.api.experiment_service_api import ExperimentServiceApi
from kfp_server_api.api.healthz_service_api import HealthzServiceApi
from kfp_server_api.api.job_service_api import JobServiceApi
from kfp_server_api.api.pipeline_service_api import PipelineServiceApi
from kfp_server_api.api.pipeline_upload_service_api import PipelineUploadServiceApi
from kfp_server_api.api.run_service_api import RunServiceApi
# import ApiClient
from kfp_server_api.api_client import ApiClient
from kfp_server_api.configuration import Configuration
from kfp_server_api.exceptions import OpenApiException
from kfp_server_api.exceptions import ApiTypeError
from kfp_server_api.exceptions import ApiValueError
from kfp_server_api.exceptions import ApiKeyError
from kfp_server_api.exceptions import ApiException
# import models into sdk package
from kfp_server_api.models.api_cron_schedule import ApiCronSchedule
from kfp_server_api.models.api_experiment import ApiExperiment
from kfp_server_api.models.api_experiment_storage_state import ApiExperimentStorageState
from kfp_server_api.models.api_get_healthz_response import ApiGetHealthzResponse
from kfp_server_api.models.api_get_template_response import ApiGetTemplateResponse
from kfp_server_api.models.api_job import ApiJob
from kfp_server_api.models.api_list_experiments_response import ApiListExperimentsResponse
from kfp_server_api.models.api_list_jobs_response import ApiListJobsResponse
from kfp_server_api.models.api_list_pipeline_versions_response import ApiListPipelineVersionsResponse
from kfp_server_api.models.api_list_pipelines_response import ApiListPipelinesResponse
from kfp_server_api.models.api_list_runs_response import ApiListRunsResponse
from kfp_server_api.models.api_parameter import ApiParameter
from kfp_server_api.models.api_periodic_schedule import ApiPeriodicSchedule
from kfp_server_api.models.api_pipeline import ApiPipeline
from kfp_server_api.models.api_pipeline_runtime import ApiPipelineRuntime
from kfp_server_api.models.api_pipeline_spec import ApiPipelineSpec
from kfp_server_api.models.api_pipeline_version import ApiPipelineVersion
from kfp_server_api.models.api_read_artifact_response import ApiReadArtifactResponse
from kfp_server_api.models.api_relationship import ApiRelationship
from kfp_server_api.models.api_report_run_metrics_request import ApiReportRunMetricsRequest
from kfp_server_api.models.api_report_run_metrics_response import ApiReportRunMetricsResponse
from kfp_server_api.models.api_resource_key import ApiResourceKey
from kfp_server_api.models.api_resource_reference import ApiResourceReference
from kfp_server_api.models.api_resource_type import ApiResourceType
from kfp_server_api.models.api_run import ApiRun
from kfp_server_api.models.api_run_detail import ApiRunDetail
from kfp_server_api.models.api_run_metric import ApiRunMetric
from kfp_server_api.models.api_run_storage_state import ApiRunStorageState
from kfp_server_api.models.api_status import ApiStatus
from kfp_server_api.models.api_trigger import ApiTrigger
from kfp_server_api.models.api_url import ApiUrl
from kfp_server_api.models.job_mode import JobMode
from kfp_server_api.models.protobuf_any import ProtobufAny
from kfp_server_api.models.report_run_metrics_response_report_run_metric_result import ReportRunMetricsResponseReportRunMetricResult
from kfp_server_api.models.report_run_metrics_response_report_run_metric_result_status import ReportRunMetricsResponseReportRunMetricResultStatus
from kfp_server_api.models.run_metric_format import RunMetricFormat
| apache-2.0 | 8,307,729,000,647,385,000 | 53.375 | 145 | 0.844955 | false |
mikegagnon/sidenote | prefix-links.py | 1 | 1725 | #!/usr/bin/env python
#
# This is free and unencumbered software released into the public domain.
#
# Sometimes you want to include one sidenote document into another.
# One way you could do that is copy the .md files from one project into another.
# However, this creates a risk of link-tag collisions. I.e. one project
# defines ~foo and the other project also defines ~foo.
#
# prefix-links.py solves this problem. It takes a .md file as input, then
# prefixes each link tag with a random string. Therefore ~foo becomes
# ~4C5FGAL2foo
#
# Then you can safely include .md files from multiple projects into another
# project
#
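# For example, with a generated key of "4C5FGAL2" (a sample value; a fresh
# random key is produced on every run):
#
#   a tilde anchor   ~foo    becomes   ~4C5FGAL2foo
#   a link target    (##foo  becomes   (##4C5FGAL2foo
#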
from sidenote import *
import argparse
import random
import re
import string
# https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
key = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
def obscure(filename):
with open(filename) as f:
lines = f.readlines()
for line in lines:
newline = ""
# tokenize the line into links and non-links
for part in LINK_PARSER.split(line):
if LINK_PARSER.match(part):
newpart = part.replace("(##", "(##" + key)
newline += newpart
else:
newline += part
if TILDE_ANCHOR_PARSER.match(newline):
newline = newline.replace("~", "~" + key)
print newline,
if __name__=="__main__":
parser = argparse.ArgumentParser(description='"Obscure" links in a Sidenote document')
parser.add_argument('file', type=str,
help='the markdown file to obscure')
args = parser.parse_args()
obscure(args.file)
| unlicense | 1,335,531,607,694,736,400 | 30.363636 | 115 | 0.662029 | false |
slipstream/SlipStreamClient | client/src/main/python/slipstream/Logger.py | 1 | 1610 | import os
import errno
import logging
from logging.handlers import RotatingFileHandler
class Logger(object):
LOGGER_NAME = 'SSClient'
LOGFILE_MAXBYTES = 2*1024*1024
LOGFILE_BACKUPCOUNT = 5
LOGFILE_FORMAT = "%(asctime)s:%(levelname)s:%(name)s:%(message)s"
log_file = '/var/log/slipstream/client/slipstream-node.log'
def __init__(self, config_holder):
self.log_to_file = True
self.log_level = 'info'
self.logger_name = ''
config_holder.assign(self)
self.logger = None
self._configure_logger()
def _configure_logger(self):
self.logger = logging.getLogger(self.logger_name or Logger.LOGGER_NAME)
numeric_level = getattr(logging, self.log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % self.log_level)
self.logger.setLevel(numeric_level)
formatter = logging.Formatter(self.LOGFILE_FORMAT)
if self.log_to_file:
self._create_log_dir()
handler = RotatingFileHandler(self.log_file,
maxBytes=self.LOGFILE_MAXBYTES,
backupCount=self.LOGFILE_BACKUPCOUNT)
else:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
self.logger.addHandler(handler)
def _create_log_dir(self):
log_dir = os.path.dirname(self.log_file)
try:
os.makedirs(log_dir)
except OSError, ex:
if ex.errno != errno.EEXIST:
raise
def get_logger(self):
return self.logger
| apache-2.0 | -4,789,312,884,300,418,000 | 30.568627 | 79 | 0.618012 | false |
kpreid/shinysdr | shinysdr/plugins/wspr/test_blocks.py | 1 | 3942 | # Copyright 2017 Kevin Reid and the ShinySDR contributors
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.internet import defer
from twisted.trial import unittest
from twisted.internet import task
from shinysdr.plugins.wspr.blocks import WSPRFilter, WAVIntervalSink
from shinysdr.plugins.wspr.interfaces import IWAVIntervalListener
class TestWSPRFilter(unittest.TestCase):
def test_for_smoke(self):
WSPRFilter(48000)
class TestWAVIntervalSink(unittest.TestCase):
def setUp(self):
self.clock = task.Clock()
self.listener = FakeListener()
self.sink = WAVIntervalSink(
interval=120,
duration=115,
listener=self.listener,
sample_rate=48000,
_callLater=self.clock.callLater,
_time=self.clock.seconds,
_deferToThread=self.deferToThread)
def deferToThread(self, f, *args, **kwargs):
"""What thread?"""
return defer.succeed(f(*args, **kwargs))
def test_listener_interface(self):
verifyObject(IWAVIntervalListener, self.listener)
def advance_to_next_interval(self):
self.clock.advance(120 - (self.clock.seconds() % 120))
def test_time(self):
self.sink.start_running()
# initially nothing has happened.
self.assertFalse(self.listener._filesClosed)
self.assertFalse(self.listener._filesOpened)
# start of first interval.
self.advance_to_next_interval()
self.assertEqual(self.listener._filesOpened, ['120'])
# just before end of first interval.
self.clock.advance(114)
self.assertEqual(self.listener._filesClosed, [])
# end of first interval.
self.clock.advance(1)
self.assertEqual(self.listener._filesClosed, ['120'])
# next interval begins.
self.advance_to_next_interval()
self.assertEqual(self.listener._filesOpened, ['120', '240'])
self.assertEqual(self.listener._filesClosed, ['120'])
def test_start(self):
# nothing is scheduled
self.assertFalse(self.clock.getDelayedCalls())
# until we start it
self.sink.start_running()
self.assertEqual(len(self.clock.getDelayedCalls()), 1)
# and starting it again doesn't start it twice.
self.sink.start_running()
self.assertEqual(len(self.clock.getDelayedCalls()), 1)
# More things to test, but so little time.
#
# What if interval == duration? (Currently undefined behavior)
#
# What if there's an error in opening or closing the wav file?
#
# Are the interactions with the wavfile_sink block being done in a thread?
# They block on aquiring locks and file IO.
#
# Are the internal connections sane?
@implementer(IWAVIntervalListener)
class FakeListener(object):
def __init__(self):
self._filesOpened = []
self._filesClosed = []
def fileClosed(self, filename):
self._filesClosed.append(filename)
def fileOpened(self, filename):
self._filesOpened.append(filename)
def filename(self, time):
return str(int(time))
| gpl-3.0 | 4,385,461,148,180,318,700 | 31.04878 | 82 | 0.678082 | false |
psychopy/psychopy | psychopy/sound/microphone.py | 1 | 35191 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Audio recording using a microphone.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = ['Microphone']
import sys
import psychopy.logging as logging
from psychopy.constants import NOT_STARTED, STARTED
from psychopy.preferences import prefs
from .audioclip import *
from .audiodevice import *
from .exceptions import *
import numpy as np
_hasPTB = True
try:
import psychtoolbox.audio as audio
except (ImportError, ModuleNotFoundError):
logging.warning(
"The 'psychtoolbox' library cannot be loaded but is required for audio "
"capture (use `pip install psychtoolbox` to get it). Microphone "
"recording will be unavailable this session. Note that opening a "
"microphone stream will raise an error.")
_hasPTB = False
class RecordingBuffer(object):
"""Class for a storing a recording from a stream.
Think of instances of this class behaving like an audio tape whereas the
`Microphone` class is the tape recorder. Samples taken from the stream are
written to the tape which stores the data.
Used internally by the `Microphone` class, users usually do not create
instances of this class themselves.
Parameters
----------
sampleRateHz : int
Sampling rate for audio recording in Hertz (Hz). By default, 48kHz
        (``sampleRateHz=48000``) is used, which is adequate for most consumer
grade microphones (headsets and built-in).
channels : int
Number of channels to record samples to `1=Mono` and `2=Stereo`.
maxRecordingSize : int
Maximum recording size in kilobytes (Kb). Since audio recordings tend to
consume a large amount of system memory, one might want to limit the
size of the recording buffer to ensure that the application does not run
out of memory. By default, the recording buffer is set to 24000 KB (or
24 MB). At a sample rate of 48kHz, this will result in 62.5 seconds of
continuous audio being recorded before the buffer is full.
policyWhenFull : str
What to do when the recording buffer is full and cannot accept any more
samples. If 'ignore', samples will be silently dropped and the `isFull`
property will be set to `True`. If 'warn', a warning will be logged and
the `isFull` flag will be set. Finally, if 'error' the application will
raise an exception.
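    Examples
    --------
    A minimal sketch of the write/read cycle this class supports (the values
    and the `samples` array are illustrative; real samples come from the
    audio stream)::
        recBuf = RecordingBuffer(sampleRateHz=48000, channels=2)
        recBuf.seek(0, absolute=True)      # rewind before a new recording
        overruns = recBuf.write(samples)   # append a chunk of stream samples
        clip = recBuf.getSegment()         # AudioClip of everything so far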
"""
def __init__(self, sampleRateHz=SAMPLE_RATE_48kHz, channels=2,
maxRecordingSize=24000, policyWhenFull='ignore'):
self._channels = channels
self._sampleRateHz = sampleRateHz
self._maxRecordingSize = maxRecordingSize
self._samples = None # `ndarray` created in _allocRecBuffer`
self._offset = 0 # recording offset
self._lastSample = 0 # offset of the last sample from stream
self._spaceRemaining = None # set in `_allocRecBuffer`
self._totalSamples = None # set in `_allocRecBuffer`
# check if the value is valid
if policyWhenFull not in ['ignore', 'warn', 'error']:
raise ValueError("Invalid value for `policyWhenFull`.")
self._policyWhenFull = policyWhenFull
self._warnedRecBufferFull = False
self._loops = 0
self._allocRecBuffer()
def _allocRecBuffer(self):
"""Allocate the recording buffer. Called internally if properties are
changed."""
# allocate another array
nBytes = self._maxRecordingSize * 1000
recArraySize = int((nBytes / self._channels) / (np.float32()).itemsize)
self._samples = np.zeros(
(recArraySize, self._channels), dtype=np.float32, order='C')
# sanity check
assert self._samples.nbytes == nBytes
self._totalSamples = len(self._samples)
self._spaceRemaining = self._totalSamples
@property
def samples(self):
"""Reference to the actual sample buffer (`ndarray`)."""
return self._samples
@property
def bufferSecs(self):
"""Capacity of the recording buffer in seconds (`float`)."""
return self._totalSamples / self._sampleRateHz
@property
def nbytes(self):
"""Number of bytes the recording buffer occupies in memory (`int`)."""
return self._samples.nbytes
@property
def sampleBytes(self):
"""Number of bytes per sample (`int`)."""
return np.float32().itemsize
@property
def spaceRemaining(self):
"""The space remaining in the recording buffer (`int`). Indicates the
number of samples that the buffer can still add before overflowing.
"""
return self._spaceRemaining
@property
def isFull(self):
"""Is the recording buffer full (`bool`)."""
return self._spaceRemaining <= 0
@property
def totalSamples(self):
"""Total number samples the recording buffer can hold (`int`)."""
return self._totalSamples
@property
def writeOffset(self):
"""Index in the sample buffer where new samples will be written when
`write()` is called (`int`).
"""
return self._offset
@property
def lastSample(self):
"""Index of the last sample recorded (`int`). This can be used to slice
the recording buffer, only getting data from the beginning to place
where the last sample was written to.
"""
return self._lastSample
@property
def loopCount(self):
"""Number of times the recording buffer restarted (`int`). Only valid if
`loopback` is ``True``."""
return self._loops
@property
def maxRecordingSize(self):
"""Maximum recording size in kilobytes (`int`).
Since audio recordings tend to consume a large amount of system memory,
one might want to limit the size of the recording buffer to ensure that
the application does not run out of memory. By default, the recording
buffer is set to 24000 KB (or 24 MB). At a sample rate of 48kHz, this
will result in 62.5 seconds of continuous audio being recorded before
the buffer is full.
Setting this value will allocate another recording buffer of appropriate
size. Avoid doing this in any time sensitive parts of your application.
"""
return self._maxRecordingSize
@maxRecordingSize.setter
def maxRecordingSize(self, value):
value = int(value)
# don't do this unless the value changed
if value == self._maxRecordingSize:
return
# if different than last value, update the recording buffer
self._maxRecordingSize = value
self._allocRecBuffer()
def seek(self, offset, absolute=False):
"""Set the write offset.
Use this to specify where to begin writing samples the next time `write`
is called. You should call `seek(0)` when starting a new recording.
Parameters
----------
offset : int
Position in the sample buffer to set.
absolute : bool
Use absolute positioning. Use relative positioning if `False` where
the value of `offset` will be added to the current offset. Default
is `False`.
"""
if not absolute:
self._offset += offset
else:
            self._offset = offset  # absolute position requested by the caller
assert 0 <= self._offset < self._totalSamples
self._spaceRemaining = self._totalSamples - self._offset
def write(self, samples):
"""Write samples to the recording buffer.
Parameters
----------
samples : ArrayLike
Samples to write to the recording buffer, usually of a stream. Must
have the same number of dimensions as the internal array.
Returns
-------
int
Number of samples overflowed. If this is zero then all samples have
been recorded, if not, the number of samples rejected is given.
"""
nSamples = len(samples)
if self.isFull:
if self._policyWhenFull == 'ignore':
return nSamples # samples lost
elif self._policyWhenFull == 'warn':
if not self._warnedRecBufferFull:
logging.warning(
f"Audio recording buffer filled! This means that no "
f"samples are saved beyond {round(self.bufferSecs, 6)} "
f"seconds. Specify a larger recording buffer next time "
f"to avoid data loss.")
logging.flush()
self._warnedRecBufferFull = True
return nSamples
elif self._policyWhenFull == 'error':
raise AudioRecordingBufferFullError(
"Cannot write samples, recording buffer is full.")
else:
return nSamples # whatever
if not nSamples: # no samples came out of the stream, just return
return
if self._spaceRemaining >= nSamples:
self._lastSample = self._offset + nSamples
audioData = samples[:, :]
else:
self._lastSample = self._offset + self._spaceRemaining
audioData = samples[:self._spaceRemaining, :]
self._samples[self._offset:self._lastSample, :] = audioData
self._offset += nSamples
self._spaceRemaining -= nSamples
# Check if the recording buffer is now full. Next call to `poll` will
# not record anything.
if self._spaceRemaining <= 0:
self._spaceRemaining = 0
d = nSamples - self._spaceRemaining
return 0 if d < 0 else d
def clear(self):
# reset all live attributes
self._samples = None
self._offset = 0
self._lastSample = 0
self._spaceRemaining = None
self._totalSamples = None
# reallocate buffer
self._allocRecBuffer()
def getSegment(self, start=0, end=None):
"""Get a segment of recording data as an `AudioClip`.
Parameters
----------
start : float or int
Absolute time in seconds for the start of the clip.
end : float or int
Absolute time in seconds for the end of the clip. If `None` the time
at the last sample is used.
Returns
-------
AudioClip
Audio clip object with samples between `start` and `end`.
"""
idxStart = int(start * self._sampleRateHz)
idxEnd = self._lastSample if end is None else int(
end * self._sampleRateHz)
return AudioClip(
np.array(self._samples[idxStart:idxEnd, :],
dtype=np.float32, order='C'),
sampleRateHz=self._sampleRateHz)
class Microphone(object):
"""Class for recording audio from a microphone or input stream.
Creating an instance of this class will open a stream using the specified
device. Streams should remain open for the duration of your session. When a
stream is opened, a buffer is allocated to store samples coming off it.
    Samples from the input stream will be written to the buffer once
:meth:`~Microphone.start()` is called.
Parameters
----------
device : int or `~psychopy.sound.AudioDevice`
Audio capture device to use. You may specify the device either by index
(`int`) or descriptor (`AudioDevice`).
sampleRateHz : int
Sampling rate for audio recording in Hertz (Hz). By default, 48kHz
        (``sampleRateHz=48000``) is used, which is adequate for most consumer
grade microphones (headsets and built-in).
channels : int
Number of channels to record samples to `1=Mono` and `2=Stereo`.
streamBufferSecs : float
Stream buffer size to pre-allocate for the specified number of seconds.
The default is 2.0 seconds which is usually sufficient.
maxRecordingSize : int
Maximum recording size in kilobytes (Kb). Since audio recordings tend to
consume a large amount of system memory, one might want to limit the
size of the recording buffer to ensure that the application does not run
out of memory. By default, the recording buffer is set to 24000 KB (or
24 MB). At a sample rate of 48kHz, this will result in 62.5 seconds of
continuous audio being recorded before the buffer is full.
audioLatencyMode : int or None
Audio latency mode to use, values range between 0-4. If `None`, the
setting from preferences will be used. Using `3` (exclusive mode) is
adequate for most applications and required if using WASAPI on Windows
for other settings (such audio quality) to take effect. Symbolic
constants `psychopy.sound.audiodevice.AUDIO_PTB_LATENCY_CLASS_` can also
be used.
audioRunMode : int
Run mode for the recording device. Default is standby-mode (`0`) which
allows the system to put the device to sleep. However when the device is
needed, waking the device results in some latency. Using a run mode of
        `1` will keep the microphone running (or 'hot') with reduced latency
        when the recording is started. Cannot be set after initialization at
        this time.
Examples
--------
Capture 10 seconds of audio from the primary microphone::
import psychopy.core as core
import psychopy.sound.Microphone as Microphone
mic = Microphone(bufferSecs=10.0) # open the microphone
mic.start() # start recording
core.wait(10.0) # wait 10 seconds
mic.stop() # stop recording
audioClip = mic.getRecording()
print(audioClip.duration) # should be ~10 seconds
audioClip.save('test.wav') # save the recorded audio as a 'wav' file
The prescribed method for making long recordings is to poll the stream once
per frame (or every n-th frame)::
mic = Microphone(bufferSecs=2.0)
mic.start() # start recording
# main trial drawing loop
mic.poll()
win.flip() # calling the window flip function
mic.stop() # stop recording
audioClip = mic.getRecording()
"""
# Force the use of WASAPI for audio capture on Windows. If `True`, only
# WASAPI devices will be returned when calling static method
# `Microphone.getDevices()`
enforceWASAPI = True
def __init__(self,
device=None,
sampleRateHz=None,
channels=2,
streamBufferSecs=2.0,
maxRecordingSize=24000,
policyWhenFull='warn',
audioLatencyMode=None,
audioRunMode=0):
if not _hasPTB: # fail if PTB is not installed
raise ModuleNotFoundError(
"Microphone audio capture requires package `psychtoolbox` to "
"be installed.")
# get information about the selected device
devices = Microphone.getDevices()
if isinstance(device, AudioDeviceInfo):
self._device = device
elif isinstance(device, (int, float)):
devicesByIndex = {d.deviceIndex: d for d in devices}
if device in devicesByIndex:
self._device = devicesByIndex[device]
else:
raise AudioInvalidCaptureDeviceError(
'No suitable audio recording devices found matching index '
'{}.'.format(device))
else:
# get default device, first enumerated usually
if not devices:
raise AudioInvalidCaptureDeviceError(
'No suitable audio recording devices found on this system. '
'Check connections and try again.')
self._device = devices[0] # use first
logging.info('Using audio device #{} ({}) for audio capture'.format(
self._device.deviceIndex, self._device.deviceName))
# error if specified device is not suitable for capture
if not self._device.isCapture:
raise AudioInvalidCaptureDeviceError(
'Specified audio device not suitable for audio recording. '
'Has no input channels.')
# get the sample rate
self._sampleRateHz = \
self._device.defaultSampleRate if sampleRateHz is None else int(
sampleRateHz)
logging.debug('Set stream sample rate to {} Hz'.format(
self._sampleRateHz))
# set the audio latency mode
if audioLatencyMode is None:
self._audioLatencyMode = int(prefs.hardware["audioLatencyMode"])
else:
self._audioLatencyMode = audioLatencyMode
logging.debug('Set audio latency mode to {}'.format(
self._audioLatencyMode))
assert 0 <= self._audioLatencyMode <= 4 # sanity check for pref
# set the number of recording channels
self._channels = \
self._device.inputChannels if channels is None else int(channels)
logging.debug('Set recording channels to {} ({})'.format(
self._channels, 'stereo' if self._channels > 1 else 'mono'))
if self._channels > self._device.inputChannels:
raise AudioInvalidDeviceError(
'Invalid number of channels for audio input specified.')
# internal recording buffer size in seconds
assert isinstance(streamBufferSecs, (float, int))
self._streamBufferSecs = float(streamBufferSecs)
# PTB specific stuff
self._mode = 2 # open a stream in capture mode
# Handle for the recording stream, should only be opened once per
# session
logging.debug('Opening audio stream for device #{}'.format(
self._device.deviceIndex))
self._stream = audio.Stream(
device_id=self._device.deviceIndex,
latency_class=self._audioLatencyMode,
mode=self._mode,
freq=self._sampleRateHz,
channels=self._channels)
logging.debug('Stream opened')
assert isinstance(audioRunMode, (float, int)) and \
(audioRunMode == 0 or audioRunMode == 1)
self._audioRunMode = int(audioRunMode)
self._stream.run_mode = self._audioRunMode
logging.debug('Set run mode to `{}`'.format(
self._audioRunMode))
# set latency bias
self._stream.latency_bias = 0.0
logging.debug('Set stream latency bias to {} ms'.format(
self._stream.latency_bias))
# pre-allocate recording buffer, called once
self._stream.get_audio_data(self._streamBufferSecs)
logging.debug(
'Allocated stream buffer to hold {} seconds of data'.format(
self._streamBufferSecs))
# status flag
self._statusFlag = NOT_STARTED
# setup recording buffer
self._recording = RecordingBuffer(
sampleRateHz=self._sampleRateHz,
channels=self._channels,
maxRecordingSize=maxRecordingSize,
policyWhenFull=policyWhenFull
)
# setup clips and transcripts dicts
self.clips = {}
self.lastClip = None
self.scripts = {}
self.lastScript = None
logging.debug('Audio capture device #{} ready'.format(
self._device.deviceIndex))
@staticmethod
def getDevices():
"""Get a `list` of audio capture device (i.e. microphones) descriptors.
On Windows, only WASAPI devices are used.
Returns
-------
list
List of `AudioDevice` descriptors for suitable capture devices. If
empty, no capture devices have been found.
"""
try:
Microphone.enforceWASAPI = bool(prefs.hardware["audioForceWASAPI"])
except KeyError:
pass # use default if option not present in settings
# query PTB for devices
if Microphone.enforceWASAPI and sys.platform == 'win32':
allDevs = audio.get_devices(device_type=13)
else:
allDevs = audio.get_devices()
# make sure we have an array of descriptors
allDevs = [allDevs] if isinstance(allDevs, dict) else allDevs
# create list of descriptors only for capture devices
inputDevices = [desc for desc in [
AudioDeviceInfo.createFromPTBDesc(dev) for dev in allDevs]
if desc.isCapture]
return inputDevices
# def warmUp(self):
# """Warm-/wake-up the audio stream.
#
# On some systems the first time `start` is called incurs additional
# latency, whereas successive calls do not. To deal with this, it is
# recommended that you run this warm-up routine prior to capturing audio
# samples. By default, this routine is called when instancing a new
# microphone object.
#
# """
# # We should put an actual test here to see if timing stabilizes after
# # multiple invocations of this function.
# self._stream.start()
# self._stream.stop()
@property
def recording(self):
"""Reference to the current recording buffer (`RecordingBuffer`)."""
return self._recording
@property
def recBufferSecs(self):
"""Capacity of the recording buffer in seconds (`float`)."""
return self.recording.bufferSecs
@property
def maxRecordingSize(self):
"""Maximum recording size in kilobytes (`int`).
Since audio recordings tend to consume a large amount of system memory,
one might want to limit the size of the recording buffer to ensure that
        the application does not run out of memory. By default, the recording
        buffer is set to 24000 KB (or 24 MB). At a sample rate of 48kHz, this
        will result in 62.5 seconds of continuous stereo audio being recorded
        before the buffer is full. Using stereo audio (``nChannels == 2``)
        requires twice the buffer over mono (``nChannels == 1``) for the same
        length clip.
Setting this value will allocate another recording buffer of appropriate
size. Avoid doing this in any time sensitive parts of your application.
"""
return self._recording.maxRecordingSize
@maxRecordingSize.setter
def maxRecordingSize(self, value):
self._recording.maxRecordingSize = value
@property
def latencyBias(self):
"""Latency bias to add when starting the microphone (`float`).
"""
return self._stream.latency_bias
@latencyBias.setter
def latencyBias(self, value):
self._stream.latency_bias = float(value)
@property
def audioLatencyMode(self):
"""Audio latency mode in use (`int`). Cannot be set after
initialization.
"""
return self._audioLatencyMode
@property
def streamBufferSecs(self):
"""Size of the internal audio storage buffer in seconds (`float`).
        To ensure all data is captured, there must be less time elapsed between
        subsequent `poll()` calls than `streamBufferSecs`.
"""
return self._streamBufferSecs
@property
def status(self):
"""Status flag for the microphone. Value can be one of
``psychopy.constants.STARTED`` or ``psychopy.constants.NOT_STARTED``.
For detailed stream status information, use the
:attr:`~psychopy.sound.microphone.Microphone.streamStatus` property.
"""
if hasattr(self, "_statusFlag"):
return self._statusFlag
@status.setter
def status(self, value):
self._statusFlag = value
@property
def streamStatus(self):
"""Status of the audio stream (`AudioDeviceStatus` or `None`).
See :class:`~psychopy.sound.AudioDeviceStatus` for a complete overview
of available status fields. This property has a value of `None` if
the stream is presently closed.
Examples
--------
Get the capture start time of the stream::
# assumes mic.start() was called
captureStartTime = mic.status.captureStartTime
Check if microphone recording is active::
isActive = mic.status.active
Get the number of seconds recorded up to this point::
recordedSecs = mic.status.recordedSecs
"""
currentStatus = self._stream.status
if currentStatus != -1:
return AudioDeviceStatus.createFromPTBDesc(currentStatus)
@property
def isRecBufferFull(self):
"""`True` if there is an overflow condition with the recording buffer.
If this is `True`, then `poll()` is still collecting stream samples but
is no longer writing them to anything, causing stream samples to be
lost.
"""
return self._recording.isFull
@property
def isStarted(self):
"""``True`` if stream recording has been started (`bool`)."""
return self.status == STARTED
def start(self, when=None, waitForStart=0, stopTime=None):
"""Start an audio recording.
Calling this method will begin capturing samples from the microphone and
writing them to the buffer.
Parameters
----------
when : float, int or None
When to start the stream. If the time specified is a floating point
(absolute) system time, the device will attempt to begin recording
at that time. If `None` or zero, the system will try to start
recording as soon as possible.
waitForStart : bool
Wait for sound onset if `True`.
stopTime : float, int or None
Number of seconds to record. If `None` or `-1`, recording will
continue forever until `stop` is called.
Returns
-------
float
Absolute time the stream was started.
"""
# check if the stream has been
if self.isStarted:
raise AudioStreamError(
"Cannot start a stream, already started.")
if self._stream is None:
raise AudioStreamError("Stream not ready.")
# reset the writing 'head'
self._recording.seek(0, absolute=True)
# reset warnings
# self._warnedRecBufferFull = False
startTime = self._stream.start(
repetitions=0,
when=when,
wait_for_start=int(waitForStart),
stop_time=stopTime)
# recording has begun or is scheduled to do so
self._statusFlag = STARTED
logging.debug(
'Scheduled start of audio capture for device #{} at t={}.'.format(
self._device.deviceIndex, startTime))
return startTime
def record(self, when=None, waitForStart=0, stopTime=None):
"""Start an audio recording (alias of `.start()`).
Calling this method will begin capturing samples from the microphone and
writing them to the buffer.
Parameters
----------
when : float, int or None
When to start the stream. If the time specified is a floating point
(absolute) system time, the device will attempt to begin recording
at that time. If `None` or zero, the system will try to start
recording as soon as possible.
waitForStart : bool
Wait for sound onset if `True`.
stopTime : float, int or None
Number of seconds to record. If `None` or `-1`, recording will
continue forever until `stop` is called.
Returns
-------
float
Absolute time the stream was started.
"""
return self.start(
when=when,
waitForStart=waitForStart,
stopTime=stopTime)
def stop(self, blockUntilStopped=True, stopTime=None):
"""Stop recording audio.
Call this method to end an audio recording if in progress. This will
simply halt recording and not close the stream. Any remaining samples
will be polled automatically and added to the recording buffer.
Parameters
----------
blockUntilStopped : bool
Halt script execution until the stream has fully stopped.
stopTime : float or None
Scheduled stop time for the stream in system time. If `None`, the
stream will stop as soon as possible.
Returns
-------
tuple
Tuple containing `startTime`, `endPositionSecs`, `xruns` and
`estStopTime`.
"""
if not self.isStarted:
raise AudioStreamError(
"Cannot stop a stream that has not been started.")
# poll remaining samples, if any
if not self.isRecBufferFull:
self.poll()
startTime, endPositionSecs, xruns, estStopTime = self._stream.stop(
block_until_stopped=int(blockUntilStopped),
stopTime=stopTime)
self._statusFlag = NOT_STARTED
logging.debug(
('Device #{} stopped capturing audio samples at estimated time '
't={}. Total overruns: {} Total recording time: {}').format(
self._device.deviceIndex, estStopTime, xruns, endPositionSecs))
return startTime, endPositionSecs, xruns, estStopTime
def pause(self, blockUntilStopped=True, stopTime=None):
"""Pause a recording (alias of `.stop`).
Call this method to end an audio recording if in progress. This will
simply halt recording and not close the stream. Any remaining samples
will be polled automatically and added to the recording buffer.
Parameters
----------
blockUntilStopped : bool
Halt script execution until the stream has fully stopped.
stopTime : float or None
Scheduled stop time for the stream in system time. If `None`, the
stream will stop as soon as possible.
Returns
-------
tuple
Tuple containing `startTime`, `endPositionSecs`, `xruns` and
`estStopTime`.
"""
return self.stop(blockUntilStopped=blockUntilStopped, stopTime=stopTime)
def close(self):
"""Close the stream.
Should not be called until you are certain you're done with it. Ideally,
you should never close and reopen the same stream within a single
session.
"""
self._stream.close()
logging.debug('Stream closed')
def poll(self):
"""Poll audio samples.
Calling this method adds audio samples collected from the stream buffer
to the recording buffer that have been captured since the last `poll`
        call. Time between calls of this function should be less than
        `streamBufferSecs`. You do not need to call this if you call `stop`
        before the time specified by `streamBufferSecs` elapses since the
        `start` call.
Can only be called between called of `start` (or `record`) and `stop`
(or `pause`).
Returns
-------
int
Number of overruns in sampling.
"""
if not self.isStarted:
raise AudioStreamError(
"Cannot poll samples from audio device, not started.")
# figure out what to do with this other information
audioData, absRecPosition, overflow, cStartTime = \
self._stream.get_audio_data()
if overflow:
logging.warning(
"Audio stream buffer overflow, some audio samples have been "
"lost! To prevent this, ensure `Microphone.poll()` is being "
"called often enough, or increase the size of the audio buffer "
"with `bufferSecs`.")
overruns = self._recording.write(audioData)
return overruns
def bank(self, tag=None, transcribe=False, **kwargs):
"""Store current buffer as a clip within the microphone object.
This method is used internally by the Microphone component in Builder,
don't use it for other applications. Either `stop()` or `pause()` must
be called before calling this method.
Parameters
----------
tag : str or None
Label for the clip.
transcribe : bool or str
Set to the name of a transcription engine (e.g. "GOOGLE") to
transcribe using that engine, or set as `False` to not transcribe.
kwargs : dict
Additional keyword arguments to pass to
:class:`~psychopy.sound.AudioClip.transcribe()`.
"""
# make sure the tag exists in both clips and transcripts dicts
if tag not in self.clips:
self.clips[tag] = []
if tag not in self.scripts:
self.scripts[tag] = []
# append current recording to clip list according to tag
self.lastClip = self.getRecording()
self.clips[tag].append(self.lastClip)
# append current clip's transcription according to tag
if transcribe:
if transcribe in ('Built-in', True, 'BUILT_IN', 'BUILT-IN',
'Built-In', 'built-in'):
engine = "sphinx"
elif type(transcribe) == str:
engine = transcribe
self.lastScript = self.lastClip.transcribe(
engine=engine, **kwargs)
else:
self.lastScript = "Transcription disabled."
self.scripts[tag].append(self.lastScript)
# clear recording buffer
self._recording.clear()
# return banked items
if transcribe:
return self.lastClip, self.lastScript
else:
return self.lastClip
def clear(self):
"""Wipe all clips. Deletes previously banked audio clips.
"""
# clear clips
self.clips = {}
# clear recording
self._recording.clear()
def flush(self):
"""Get a copy of all banked clips, then clear the clips from storage."""
# get copy of clips dict
clips = self.clips.copy()
# clear
self.clear()
return clips
def getRecording(self):
"""Get audio data from the last microphone recording.
Call this after `stop` to get the recording as an `AudioClip` object.
Raises an error if a recording is in progress.
Returns
-------
AudioClip
Recorded data between the last calls to `start` (or `record`) and
`stop`.
"""
if self.isStarted:
raise AudioStreamError(
"Cannot get audio clip, recording was in progress. Be sure to "
"call `Microphone.stop` first.")
return self._recording.getSegment() # full recording
if __name__ == "__main__":
pass
| gpl-3.0 | 1,498,106,364,352,994,300 | 34.909184 | 80 | 0.613424 | false |
fyookball/electrum | ios/ElectronCash/app.py | 1 | 2878 | #
# This file is:
# Copyright (C) 2018 Calin Culianu <[email protected]>
#
# MIT License
#
import os
from electroncash_gui.ios_native.monkeypatches import MonkeyPatches
from electroncash.util import set_verbosity
from electroncash_gui.ios_native import ElectrumGui
from electroncash_gui.ios_native.utils import call_later, get_user_dir, cleanup_tmp_dir, is_debug_build, NSLogSuppress, NSLog
from electroncash.simple_config import SimpleConfig
from electroncash.networks import set_mainnet, set_testnet
# NB: This is called from appdelegate.py "application_didFinishLaunchingWithOptions_"
def main():
cleanup_tmp_dir()
config_options = {
'verbose': is_debug_build(),
'cmd': 'gui',
'gui': 'ios_native',
'cwd': os.getcwd(),
        'whitelist_servers_only' : True, # on iOS we force the whitelist ('preferred') servers only for now, as a security measure
'testnet': 'EC_TESTNET' in os.environ, # You can set the env when testing using Xcode "Scheme" editor
}
if config_options.get('testnet'):
set_testnet()
else:
set_mainnet()
set_verbosity(config_options.get('verbose'), timestamps=False, thread_id=False)
NSLogSuppress(not config_options.get('verbose'))
MonkeyPatches.patch()
config = SimpleConfig(config_options, read_user_dir_function = get_user_dir)
gui = ElectrumGui(config)
call_later(0.010, gui.main) # this is required for the activity indicator to actually animate. Switch to a direct call if not using activity indicator on Splash2
_printStats(config_options) # Prints some startup/debug stats such as Python version and SSL version (this is done in another thread to hopefully not impact startup overhead too much, as importing ssl may be a bit heavy)
return "Bitcoin Cash FTW!"
def _printStats(config_options):
import threading
def thrdfunc(config_options):
# lazy init of SSL
import ssl, sys
from electroncash import version, ecc_fast, schnorr
NSLog("Electron Cash lib version: %s (using server protocol: %s)", version.PACKAGE_VERSION, version.PROTOCOL_VERSION)
NSLog("Python version: %s", ' '.join(sys.version.split('\n')))
NSLog("OpenSSL version: %s", ssl.OPENSSL_VERSION)
NSLog("Fast ECC: %s Fast Schnorr: %s", str(ecc_fast.is_using_fast_ecc()), str(schnorr.has_fast_sign()))
#NSLog("Environment Vars:")
#for k,v in os.environ.copy().items():
# NSLog("%s=%s", str(k), str(v))
#NSLog("Config Vars:")
#for k,v in config_options.copy().items():
# NSLog("config[%s] = %s", str(k), str(v))
# /
# We do this from a thread so as to not delay app startup by importing more stuff we don't strictly need.
threading.Thread(target=thrdfunc, args=(config_options,), daemon=True).start()
| mit | -7,125,776,896,081,296,000 | 43.276923 | 225 | 0.677206 | false |
sipwise/repoapi | build/test/test_utils.py | 1 | 11161 | # Copyright (C) 2017-2020 The Sipwise Team - http://sipwise.com
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import re
from unittest.mock import patch
from django.test import override_settings
from django.test import SimpleTestCase
from build import exceptions as err
from build.conf import settings
from build.utils import get_common_release
from build.utils import get_simple_release
from build.utils import is_release_trunk
from build.utils import ReleaseConfig
from build.utils import trigger_build
from build.utils import trigger_copy_deps
class SimpleIsReleaseTrunkTest(SimpleTestCase):
def test_trunk(self):
ok, val = is_release_trunk("trunk")
self.assertFalse(ok)
self.assertIsNone(val)
def test_mrXX(self):
ok, val = is_release_trunk("release-mr8.5")
self.assertFalse(ok)
self.assertIsNone(val)
def test_release_trunk(self):
ok, val = is_release_trunk("release-trunk-buster")
self.assertTrue(ok)
self.assertEqual(val, "buster")
ok, val = is_release_trunk("release-trunk-bullseye")
self.assertTrue(ok)
self.assertEqual(val, "bullseye")
class SimpleReleaseTest(SimpleTestCase):
def test_trunk(self):
val = get_simple_release("release-trunk-buster")
self.assertEqual(val, "trunk")
def test_branch_release(self):
val = get_simple_release("release-mr8.0")
self.assertEqual(val, "mr8.0")
def test_release_ok(self):
val = get_simple_release("release-mr8.1.1")
self.assertEqual(val, "mr8.1.1")
def test_release_update_ok(self):
val = get_simple_release("release-mr8.1-update")
self.assertEqual(val, "mr8.1")
def test_release_ko(self):
val = get_simple_release("mr8.1.1")
self.assertIsNone(val)
class CommonReleaseTest(SimpleTestCase):
def test_trunk(self):
val = get_common_release("release-trunk-buster")
self.assertEqual(val, "master")
def test_branch_release(self):
val = get_common_release("release-mr8.0")
self.assertEqual(val, "mr8.0")
def test_release_ok(self):
val = get_common_release("mr8.1.1")
self.assertEqual(val, "mr8.1")
def test_release_ko(self):
val = get_common_release("whatever-mr8.1.1")
self.assertIsNone(val)
class ReleaseConfigTestCase(SimpleTestCase):
build_deps = [
"data-hal",
"ngcp-schema",
"libinewrate",
"libswrate",
"libtcap",
"sipwise-base",
"check-tools",
]
@override_settings(BUILD_RELEASES_SKIP=["mr0.1"])
def test_supported_releases(self):
supported = [
"release-trunk-buster",
"release-trunk-bullseye",
"mr8.1.2",
"mr8.1",
"mr7.5.3",
"mr7.5.2",
"mr7.5.1",
"mr7.5",
]
res = ReleaseConfig.supported_releases()
self.assertListEqual(res, supported)
@patch.object(ReleaseConfig, "supported_releases")
def test_supported_releases_dict(self, sr):
res_ok = [
{"release": "release-trunk-buster", "base": "master"},
{"release": "mr8.0", "base": "mr8.0"},
{"release": "mr8.0.1", "base": "mr8.0"},
{"release": "mr7.5.1", "base": "mr7.5"},
]
sr.return_value = [
"release-trunk-buster",
"mr8.0",
"mr8.0.1",
"mr7.5.1",
]
res = ReleaseConfig.supported_releases_dict()
self.assertListEqual(res, res_ok)
def test_no_release_config(self):
with self.assertRaises(err.NoConfigReleaseFile):
ReleaseConfig("fake_release")
def test_no_jenkins_jobs(self):
with self.assertRaises(err.NoJenkinsJobsInfo):
ReleaseConfig("mr0.1")
def test_ok(self):
rc = ReleaseConfig("trunk")
self.assertIsNotNone(rc.config)
self.assertListEqual(list(rc.build_deps.keys()), self.build_deps)
self.assertEqual(rc.debian_release, "buster")
self.assertEqual(len(rc.projects), 73)
def test_debian_release_value(self):
rc = ReleaseConfig("trunk")
self.assertEqual(rc.debian_release, "buster")
rc = ReleaseConfig("release-trunk-bullseye")
self.assertEqual(rc.debian_release, "bullseye")
rc = ReleaseConfig("trunk", "bullseye")
self.assertEqual(rc.debian_release, "bullseye")
# distribution parameter is only used with trunk
rc = ReleaseConfig("release-mr8.1-update", "bullseye")
self.assertEqual(rc.debian_release, "buster")
def test_release_value(self):
rc = ReleaseConfig("trunk")
self.assertEqual(rc.release, "trunk")
def test_branch_tag_value_trunk(self):
rc = ReleaseConfig("trunk")
self.assertEqual(rc.branch, "master")
self.assertIsNone(rc.tag)
def test_branch_tag_value_mrXX(self):
rc = ReleaseConfig("mr8.1")
self.assertEqual(rc.branch, "mr8.1")
self.assertIsNone(rc.tag)
def test_branch_tag_value_mrXXX(self):
rc = ReleaseConfig("mr7.5.2")
self.assertEqual(rc.branch, "mr7.5.2")
self.assertEqual(rc.tag, "mr7.5.2.1")
def test_build_deps(self):
rc = ReleaseConfig("trunk")
build_deps = [
"data-hal",
"ngcp-schema",
"libinewrate",
"libswrate",
"libtcap",
"sipwise-base",
"check-tools",
]
self.assertListEqual(list(rc.build_deps.keys()), build_deps)
def test_build_deps_iter_step_1(self):
rc = ReleaseConfig("trunk")
build_deps = [
"data-hal",
"libinewrate",
"libswrate",
"libtcap",
"sipwise-base",
"check-tools",
]
values = []
for prj in rc.wanna_build_deps(0):
values.append(prj)
self.assertListEqual(build_deps, values)
def test_build_deps_iter_step_2(self):
rc = ReleaseConfig("trunk")
values = []
for prj in rc.wanna_build_deps(1):
values.append(prj)
self.assertListEqual(["ngcp-schema"], values)
@patch("build.utils.open_jenkins_url")
class TriggerBuild(SimpleTestCase):
def test_project_build(self, openurl):
params = {
"project": "kamailio-get-code",
"release_uuid": "UUID_mr8.2",
"trigger_release": "release-mr8.2",
"trigger_branch_or_tag": "branch/mr8.2",
"trigger_distribution": "buster",
"uuid": "UUID_A",
}
url = (
"{base}/job/{project}/buildWithParameters?"
"token={token}&cause={trigger_release}&uuid={uuid}&"
"release_uuid={release_uuid}&"
"branch=mr8.2&tag=none&"
"release={trigger_release}&distribution={trigger_distribution}"
)
res = trigger_build(**params)
params["base"] = settings.JENKINS_URL
params["token"] = settings.JENKINS_TOKEN
self.assertEqual(res, "{base}/job/{project}/".format(**params))
openurl.assert_called_once_with(url.format(**params))
def test_project_build_uuid(self, openurl):
params = {
"project": "kamailio-get-code",
"release_uuid": "UUID_mr8.2",
"trigger_release": "release-mr8.2",
"trigger_branch_or_tag": "branch/mr8.2",
"trigger_distribution": "buster",
}
res = [trigger_build(**params), trigger_build(**params)]
params["base"] = settings.JENKINS_URL
params["token"] = settings.JENKINS_TOKEN
self.assertEqual(res[0], "{base}/job/{project}/".format(**params))
self.assertEqual(res[0], res[1])
uuids = list()
self.assertEqual(len(openurl.call_args_list), 2)
for call in openurl.call_args_list:
m = re.match(r".+&uuid=([^&]+)&.+", str(call))
self.assertIsNotNone(m)
uuids.append(m.groups(0))
self.assertNotEqual(uuids[0], uuids[1])
def test_copy_debs_build(self, openurl):
params = {
"release": "release-mr8.2",
"internal": True,
"release_uuid": "UUID_mr8.2",
"uuid": "UUID_A",
}
url = (
"{base}/job/{project}/buildWithParameters?"
"token={token}&cause={release}&uuid={uuid}&"
"release_uuid={release_uuid}&"
"release=mr8.2&internal=true"
)
res = trigger_copy_deps(**params)
params["project"] = "release-copy-debs-yml"
params["base"] = settings.JENKINS_URL
params["token"] = settings.JENKINS_TOKEN
self.assertEqual(res, "{base}/job/{project}/".format(**params))
openurl.assert_called_once_with(url.format(**params))
def test_project_build_trunk(self, openurl):
params = {
"project": "kamailio-get-code",
"release_uuid": "UUID_mr8.2",
"trigger_release": "trunk",
"trigger_branch_or_tag": "branch/master",
"trigger_distribution": "buster",
"uuid": "UUID_A",
}
url = (
"{base}/job/{project}/buildWithParameters?"
"token={token}&cause={trigger_release}&uuid={uuid}&"
"release_uuid={release_uuid}&"
"branch=master&tag=none&"
"release=trunk&distribution={trigger_distribution}"
)
res = trigger_build(**params)
params["base"] = settings.JENKINS_URL
params["token"] = settings.JENKINS_TOKEN
self.assertEqual(res, "{base}/job/{project}/".format(**params))
openurl.assert_called_once_with(url.format(**params))
def test_copy_debs_build_trunk(self, openurl):
params = {
"release": "release-trunk-buster",
"internal": True,
"release_uuid": "UUID_master",
"uuid": "UUID_B",
}
url = (
"{base}/job/{project}/buildWithParameters?"
"token={token}&cause={release}&uuid={uuid}&"
"release_uuid={release_uuid}&"
"release=release-trunk-buster&internal=true"
)
res = trigger_copy_deps(**params)
params["project"] = "release-copy-debs-yml"
params["base"] = settings.JENKINS_URL
params["token"] = settings.JENKINS_TOKEN
self.assertEqual(res, "{base}/job/{project}/".format(**params))
openurl.assert_called_once_with(url.format(**params))
| gpl-3.0 | -7,227,835,457,043,220,000 | 33.55418 | 77 | 0.585431 | false |
RNAcentral/rnacentral-import-pipeline | tests/databases/gtrnadb/parser_test.py | 1 | 5743 | # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
import attr
import pytest
from rnacentral_pipeline.databases import data
from rnacentral_pipeline.databases.gtrnadb import parser
@pytest.fixture(scope="module")
def simple():
tax_file = Path("taxonomy.db")
with open("data/gtrnadb/simple.json", "r") as raw:
yield list(parser.parse(raw, tax_file))
@pytest.fixture(scope="module")
def version2():
with open("data/gtrnadb/version2.json", "r") as raw:
tax_file = Path("taxonomy.db")
yield list(parser.parse(raw, tax_file))
def test_it_can_generate_all_entries(simple):
assert len(simple) == 984 # 16 pseudogenes
def test_it_generates_correct_entries(simple):
assert attr.asdict(simple[0]) == attr.asdict(
data.Entry(
primary_id="tRNA-Ala-CGC-1-1:CP000828.1:603738-603810",
accession="CP000828.1:tRNA-Ala-CGC-1-1",
ncbi_tax_id=329726,
database="GTRNADB",
sequence="GGGGAATTAGCTCAGCTGGTAGAGTGCTGCGATCGCACCGCAGAGGTCAGGGGTTCGAATCCCCTATTCTCCA",
regions=[],
rna_type="tRNA",
url="http://gtrnadb.ucsc.edu/genomes/bacteria/Acar_mari_MBIC11017/genes/tRNA-Ala-CGC-1-1.html",
seq_version="1",
note_data={
"anticodon": "CGC",
"anticodon_positions": [
{
"relative_start": 34,
"relative_stop": 36,
}
],
"isotype": "Ala",
"score": 72.7,
"url": "http://gtrnadb.ucsc.edu/genomes/bacteria/Acar_mari_MBIC11017/genes/tRNA-Ala-CGC-1-1.html",
},
secondary_structure=data.SecondaryStructure.empty(),
references=[
data.Reference(
authors="Chan P.P., Lowe T.M.",
location="Nucl. Acids Res. 37(Database issue)",
title=(
"GtRNAdb: A database of transfer RNA genes detected in "
"genomic sequence"
),
pmid=18984615,
doi="10.1093/nar/gkn787.",
)
],
chromosome="chr",
species="Acaryochloris marina MBIC11017",
common_name=None,
anticodon="CGC",
lineage="Bacteria; Cyanobacteria; Synechococcales; Acaryochloridaceae; Acaryochloris; Acaryochloris marina MBIC11017",
gene="tRNA-Ala-CGC-1-1",
optional_id="tRNA-Ala-CGC-1-1",
product="tRNA-Ala (CGC)",
parent_accession="CP000828.1",
description="Acaryochloris marina MBIC11017 tRNA-Ala (CGC)",
mol_type="genomic DNA",
location_start=1,
location_end=73,
gene_synonyms=["chr.trna27-AlaCGC"],
)
)
def test_it_generates_all_entries(version2):
assert len(version2) == 1000
def test_it_creates_correct_entries(version2):
assert attr.asdict(version2[3]) == attr.asdict(
data.Entry(
primary_id="tRNA-Arg-CCG-1-1:CP003168.1:421631-421753",
accession="CP003168.1:tRNA-Arg-CCG-1-1",
ncbi_tax_id=673860,
database="GTRNADB",
sequence="GGGCCCGTGGGGTAGCTTGGATATCCTAGGGGCCTCCGGAGCCCCGGACCCGGGTTCGAATCCCGGCGGGCCCG",
regions=[],
rna_type="tRNA",
url="http://gtrnadb.ucsc.edu/genomes/archaea/Acid_MAR08_339/genes/tRNA-Arg-CCG-1-1.html",
seq_version="1",
note_data={
"anticodon": "CCG",
"anticodon_positions": [{"relative_start": 36, "relative_stop": 38}],
"isotype": "Arg",
"score": 73.6,
"url": "http://gtrnadb.ucsc.edu/genomes/archaea/Acid_MAR08_339/genes/tRNA-Arg-CCG-1-1.html",
},
secondary_structure=data.SecondaryStructure.empty(),
references=[
data.Reference(
authors="Chan P.P., Lowe T.M.",
location="Nucl. Acids Res. 37(Database issue)",
title=(
"GtRNAdb: A database of transfer RNA genes detected in "
"genomic sequence"
),
pmid=18984615,
doi="10.1093/nar/gkn787.",
)
],
chromosome="chr",
species="Aciduliprofundum sp. MAR08-339",
common_name=None,
anticodon="CCG",
lineage="Archaea; Euryarchaeota; Diaforarchaea group; DHVE2 group; Aciduliprofundum; unclassified Aciduliprofundum; Aciduliprofundum sp. MAR08-339",
gene="tRNA-Arg-CCG-1-1",
optional_id="tRNA-Arg-CCG-1-1",
product="tRNA-Arg (CCG)",
parent_accession="CP003168.1",
description="Aciduliprofundum sp. MAR08-339 tRNA Arginine with anticodon CCG",
mol_type="genomic DNA",
location_start=1,
location_end=123,
gene_synonyms=["chr.trna10-ArgCCG"],
)
)
| apache-2.0 | -7,829,339,666,094,141,000 | 37.543624 | 160 | 0.563991 | false |
grovesdixon/metaTranscriptomes | scripts/parse_codeml_pairwise_outputBACKUP.py | 1 | 6189 | #!/usr/bin/env python
##parse_codeml_pairwise_output.py
##written 6/26/14 by Groves Dixon
ProgramName = 'parse_codeml_pairwise_output.py'
LastUpdated = '6/26/14'
By = 'Groves Dixon'
VersionNumber = '1.0'
print "\nRunning Program {}...".format(ProgramName)
VersionString = '{} version {} Last Updated {} by {}'.format(ProgramName, VersionNumber, LastUpdated, By)
Description = '''
Description:
Parses a list of codeml output files that were generated using pair-wise
dN/dS estimation (runmode -2). Pairs are set up against one base species
(set as spp1) and all other species (a list file)
'''
AdditionalProgramInfo = '''
Additional Program Information:
'''
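##Example invocation (the species tag and file names are illustrative only;
##the flags match the argparse definitions below):
##  python parse_codeml_pairwise_output.py -f *.codeml -spp1 Amil -sppList speciesList.txt -o pairwise_dNdS.txt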
##Import Modules
import time
import argparse
from sys import argv
from sys import exit
import numpy as np
Start_time = time.time() ##keeps track of how long the script takes to run
##Set Up Argument Parsing
parser = argparse.ArgumentParser(description=Description, epilog=AdditionalProgramInfo) ##create argument parser that will automatically return help texts from global variables above
parser.add_argument('-f', required = True, dest = 'files', nargs="+", help = 'A glob to the codeml output files (probably *.codeml)')
parser.add_argument('-spp1', required = True, dest = 'spp1', help = 'The search tag for species 1')
parser.add_argument('-sppList', required = True, dest = 'sppList', help = 'The List of species to pair with species 1')
parser.add_argument('-o', required = True, dest = 'out', help = 'The desired output file name')
args = parser.parse_args()
#Assign Arguments
FileList = args.files
Spp1 = args.spp1
SppListName = args.sppList
OutfileName = args.out
SppList = []
with open(SppListName, 'r') as infile:
for line in infile:
SppList.append(line.strip("\n"))
def read_files(FileList, Spp1, SppList):
'''Function to reads through each file and parses out
dN and dS estimates for the specified species pair.
'''
print "\nLooking for data in {} codeml output files...".format(len(FileList))
geneList = []
dNList = []
dSList = []
speciesList = []
highDScount = 0
for species in SppList:
if species == Spp1:
continue
for file in FileList:
with open(file, 'r') as infile:
hit = 0
hitCount = 0 #this should never exceed 1
for line in infile:
if hitCount > 1:
exit("Found more than one instance of pairing in a file. Something is wrong.")
if hit == 0:
##look for your species pair
if "("+Spp1+")" in line:
if "("+species+")" in line:
if "..." in line:
hit = 1
continue
elif hit == 1:
if "dN/dS=" in line:
line = line.split()
try:
dn = line[10]
ds = line[13]
except IndexError: #occurs sometimes when dS is very large
#the dn value is also sometimes so high it must be split differently
#this probably means its a bad alignment/ortholog call, but pasrse it anyway
try:
dn = line[10]
ds = line[12]
                                    #it's rare, but possible that N is double digits and S is not, so only "strip" the = from the front of ds if it's there
if "=" in ds:
ds = ds.split('=')[1] #split the large ds value assuming that dS is >= 10.0 but dN is not
except IndexError:
dn = line[9].split('=')[1] #this means that the dN value was also >= 10.0, so grab it differently
ds = line[11].split('=')[1] #dS is also in a different place because of the big dN, so grab it
                            geneName = file.replace(".codeml", "") #str.strip() would also trim matching leading/trailing characters
geneList.append(geneName)
dNList.append(dn)
dSList.append(ds)
speciesList.append(species)
hit = 0
hitCount += 1
# print geneName
# print species
# print dn
return geneList, dNList, dSList, speciesList
def output(OutfileName, geneList, dNList, dSList, speciesList):
"""Outputs the data into a table"""
badValues = []
lineNums = []
with open(OutfileName, 'w') as out:
out.write("EST\tspecies\tdN\tdS")
for i in range(len(geneList)):
#########
##there is a bug that occurs when the synonymous substitution rate is >99.99
#these are obviously wrong anyway and they stop the output from uploading into R so skip them
fourData = 'TRUE'
outList = [geneList[i], speciesList[i], dNList[i], dSList[i]]
try:
float(dNList[i])
float(dSList[i])
except ValueError:
badValues.append([dNList[i], dSList[i]])
lineNums.append(i)
continue
for x in outList:
if x == "":
fourData = 'FALSE'
if fourData == 'FALSE':
continue
###########
outString = "\n{}\t{}\t{}\t{}".format(geneList[i], speciesList[i], dNList[i], dSList[i])
out.write("\n{}\t{}\t{}\t{}".format(geneList[i], speciesList[i], dNList[i], dSList[i]))
geneList, dNList, dSList, speciesList = read_files(FileList, Spp1, SppList)
output(OutfileName, geneList, dNList, dSList, speciesList)
#return time to run
Time = time.time() - Start_time
print('\nTime took to run: {}'.format(Time))
| mit | 5,886,263,383,535,908,000 | 40.536913 | 182 | 0.527549 | false |
Wintermute0110/plugin.program.advanced.emulator.launcher | tests/fakes.py | 1 | 8256 | from abc import ABCMeta, abstractmethod
from resources.objects import *
from resources.utils import *
from resources.scrap import *
class FakeRomSetRepository(ROMSetRepository):
def __init__(self, roms):
self.roms = roms
def find_by_launcher(self, launcher):
return self.roms
def save_rom_set(self, launcher, roms):
self.roms = roms
def delete_all_by_launcher(self, launcher):
self.roms = {}
class FakeExecutor(ExecutorABC):
def __init__(self):
self.actualApplication = None
self.actualArgs = None
super(FakeExecutor, self).__init__(None)
def getActualApplication(self):
return self.actualApplication
def getActualArguments(self):
return self.actualArgs
def execute(self, application, arguments, non_blocking):
self.actualApplication = application
self.actualArgs = arguments
pass
class FakeClass():
def FakeMethod(self, value, key, launcher):
self.value = value
class FakeFile(FileName):
def __init__(self, pathString):
self.fakeContent = ''
self.path_str = pathString
self.path_tr = pathString
self.exists = self.exists_fake
self.write = self.write_fake
def setFakeContent(self, content):
self.fakeContent = content
def getFakeContent(self):
return self.fakeContent
def loadFileToStr(self, encoding = 'utf-8'):
return self.fakeContent
def readAllUnicode(self, encoding='utf-8'):
contents = unicode(self.fakeContent)
return contents
def saveStrToFile(self, data_str, encoding = 'utf-8'):
self.fakeContent = data_str
def write_fake(self, bytes):
self.fakeContent = self.fakeContent + bytes
def open(self, mode):
pass
def close(self):
pass
def writeAll(self, bytes, flags='w'):
self.fakeContent = self.fakeContent + bytes
def pjoin(self, *args):
child = FakeFile(self.path_str)
child.setFakeContent(self.fakeContent)
for arg in args:
child.path_str = os.path.join(child.path_str, arg)
child.path_tr = os.path.join(child.path_tr, arg)
return child
def switchExtension(self, targetExt):
switched_fake = super(FakeFile, self).switchExtension(targetExt)
#switched_fake = FakeFile(switched_type.getPath())
switched_fake.setFakeContent(self.fakeContent)
return switched_fake
def exists_fake(self):
return True
def scanFilesInPathAsFileNameObjects(self, mask = '*.*'):
return []
    #backwards compatibility
def __create__(self, path):
return FakeFile(path)
class Fake_Paths:
def __init__(self, fake_base, fake_addon_id = 'ael-tests'):
# --- Base paths ---
self.ADDONS_DATA_DIR = FileName(fake_base, isdir = True)
self.ADDON_DATA_DIR = self.ADDONS_DATA_DIR.pjoin(fake_addon_id, isdir = True)
self.PROFILE_DIR = self.ADDONS_DATA_DIR.pjoin('profile', isdir = True)
self.HOME_DIR = self.ADDONS_DATA_DIR.pjoin('home', isdir = True)
self.ADDONS_DIR = self.HOME_DIR.pjoin('addons', isdir = True)
self.ADDON_CODE_DIR = self.ADDONS_DIR.pjoin(fake_addon_id, isdir = True)
self.ICON_FILE_PATH = self.ADDON_CODE_DIR.pjoin('media/icon.png')
self.FANART_FILE_PATH = self.ADDON_CODE_DIR.pjoin('media/fanart.jpg')
# --- Databases and reports ---
self.CATEGORIES_FILE_PATH = self.ADDON_DATA_DIR.pjoin('categories.xml')
self.FAV_JSON_FILE_PATH = self.ADDON_DATA_DIR.pjoin('favourites.json')
self.COLLECTIONS_FILE_PATH = self.ADDON_DATA_DIR.pjoin('collections.xml')
self.VCAT_TITLE_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_title.xml')
self.VCAT_YEARS_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_years.xml')
self.VCAT_GENRE_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_genre.xml')
self.VCAT_DEVELOPER_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_developers.xml')
self.VCAT_NPLAYERS_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_nplayers.xml')
self.VCAT_ESRB_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_esrb.xml')
self.VCAT_RATING_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_rating.xml')
self.VCAT_CATEGORY_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_category.xml')
# Launcher app stdout/stderr file
self.LAUNCH_LOG_FILE_PATH = self.ADDON_DATA_DIR.pjoin('launcher.log')
self.RECENT_PLAYED_FILE_PATH = self.ADDON_DATA_DIR.pjoin('history.json')
self.MOST_PLAYED_FILE_PATH = self.ADDON_DATA_DIR.pjoin('most_played.json')
self.BIOS_REPORT_FILE_PATH = self.ADDON_DATA_DIR.pjoin('report_BIOS.txt')
self.LAUNCHER_REPORT_FILE_PATH = self.ADDON_DATA_DIR.pjoin('report_Launchers.txt')
# --- Offline scraper databases ---
self.GAMEDB_INFO_DIR = self.ADDON_CODE_DIR.pjoin('GameDBInfo', isdir = True)
self.GAMEDB_JSON_BASE_NOEXT = 'GameDB_info'
self.LAUNCHBOX_INFO_DIR = self.ADDON_CODE_DIR.pjoin('LaunchBox', isdir = True)
self.LAUNCHBOX_JSON_BASE_NOEXT = 'LaunchBox_info'
# --- Artwork and NFO for Categories and Launchers ---
self.CATEGORIES_ASSET_DIR = self.ADDON_DATA_DIR.pjoin('asset-categories', isdir = True)
self.COLLECTIONS_ASSET_DIR = self.ADDON_DATA_DIR.pjoin('asset-collections', isdir = True)
self.LAUNCHERS_ASSET_DIR = self.ADDON_DATA_DIR.pjoin('asset-launchers', isdir = True)
self.FAVOURITES_ASSET_DIR = self.ADDON_DATA_DIR.pjoin('asset-favourites', isdir = True)
self.VIRTUAL_CAT_TITLE_DIR = self.ADDON_DATA_DIR.pjoin('db_title', isdir = True)
self.VIRTUAL_CAT_YEARS_DIR = self.ADDON_DATA_DIR.pjoin('db_year', isdir = True)
self.VIRTUAL_CAT_GENRE_DIR = self.ADDON_DATA_DIR.pjoin('db_genre', isdir = True)
self.VIRTUAL_CAT_DEVELOPER_DIR = self.ADDON_DATA_DIR.pjoin('db_developer', isdir = True)
self.VIRTUAL_CAT_NPLAYERS_DIR = self.ADDON_DATA_DIR.pjoin('db_nplayer', isdir = True)
self.VIRTUAL_CAT_ESRB_DIR = self.ADDON_DATA_DIR.pjoin('db_esrb', isdir = True)
self.VIRTUAL_CAT_RATING_DIR = self.ADDON_DATA_DIR.pjoin('db_rating', isdir = True)
self.VIRTUAL_CAT_CATEGORY_DIR = self.ADDON_DATA_DIR.pjoin('db_category', isdir = True)
self.ROMS_DIR = self.ADDON_DATA_DIR.pjoin('db_ROMs', isdir = True)
self.COLLECTIONS_DIR = self.ADDON_DATA_DIR.pjoin('db_Collections', isdir = True)
self.REPORTS_DIR = self.ADDON_DATA_DIR.pjoin('reports', isdir = True)
class FakeScraper(Scraper):
def __init__(self, settings, launcher, rom_data_to_apply = None):
self.rom_data_to_apply = rom_data_to_apply
scraper_settings = ScraperSettings(1,1,False,True)
super(FakeScraper, self).__init__(scraper_settings, launcher, True, [])
def getName(self):
return 'FakeScraper'
def supports_asset_type(self, asset_info):
return True
def _get_candidates(self, searchTerm, romPath, rom):
return ['fake']
def _load_metadata(self, candidate, romPath, rom):
gamedata = self._new_gamedata_dic()
if self.rom_data_to_apply :
gamedata['title'] = self.rom_data_to_apply['m_name'] if 'm_name' in self.rom_data_to_apply else ''
gamedata['year'] = self.rom_data_to_apply['m_year'] if 'm_year' in self.rom_data_to_apply else ''
gamedata['genre'] = self.rom_data_to_apply['m_genre'] if 'm_genre' in self.rom_data_to_apply else ''
gamedata['developer'] = self.rom_data_to_apply['m_developer'] if 'm_developer' in self.rom_data_to_apply else ''
gamedata['plot'] = self.rom_data_to_apply['m_plot'] if 'm_plot' in self.rom_data_to_apply else ''
else:
gamedata['title'] = romPath.getBase_noext()
def _load_assets(self, candidate, romPath, rom):
pass
| gpl-2.0 | -8,512,910,359,609,642,000 | 42.005208 | 123 | 0.622941 | false |
coolbombom/CouchPotatoServer | couchpotato/core/downloaders/transmission/main.py | 1 | 10725 | from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import isInt
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta
import httplib
import json
import os.path
import re
import traceback
import urllib2
log = CPLog(__name__)
class Transmission(Downloader):
type = ['torrent', 'torrent_magnet']
log = CPLog(__name__)
def download(self, data, movie, filedata = None):
log.info('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('type')))
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
# Set parameters for Transmission
params = {
'paused': self.conf('paused', default = 0),
}
if len(self.conf('directory', default = '')) > 0:
folder_name = self.createFileName(data, filedata, movie)[:-len(data.get('type')) - 1]
params['download-dir'] = os.path.join(self.conf('directory', default = ''), folder_name).rstrip(os.path.sep)
torrent_params = {}
if self.conf('ratio'):
torrent_params = {
'seedRatioLimit': self.conf('ratio'),
'seedRatioMode': self.conf('ratiomode')
}
if not filedata and data.get('type') == 'torrent':
log.error('Failed sending torrent, no data')
return False
# Send request to Transmission
try:
trpc = TransmissionRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
if data.get('type') == 'torrent_magnet':
remote_torrent = trpc.add_torrent_uri(data.get('url'), arguments = params)
torrent_params['trackerAdd'] = self.torrent_trackers
else:
remote_torrent = trpc.add_torrent_file(b64encode(filedata), arguments = params)
if not remote_torrent:
return False
# Change settings of added torrents
elif torrent_params:
trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params)
log.info('Torrent sent to Transmission successfully.')
return self.downloadReturnId(remote_torrent['torrent-added']['hashString'])
except:
log.error('Failed to change settings for transfer: %s', traceback.format_exc())
return False
def getAllDownloadStatus(self):
log.debug('Checking Transmission download status.')
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
# Go through Queue
try:
trpc = TransmissionRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
return_params = {
'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isFinished', 'downloadDir', 'uploadRatio']
}
queue = trpc.get_alltorrents(return_params)
except Exception, err:
log.error('Failed getting queue: %s', err)
return False
if not queue:
return []
statuses = StatusList(self)
# Get torrents status
# CouchPotato Status
#status = 'busy'
#status = 'failed'
#status = 'completed'
# Transmission Status
#status = 0 => "Torrent is stopped"
#status = 1 => "Queued to check files"
#status = 2 => "Checking files"
#status = 3 => "Queued to download"
#status = 4 => "Downloading"
#status = 4 => "Queued to seed"
#status = 6 => "Seeding"
#TODO:
# - add file checking
# - mark the transfer as failed when it has had no peers for some period of time
for item in queue['torrents']:
log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / eta=%s / uploadRatio=%s / confRatio=%s / isFinished=%s', (item['name'], item['id'], item['downloadDir'], item['hashString'], item['percentDone'], item['status'], item['eta'], item['uploadRatio'], self.conf('ratio'), item['isFinished']))
if not os.path.isdir(Env.setting('from', 'renamer')):
log.error('Renamer "from" folder doesn\'t to exist.')
return
if (item['percentDone'] * 100) >= 100 and (item['status'] == 6 or item['status'] == 0) and item['uploadRatio'] > self.conf('ratio'):
try:
trpc.stop_torrent(item['hashString'], {})
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'completed',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': os.path.join(item['downloadDir'], item['name']),
})
if (not os.path.isdir(item['downloadDir'])) and (self.conf('from') in item['downloadDir']):
trpc.remove_torrent(item['id'], "true", {})
except Exception, err:
log.error('Failed to stop and remove torrent "%s" with error: %s', (item['name'], err))
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'failed',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
})
else:
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'busy',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = item['eta'])), # Is ETA in seconds??
})
return statuses
class TransmissionRPC(object):
"""TransmissionRPC lite library"""
def __init__(self, host = 'localhost', port = 9091, username = None, password = None):
super(TransmissionRPC, self).__init__()
self.url = 'http://' + host + ':' + str(port) + '/transmission/rpc'
self.tag = 0
self.session_id = 0
self.session = {}
if username and password:
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager), urllib2.HTTPDigestAuthHandler(password_manager))
opener.addheaders = [('User-agent', 'couchpotato-transmission-client/1.0')]
urllib2.install_opener(opener)
elif username or password:
log.debug('User or password missing, not using authentication.')
self.session = self.get_session()
def _request(self, ojson):
self.tag += 1
headers = {'x-transmission-session-id': str(self.session_id)}
request = urllib2.Request(self.url, json.dumps(ojson).encode('utf-8'), headers)
try:
open_request = urllib2.urlopen(request)
response = json.loads(open_request.read())
log.debug('request: %s', json.dumps(ojson))
log.debug('response: %s', json.dumps(response))
if response['result'] == 'success':
log.debug('Transmission action successful')
return response['arguments']
else:
log.debug('Unknown failure sending command to Transmission. Return text is: %s', response['result'])
return False
except httplib.InvalidURL, err:
log.error('Invalid Transmission host, check your config %s', err)
return False
except urllib2.HTTPError, err:
if err.code == 401:
log.error('Invalid Transmission Username or Password, check your config')
return False
elif err.code == 409:
msg = str(err.read())
try:
self.session_id = \
re.search('X-Transmission-Session-Id:\s*(\w+)', msg).group(1)
log.debug('X-Transmission-Session-Id: %s', self.session_id)
# resend the request with the updated header
return self._request(ojson)
except:
log.error('Unable to get Transmission Session-Id %s', err)
else:
log.error('TransmissionRPC HTTPError: %s', err)
except urllib2.URLError, err:
log.error('Unable to connect to Transmission %s', err)
def get_session(self):
post_data = {'method': 'session-get', 'tag': self.tag}
return self._request(post_data)
def add_torrent_uri(self, torrent, arguments):
arguments['filename'] = torrent
post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag}
return self._request(post_data)
def add_torrent_file(self, torrent, arguments):
arguments['metainfo'] = torrent
post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag}
return self._request(post_data)
def set_torrent(self, torrent_id, arguments):
arguments['ids'] = torrent_id
post_data = {'arguments': arguments, 'method': 'torrent-set', 'tag': self.tag}
return self._request(post_data)
def get_alltorrents(self, arguments):
post_data = {'arguments': arguments, 'method': 'torrent-get', 'tag': self.tag}
return self._request(post_data)
def stop_torrent(self, torrent_id, arguments):
arguments['ids'] = torrent_id
post_data = {'arguments': arguments, 'method': 'torrent-stop', 'tag': self.tag}
return self._request(post_data)
def remove_torrent(self, torrent_id, remove_local_data, arguments):
arguments['ids'] = torrent_id
arguments['delete-local-data'] = remove_local_data
post_data = {'arguments': arguments, 'method': 'torrent-remove', 'tag': self.tag}
return self._request(post_data)
| gpl-3.0 | 1,714,337,725,172,411,100 | 41.9 | 339 | 0.554499 | false |
astroswego/magellanic-structure | src/magstruct/transformations.py | 1 | 1722 | import numpy
from numpy import array, sin, cos
__all__ = [
'Equatorial2Cartesian',
'Rotation3D',
'rotation_matrix_3d'
]
class Equatorial2Cartesian():
def __init__(self, RA_0, Dec_0, D_0):
self.RA_0 = RA_0
self.Dec_0 = Dec_0
self.D_0 = D_0
def fit(self, X, y=None):
return self
def transform(self, X, y=None, **params):
X_new = numpy.empty_like(X)
x, y, z = X_new[:,0], X_new[:,1], X_new[:,2]
RA, Dec, D = X[:,0], X[:,1], X[:,2]
delta_RA = RA - self.RA_0
x[:] = -D * sin(delta_RA) * cos(Dec)
y[:] = D * (sin(Dec) * cos(self.Dec_0) +
sin(self.Dec_0) * cos(delta_RA) * cos(Dec))
z[:] = self.D_0 \
- D * (sin(Dec)*sin(self.Dec_0) + cos(RA)*cos(self.Dec_0)) \
- self.RA_0*cos(Dec)
return X_new
def rotation_matrix_3d(angle, axis):
assert axis in range(3), 'Axis must be 0, 1, or 2'
T = numpy.empty((3, 3), dtype=float)
# find the index of the -sin(angle) term
# this formula is the polynomial which passes through all of the pairs
# (axis, index)
i = axis**2 - 4*axis + 5
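# worked out: axis 0 -> flat index 5 (row 1, col 2), axis 1 -> flat index 2
# (row 0, col 2), axis 2 -> flat index 1 (row 0, col 1)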
# fill the sin(angle) positions before the cos(angle) diagonal, so the
# step-2 slice cannot clobber the diagonal entries (it crosses them when axis == 1)
T.flat[i::3-1] = sin(angle)
T.flat[::3+1] = cos(angle)
# negate the -sin(angle) term, as it is currently just sin(angle)
T.flat[i] *= -1
T[axis,:] = 0
T[:,axis] = 0
T[axis,axis] = 1
return T
class Rotation3D():
def __init__(self, angle, axis):
self.axis = axis
self.angle = angle
self.rotation_matrix = rotation_matrix_3d(angle, axis)
def fit(self, X, y=None):
return self
def transform(self, X, y=None, **params):
return self.rotation_matrix.dot(X)
| mit | -4,956,154,805,142,992,000 | 24.701493 | 74 | 0.522648 | false |
lindemann09/pyForceDAQ | forceDAQ/data_handling/read_force_data.py | 1 | 2147 | """
Functions to read your force and event data
"""
__author__ = 'Oliver Lindemann'
import os
import sys
import gzip
from collections import OrderedDict
import numpy as np
TAG_COMMENTS = "#"
TAG_UDPDATA = TAG_COMMENTS + "UDP"
TAG_DAQEVENTS = TAG_COMMENTS + "T"
def _csv(line):
return list(map(lambda x: x.strip(), line.split(",")))
def DataFrameDict(data, varnames):
"""data frame: Dict of numpy arrays
does not require Pandas, but can be easily converted to pandas dataframe
via pandas.DataFrame(data_frame_dict)
"""
rtn = OrderedDict()
for v in varnames:
rtn[v] = []
for row in data:
for v, d in zip(varnames, row):
rtn[v].append(d)
return rtn
def data_frame_to_text(data_frame):
rtn = ",".join(data_frame.keys())
rtn += "\n"
for x in np.array(list(data_frame.values())).T:
rtn += ",".join(x) + "\n"
return rtn
def read_raw_data(path):
"""reading trigger and udp data
Returns: data, udp_event, daq_events and comments
data, udp_event, daq_events: DataFrameDict
comments: text string
"""
daq_events = []
udp_events = []
comments = ""
data = []
varnames = None
app_dir = os.path.split(sys.argv[0])[0]
path = os.path.abspath(os.path.join(app_dir, path))
if path.endswith("gz"):
fl = gzip.open(path, "rt")
else:
fl = open(path, "rt")
for ln in fl:
if ln.startswith(TAG_COMMENTS):
comments += ln
if ln.startswith(TAG_UDPDATA + ","):
udp_events.append(_csv(ln[len(TAG_UDPDATA) + 1:]))
elif ln.startswith(TAG_DAQEVENTS):
daq_events.append(_csv(ln[len(TAG_DAQEVENTS) + 1:]))
else:
# data
if varnames is None:
# first row contains varnames
varnames = _csv(ln)
else:
data.append(_csv(ln))
fl.close()
return (DataFrameDict(data, varnames),
DataFrameDict(udp_events, ["time", "value"]),
DataFrameDict(daq_events, ["time", "value"]),
comments)
| mit | -4,905,626,410,306,586,000 | 23.123596 | 76 | 0.56218 | false |
cemarchi/biosphere | Src/BioAnalyzer/Analysis/GenePrioritization/Steps/DataIntegration/IntermediateRepresentation/Transformers/MicroRnaToGeneTransformer.py | 1 | 4546 | import math
import statistics
from itertools import groupby
from random import randint
from typing import Dict, Tuple, Counter
import pandas as pd
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Generators import \
IntermediateRepresentationGeneratorBase
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Transformers.SampleTransformerBase import \
SampleTransformerBase
from Src.BioDataManagement.CrossCutting.DTOs.ExpressionLevelStatusDto import ExpressionLevelStatusDto
class MicroRnaToGeneTransformer(SampleTransformerBase):
"""
"""
def __init__(self,
intermediateRepresentationGenerator: IntermediateRepresentationGeneratorBase,
get_global_diff_values_action,
get_mirna_gene_target_action):
super().__init__(intermediateRepresentationGenerator)
self.__get_mirna_gene_target_action = get_mirna_gene_target_action
self.__get_global_diff_values_action = get_global_diff_values_action
def transform(self, from_sample_matrix: pd.DataFrame, is_highly_significant: bool) -> Tuple[pd.DataFrame, Dict[int, ExpressionLevelStatusDto]]:
mirna_gene_targets = {mirna.lower(): g for mirna, g in
self.__get_mirna_gene_targets(from_sample_matrix.columns.tolist()).items()}
mirna_samples = self.__get_mirna_samples(from_sample_matrix, mirna_gene_targets)
id_entrez_list = list(set([id_entrez for mirna_symbol, id_entrez_list in mirna_gene_targets.items()
for id_entrez in id_entrez_list]))
measure_matrix = dict([(g, []) for g in id_entrez_list])
key_func = lambda gene: gene[0]
for patient_id, exp_values in mirna_samples.items():
gene_values = [(id_entrez,
exp_value) for mirna_symbol, exp_value in exp_values.items()
for id_entrez in mirna_gene_targets[mirna_symbol]]
gene_values = sorted(gene_values, key=key_func)
for id_entrez, measures in groupby(gene_values, key_func):
measures = [measure for id_entrez, measure in list(measures) if not math.isnan(measure)]
measure_matrix[id_entrez].append(float('NaN') if not measures else statistics.mean(measures))
gene_matrix = pd.DataFrame.from_dict(measure_matrix).dropna(axis=1,how='all')
gene_matrix = self.intermediateRepresentationGenerator.generate(gene_matrix).dropna(axis=1,how='all')
return gene_matrix, \
self.__get_gene_status(mirna_gene_targets, gene_matrix.columns.tolist(), is_highly_significant)
def __get_mirna_gene_targets(self, mirnas):
gene_targets = {}
fe_target = self.__get_mirna_gene_target_action(mirnas)
gene_targets.update(dict([(t.microrna_symbol, list(set(gene_targets[t.microrna_symbol] + t.id_entrez_genes)))
if t.microrna_symbol in gene_targets
else (t.microrna_symbol, t.id_entrez_genes) for t in fe_target.result_list]))
return gene_targets
def __get_mirna_samples(self, from_sample_matrix, mirna_gene_targets):
from_sample_matrix = from_sample_matrix[list(mirna_gene_targets.keys()) + ['patient_id']]
from_sample_matrix.set_index("patient_id", drop=True, inplace=True)
return from_sample_matrix.to_dict(orient="index")
def __get_gene_status(self, mirna_gene_targets, genes, is_highly_significant):
diff_mirna = [diff for diff in self.__get_global_diff_values_action(is_highly_significant).result.values
if diff.element_id in mirna_gene_targets]
genes_status = [(g, diff.status) for diff in diff_mirna
for g in mirna_gene_targets[diff.element_id] if g in genes]
key_func = lambda gene: gene[0]
genes_status = sorted(genes_status, key=key_func)
genes_status_dict = {}
for id_entrez, status in groupby(genes_status, key_func):
status = list(status)
status_counter = Counter(status)
status = [k for k, v in status_counter.most_common()]
len_status = len(status) - 1
genes_status_dict[id_entrez] = status[0] if len_status == 1 else status[randint(0, len_status)]
return dict([(entrez_id, status[1]) for entrez_id, status in genes_status_dict.items()]) | bsd-3-clause | 261,306,271,858,045,380 | 49.522222 | 147 | 0.653322 | false |
aplanas/kmanga | kmanga/core/models.py | 1 | 21424 | import os.path
from django.conf import settings
from django.db import connection
from django.db import models
from django.db.models import Count
from django.db.models import F
from django.db.models import Q
from django.urls import reverse
from django.utils import timezone
class TimeStampedModel(models.Model):
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Source(TimeStampedModel):
name = models.CharField(max_length=200)
spider = models.CharField(max_length=80)
url = models.URLField(unique=True)
has_footer = models.BooleanField(default=False)
enabled = models.BooleanField(default=True)
def __str__(self):
return self.name
class SourceLanguage(TimeStampedModel):
GERMAN = 'DE'
ENGLISH = 'EN'
SPANISH = 'ES'
FRENCH = 'FR'
ITALIAN = 'IT'
RUSSIAN = 'RU'
PORTUGUESE = 'PT'
LANGUAGE_CHOICES = (
(ENGLISH, 'English'),
(SPANISH, 'Spanish'),
(GERMAN, 'German'),
(FRENCH, 'French'),
(ITALIAN, 'Italian'),
(RUSSIAN, 'Russian'),
(PORTUGUESE, 'Portuguese'),
)
language = models.CharField(max_length=2, choices=LANGUAGE_CHOICES)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
def __str__(self):
return '%s (%s)' % (self.get_language_display(), self.language)
class ConsolidateGenre(TimeStampedModel):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
class Genre(TimeStampedModel):
name = models.CharField(max_length=200)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
# consolidategenre = models.ForeignKey(ConsolidateGenre,
# on_delete=models.CASCADE)
def __str__(self):
return self.name
class AdvRawQuerySet(models.query.RawQuerySet):
"""RawQuerySet subclass with advanced options."""
def __init__(self, raw_query, paged_query, count_query,
model=None, query=None, params=None,
translations=None, using=None, hints=None):
super(AdvRawQuerySet, self).__init__(raw_query, model=model,
query=query,
params=params,
translations=translations,
using=using, hints=hints)
self.raw_query = raw_query
self.paged_query = paged_query
self.count_query = count_query
def __getitem__(self, key):
if isinstance(key, slice):
start, stop = key.start, key.stop
else:
start, stop = key, key + 1
if self.params:
params = self.params + [stop-start, start]
else:
params = (stop-start, start)
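# e.g. queryset[20:30] appends LIMIT 10 OFFSET 20 to paged_query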
return models.query.RawQuerySet(self.paged_query,
model=self.model,
params=params,
translations=self.translations,
using=self._db,
hints=self._hints)
def __len__(self):
cursor = connection.cursor()
cursor.execute(self.count_query, self.params)
return cursor.fetchone()[0]
class MangaQuerySet(models.QuerySet):
def latests(self):
"""Return the lastest mangas with new/updated issues."""
# The correct annotation expression is the next one, but due
# to an error in Django ORM, this empression uses a full GROUP
# BY with the data fields. This produce a slow query.
#
# return self.annotate(
# models.Max('issue__modified')
# ).order_by('-issue__modified__max')
#
# Alternative (without deferreds)
#
# extra_query = '''
# SELECT MAX(core_issue.modified)
# FROM core_issue
# WHERE core_issue.manga_id = core_manga.id
# '''
# Manga.objects.extra({
# 'issue__modified__max': extra_query
# }).order_by('-issue__modified__max')
raw_query = '''
SELECT core_manga.id,
MAX(core_issue.modified) AS issue__modified__max
FROM core_manga
LEFT OUTER JOIN core_issue
ON (core_manga.id = core_issue.manga_id)
GROUP BY core_manga.id
ORDER BY issue__modified__max DESC NULLS LAST,
core_manga.name ASC,
core_manga.url ASC;
'''
paged_query = '''
SELECT core_manga.id,
MAX(core_issue.modified) AS issue__modified__max
FROM core_manga
LEFT OUTER JOIN core_issue
ON (core_manga.id = core_issue.manga_id)
GROUP BY core_manga.id
ORDER BY issue__modified__max DESC NULLS LAST,
core_manga.name ASC,
core_manga.url ASC
LIMIT %s
OFFSET %s;
'''
count_query = '''
SELECT COUNT(*)
FROM core_manga;
'''
return AdvRawQuerySet(raw_query=raw_query,
paged_query=paged_query,
count_query=count_query,
model=self.model,
using=self.db)
def _to_tsquery(self, q):
"""Convert a query to a PostgreSQL tsquery."""
# Remove special chars (except parens)
q = ''.join(c if c.isalnum() or c in '()' else ' ' for c in q)
# Separate parentheses from words
for token in ('(', ')'):
q = q.replace(token, ' %s ' % token)
# Parse the query
op = {
'and': '&',
'or': '|',
'not': '-',
'(': '(',
')': ')',
}
# Join operators
j = '&|'
# Operators that expect and join before
ops_j = '-('
tsquery = []
for token in q.split():
if token in op:
if tsquery and op[token] in ops_j and tsquery[-1] not in j:
tsquery.append(op['and'])
tsquery.append(op[token])
else:
if tsquery and tsquery[-1] not in (j + ops_j):
tsquery.append(op['and'])
tsquery.append('%s:*' % token)
# Add spaces between join operators
tsquery = [(t if t not in j else ' %s ' % t) for t in tsquery]
return ''.join(tsquery)
def is_valid(self, q):
"""Check is the query is a valid query."""
q = self._to_tsquery(q)
# Separate parentesis from words
for token in ('(', ')'):
q = q.replace(token, ' %s ' % token)
s = []
for token in q.split():
if token == '(':
s.append(token)
elif token == ')':
try:
t = s.pop()
except IndexError:
return False
if t != '(':
return False
return not len(s)
def search(self, q):
q = self._to_tsquery(q)
raw_query = '''
SELECT core_manga.*
FROM (
SELECT id
FROM core_manga_fts_view,
to_tsquery(%s) AS q
WHERE document @@ q
ORDER BY ts_rank(document, q) DESC,
name ASC,
url ASC
) AS ids
INNER JOIN core_manga ON core_manga.id = ids.id;
'''
paged_query = '''
SELECT core_manga.*
FROM (
SELECT id
FROM core_manga_fts_view,
to_tsquery(%s) AS q
WHERE document @@ q
ORDER BY ts_rank(document, q) DESC,
name ASC,
url ASC
LIMIT %s
OFFSET %s
) AS ids
INNER JOIN core_manga ON core_manga.id = ids.id;
'''
count_query = '''
SELECT COUNT(*)
FROM core_manga_fts_view
WHERE document @@ to_tsquery(%s);
'''
return AdvRawQuerySet(raw_query=raw_query,
paged_query=paged_query,
count_query=count_query,
model=self.model,
params=[q],
using=self.db)
def refresh(self):
cursor = connection.cursor()
cursor.execute('REFRESH MATERIALIZED VIEW core_manga_fts_view;')
def _cover_path(instance, filename):
return os.path.join(instance.source.spider, filename)
class Manga(TimeStampedModel):
LEFT_TO_RIGHT = 'LR'
RIGHT_TO_LEFT = 'RL'
READING_DIRECTION = (
(LEFT_TO_RIGHT, 'Left-to-right'),
(RIGHT_TO_LEFT, 'Right-to-left'),
)
ONGOING = 'O'
COMPLETED = 'C'
STATUS = (
(ONGOING, 'Ongoing'),
(COMPLETED, 'Completed'),
)
ASC = 'ASC'
DESC = 'DESC'
RANK_ORDER = (
(ASC, 'Ascending'),
(DESC, 'Descending'),
)
name = models.CharField(max_length=200, db_index=True)
# slug = models.SlugField(max_length=200)
# release = models.DateField()
author = models.CharField(max_length=200)
artist = models.CharField(max_length=200)
reading_direction = models.CharField(max_length=2,
choices=READING_DIRECTION,
default=RIGHT_TO_LEFT)
status = models.CharField(max_length=1,
choices=STATUS,
default=ONGOING)
genres = models.ManyToManyField(Genre)
rank = models.FloatField(null=True, blank=True)
rank_order = models.CharField(max_length=4,
choices=RANK_ORDER,
default=ASC)
description = models.TextField()
cover = models.ImageField(upload_to=_cover_path)
url = models.URLField(unique=True, db_index=True)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
objects = MangaQuerySet.as_manager()
def __str__(self):
return self.name
def subscribe(self, user, language=None, issues_per_day=4, paused=False):
"""Subscribe an User to the current manga."""
language = language if language else user.userprofile.language
obj, created = Subscription.all_objects.update_or_create(
manga=self,
user=user,
defaults={
'language': language,
'issues_per_day': issues_per_day,
'paused': paused,
'deleted': False,
})
return obj
def is_subscribed(self, user):
"""Check if an user is subscribed to this manga."""
return self.subscription(user).exists()
def subscription(self, user):
"""Return the users' subscription of this manga."""
return self.subscription_set.filter(user=user)
def languages(self):
"""Return the number of issues per language."""
return self.issue_set\
.values('language')\
.order_by('language')\
.annotate(Count('language'))
class AltName(TimeStampedModel):
name = models.CharField(max_length=200)
manga = models.ForeignKey(Manga, on_delete=models.CASCADE)
def __str__(self):
return self.name
class Issue(TimeStampedModel):
name = models.CharField(max_length=200)
number = models.CharField(max_length=10)
order = models.IntegerField()
language = models.CharField(max_length=2,
choices=SourceLanguage.LANGUAGE_CHOICES)
release = models.DateField()
url = models.URLField(unique=True, max_length=255)
manga = models.ForeignKey(Manga, on_delete=models.CASCADE)
class Meta:
ordering = ('order', 'name')
def __str__(self):
return self.name
def is_sent(self, user):
"""Check if an user has received this issue."""
return self.result(user, status=Result.SENT).exists()
def create_result_if_needed(self, user, status, set_send_date=True):
"""Create `Result` if is new with a status."""
defaults = {'status': status}
if set_send_date:
defaults['send_date'] = timezone.now()
subscription = Subscription.objects.get(
manga=self.manga, user=user)
result, _ = Result.objects.update_or_create(
issue=self,
subscription=subscription,
defaults=defaults)
return result
def result(self, user, status=None):
"""Return the Result for an user for this issue."""
# XXX TODO - Avoid filtering by subscription__deleted using
# the Subscription manager.
query = self.result_set.filter(
subscription__user=user,
subscription__deleted=False)
if status:
query = query.filter(status=status)
return query
def retry_if_failed(self, user):
"""Increment the retry field of `Result` if status is FAIL."""
self.result(user, status=Result.FAILED).update(retry=F('retry') + 1)
class SubscriptionQuerySet(models.QuerySet):
def latests(self, user):
"""Return the latests subscriptions with changes in Result."""
# See the notes from `MangaQuerySet.latests()`
raw_query = '''
SELECT core_subscription.id,
MAX(core_result.modified) AS result__modified__max
FROM core_subscription
LEFT OUTER JOIN core_result
ON (core_subscription.id = core_result.subscription_id)
WHERE core_subscription.deleted = false
AND core_subscription.user_id = %s
GROUP BY core_subscription.id
ORDER BY result__modified__max DESC NULLS LAST,
core_subscription.id ASC;
'''
paged_query = '''
SELECT core_subscription.id,
MAX(core_result.modified) AS result__modified__max
FROM core_subscription
LEFT OUTER JOIN core_result
ON (core_subscription.id = core_result.subscription_id)
WHERE core_subscription.deleted = false
AND core_subscription.user_id = %s
GROUP BY core_subscription.id
ORDER BY result__modified__max DESC NULLS LAST,
core_subscription.id ASC
LIMIT %s
OFFSET %s;
'''
count_query = '''
SELECT COUNT(*)
FROM core_subscription
WHERE core_subscription.deleted = false
AND core_subscription.user_id = %s;
'''
return AdvRawQuerySet(raw_query=raw_query,
paged_query=paged_query,
count_query=count_query,
model=self.model,
params=[user.id],
using=self.db)
class SubscriptionManager(models.Manager):
def get_queryset(self):
"""Exclude deleted subscriptions."""
return super(SubscriptionManager,
self).get_queryset().exclude(deleted=True)
class SubscriptionActiveManager(models.Manager):
def get_queryset(self):
"""Exclude paused and deleted subscriptions."""
return super(SubscriptionActiveManager,
self).get_queryset().exclude(
Q(paused=True) | Q(deleted=True))
class Subscription(TimeStampedModel):
# Number of retries before giving up in a FAILED result
RETRY = 3
manga = models.ForeignKey(Manga, on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
language = models.CharField(max_length=2,
choices=SourceLanguage.LANGUAGE_CHOICES)
issues_per_day = models.IntegerField(default=4)
paused = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
objects = SubscriptionManager.from_queryset(SubscriptionQuerySet)()
actives = SubscriptionActiveManager.from_queryset(SubscriptionQuerySet)()
all_objects = models.Manager()
class Meta:
unique_together = ('manga', 'user')
def __str__(self):
return '%s (%d per day)' % (self.manga, self.issues_per_day)
def issues(self):
"""Return the list of issues in the language of the Subscription."""
return self.manga.issue_set.filter(language=self.language)
def issues_to_send(self, retry=None):
"""Return the list of issues to send, ordered by number."""
if not retry:
retry = Subscription.RETRY
already_sent = Result.objects.processed_last_24hs(self.user,
subscription=self)
remains = max(0, self.issues_per_day-already_sent)
return self.manga.issue_set.filter(
language=self.language
).exclude(
pk__in=self.result_set.filter(
Q(status__in=(Result.PROCESSING, Result.SENT)) |
(Q(status=Result.FAILED) & Q(retry__gt=retry))
).values('issue__id')
).order_by('order')[:remains]
def issues_to_retry(self, retry=None):
"""Return the list of issues to retry, ordered by number."""
# This method doesn't take care about the limits of the user
if not retry:
retry = Subscription.RETRY
return self.manga.issue_set.filter(
language=self.language,
result__subscription=self,
result__status=Result.FAILED,
result__retry__lte=retry
).order_by('order')
def add_sent(self, issue):
"""Add or update a Result to a Subscription."""
# XXX TODO - add_sent is deprecated, use
# Issue.create_result_if_needed, or extend the features inside
# Subscription.
return Result.objects.update_or_create(
issue=issue,
subscription=self,
defaults={
'status': Result.SENT,
'send_date': timezone.now(),
})
def latest_issues(self):
"""Return the list of issues ordered by modified result."""
return self.issues().filter(
result__subscription=self
).annotate(
models.Max('result__modified')
).order_by('-result__modified')
class ResultQuerySet(models.QuerySet):
TIME_DELTA = 2
def latests(self, status=None):
query = self
if status:
query = query.filter(status=status)
return query.order_by('-modified')
def _processed_last_24hs(self, user, subscription=None):
"""Return the list of `Result` processed during the last 24 hours."""
today = timezone.now()
yesterday = today - timezone.timedelta(days=1)
# XXX TODO - Objects are created / modified always after time
# T. If the send process is slow, the error margin can be
# bigger than the one used here.
yesterday += timezone.timedelta(hours=ResultQuerySet.TIME_DELTA)
query = self.filter(
subscription__user=user,
send_date__range=[yesterday, today],
)
if subscription:
query = query.filter(subscription=subscription)
return query
def processed_last_24hs(self, user, subscription=None):
"""Return the number of `Result` processed during the last 24 hours."""
return self._processed_last_24hs(user, subscription).count()
def pending(self):
return self.latests(status=Result.PENDING)
def processing(self):
return self.latests(status=Result.PROCESSING)
def sent(self):
return self.latests(status=Result.SENT)
def failed(self):
return self.latests(status=Result.FAILED)
class Result(TimeStampedModel):
PENDING = 'PE'
PROCESSING = 'PR'
SENT = 'SE'
FAILED = 'FA'
STATUS_CHOICES = (
(PENDING, 'Pending'),
(PROCESSING, 'Processing'),
(SENT, 'Sent'),
(FAILED, 'Failed'),
)
issue = models.ForeignKey(Issue, on_delete=models.CASCADE)
subscription = models.ForeignKey(Subscription, on_delete=models.CASCADE)
status = models.CharField(max_length=2, choices=STATUS_CHOICES,
default=PENDING)
missing_pages = models.IntegerField(default=0)
send_date = models.DateTimeField(null=True, blank=True)
retry = models.IntegerField(default=0)
objects = ResultQuerySet.as_manager()
class Meta:
unique_together = ('issue', 'subscription')
def __str__(self):
return '%s (%s)' % (self.issue, self.get_status_display())
def get_absolute_url(self):
return reverse('result-detail', kwargs={'pk': self.pk})
def set_status(self, status):
self.status = status
# If the result is marked as FAILED, unset the `send_date`.
# In this way, if the result is moved back to PENDING it is not
# counted as SENT. Also, if it is not moved, the user can have
# one more issue for this day.
if status == Result.FAILED:
self.send_date = None
self.save()
def is_pending(self):
return self.status == Result.PENDING
def is_processing(self):
return self.status == Result.PROCESSING
def is_sent(self):
return self.status == Result.SENT
def is_failed(self):
return self.status == Result.FAILED
| gpl-3.0 | -7,570,314,442,123,775,000 | 32.632653 | 80 | 0.56334 | false |
blueboxgroup/neutron | neutron/plugins/sriovnicagent/sriov_nic_agent.py | 1 | 14818 | # Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo.config import cfg
from oslo import messaging
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants as q_constants
from neutron.common import topics
from neutron.common import utils as q_utils
from neutron import context
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.sriovnicagent.common import config # noqa
from neutron.plugins.sriovnicagent.common import exceptions as exc
from neutron.plugins.sriovnicagent import eswitch_manager as esm
LOG = logging.getLogger(__name__)
class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC
target = messaging.Target(version='1.1')
def __init__(self, context, agent, sg_agent):
super(SriovNicSwitchRpcCallbacks, self).__init__()
self.context = context
self.agent = agent
self.sg_agent = sg_agent
def port_update(self, context, **kwargs):
LOG.debug("port_update received")
port = kwargs.get('port')
# Put the port mac address in the updated_devices set.
# Do not store port details, as if they're used for processing
# notifications there is no guarantee the notifications are
# processed in the same order as the relevant API requests.
self.agent.updated_devices.add(port['mac_address'])
LOG.debug("port_update RPC received for port: %s", port['id'])
class SriovNicSwitchAgent(object):
def __init__(self, physical_devices_mappings, exclude_devices,
polling_interval, root_helper):
self.polling_interval = polling_interval
self.root_helper = root_helper
self.setup_eswitch_mgr(physical_devices_mappings,
exclude_devices)
configurations = {'device_mappings': physical_devices_mappings}
self.agent_state = {
'binary': 'neutron-sriov-nic-agent',
'host': cfg.CONF.host,
'topic': q_constants.L2_AGENT_TOPIC,
'configurations': configurations,
'agent_type': q_constants.AGENT_TYPE_NIC_SWITCH,
'start_flag': True}
# Stores port update notifications for processing in the main loop
self.updated_devices = set()
self.context = context.get_admin_context_without_session()
self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
self.sg_plugin_rpc, self.root_helper)
self._setup_rpc()
# Initialize iteration counter
self.iter_num = 0
def _setup_rpc(self):
self.agent_id = 'nic-switch-agent.%s' % socket.gethostname()
LOG.info(_LI("RPC agent_id: %s"), self.agent_id)
self.topic = topics.AGENT
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
# Handle updates from service
self.endpoints = [SriovNicSwitchRpcCallbacks(self.context, self,
self.sg_agent)]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers)
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _report_state(self):
try:
devices = len(self.eswitch_mgr.get_assigned_devices())
self.agent_state.get('configurations')['devices'] = devices
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def setup_eswitch_mgr(self, device_mappings, exclude_devices={}):
self.eswitch_mgr = esm.ESwitchManager(device_mappings,
exclude_devices,
self.root_helper)
def scan_devices(self, registered_devices, updated_devices):
curr_devices = self.eswitch_mgr.get_assigned_devices()
device_info = {}
device_info['current'] = curr_devices
device_info['added'] = curr_devices - registered_devices
# we don't want to process updates for devices that don't exist
device_info['updated'] = updated_devices & curr_devices
# we need to clean up after devices are removed
device_info['removed'] = registered_devices - curr_devices
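# e.g. (illustrative MAC sets): registered={a, b}, current={b, c}, updated={b}
# yields added={c}, updated={b}, removed={a}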
return device_info
def _device_info_has_changes(self, device_info):
return (device_info.get('added')
or device_info.get('updated')
or device_info.get('removed'))
def process_network_devices(self, device_info):
resync_a = False
resync_b = False
self.sg_agent.prepare_devices_filter(device_info.get('added'))
if device_info.get('updated'):
self.sg_agent.refresh_firewall()
# Updated devices are processed the same as new ones, as their
# admin_state_up may have changed. The set union prevents duplicating
# work when a device is new and updated in the same polling iteration.
devices_added_updated = (set(device_info.get('added'))
| set(device_info.get('updated')))
if devices_added_updated:
resync_a = self.treat_devices_added_updated(devices_added_updated)
if device_info.get('removed'):
resync_b = self.treat_devices_removed(device_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def treat_device(self, device, pci_slot, admin_state_up):
if self.eswitch_mgr.device_exists(device, pci_slot):
try:
self.eswitch_mgr.set_device_state(device, pci_slot,
admin_state_up)
except exc.SriovNicError:
LOG.exception(_LE("Failed to set device %s state"), device)
return
if admin_state_up:
# update plugin about port status
self.plugin_rpc.update_device_up(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
LOG.info(_LI("No device with MAC %s defined on agent."), device)
def treat_devices_added_updated(self, devices):
try:
devices_details_list = self.plugin_rpc.get_devices_details_list(
self.context, devices, self.agent_id)
except Exception as e:
LOG.debug("Unable to get port details for devices "
"with MAC address %(devices)s: %(e)s",
{'devices': devices, 'e': e})
# resync is needed
return True
for device_details in devices_details_list:
device = device_details['device']
LOG.debug("Port with MAC address %s is added", device)
if 'port_id' in device_details:
LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
{'device': device, 'details': device_details})
profile = device_details['profile']
self.treat_device(device_details['device'],
profile.get('pci_slot'),
device_details['admin_state_up'])
else:
LOG.info(_LI("Device with MAC %s not defined on plugin"),
device)
return False
def treat_devices_removed(self, devices):
resync = False
for device in devices:
LOG.info(_LI("Removing device with mac_address %s"), device)
try:
dev_details = self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
except Exception as e:
LOG.debug("Removing port failed for device %(device)s "
"due to %(exc)s", {'device': device, 'exc': e})
resync = True
continue
if dev_details['exists']:
LOG.info(_LI("Port %s updated."), device)
else:
LOG.debug("Device %s not defined on plugin", device)
return resync
def daemon_loop(self):
sync = True
devices = set()
LOG.info(_LI("SRIOV NIC Agent RPC Daemon Started!"))
while True:
start = time.time()
LOG.debug("Agent rpc_loop - iteration:%d started",
self.iter_num)
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
devices.clear()
sync = False
device_info = {}
# Save updated devices dict to perform rollback in case
# resync would be needed, and then clear self.updated_devices.
# As the greenthread should not yield between these
# two statements, this should be thread-safe.
updated_devices_copy = self.updated_devices
self.updated_devices = set()
try:
device_info = self.scan_devices(devices, updated_devices_copy)
if self._device_info_has_changes(device_info):
LOG.debug("Agent loop found changes! %s", device_info)
# If treat devices fails - indicates must resync with
# plugin
sync = self.process_network_devices(device_info)
devices = device_info['current']
except Exception:
LOG.exception(_LE("Error in agent loop. Devices info: %s"),
device_info)
sync = True
# Restore devices that were removed from this set earlier
# without overwriting ones that may have arrived since.
self.updated_devices |= updated_devices_copy
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self.polling_interval):
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
self.iter_num = self.iter_num + 1
class SriovNicAgentConfigParser(object):
def __init__(self):
self.device_mappings = {}
self.exclude_devices = {}
def parse(self):
"""Parses device_mappings and exclude_devices.
Parse and validate the consistency in both mappings
"""
self.device_mappings = q_utils.parse_mappings(
cfg.CONF.SRIOV_NIC.physical_device_mappings)
self.exclude_devices = config.parse_exclude_devices(
cfg.CONF.SRIOV_NIC.exclude_devices)
self._validate()
def _validate(self):
"""Validate configuration.
Validate that network_device in excluded_device
exists in device mappings
"""
dev_net_set = set(self.device_mappings.itervalues())
for dev_name in self.exclude_devices.iterkeys():
if dev_name not in dev_net_set:
raise ValueError(_("Device name %(dev_name)s is missing from "
"physical_device_mappings") % {'dev_name':
dev_name})
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
try:
config_parser = SriovNicAgentConfigParser()
config_parser.parse()
device_mappings = config_parser.device_mappings
exclude_devices = config_parser.exclude_devices
except ValueError:
LOG.exception(_LE("Failed on Agent configuration parse. "
"Agent terminated!"))
raise SystemExit(1)
LOG.info(_LI("Physical Devices mappings: %s"), device_mappings)
LOG.info(_LI("Exclude Devices: %s"), exclude_devices)
polling_interval = cfg.CONF.AGENT.polling_interval
root_helper = cfg.CONF.AGENT.root_helper
try:
agent = SriovNicSwitchAgent(device_mappings,
exclude_devices,
polling_interval,
root_helper)
except exc.SriovNicError:
LOG.exception(_LE("Agent Initialization Failed"))
raise SystemExit(1)
# Start everything.
LOG.info(_LI("Agent initialized successfully, now running... "))
agent.daemon_loop()
if __name__ == '__main__':
main()
| apache-2.0 | 2,106,327,654,775,121,000 | 40.740845 | 79 | 0.5687 | false |
Azure/azure-sdk-for-python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_12_01/aio/operations/_certificates_operations.py | 1 | 21769 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CertificatesOperations:
"""CertificatesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2020_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.CertificateCollection"]:
"""Get all certificates for a subscription.
Description for Get all certificates for a subscription.
:param filter: Return only information specified in the filter (using OData syntax). For
example: $filter=KeyVaultId eq 'KeyVaultId'.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CertificateCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_12_01.models.CertificateCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str', skip_quote=True)
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CertificateCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/certificates'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.CertificateCollection"]:
"""Get all certificates in a resource group.
Description for Get all certificates in a resource group.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CertificateCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_12_01.models.CertificateCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CertificateCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates'} # type: ignore
async def get(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> "_models.Certificate":
"""Get a certificate.
Description for Get a certificate.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Certificate, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_12_01.models.Certificate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Certificate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Certificate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
name: str,
certificate_envelope: "_models.Certificate",
**kwargs: Any
) -> "_models.Certificate":
"""Create or update a certificate.
Description for Create or update a certificate.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate.
:type name: str
:param certificate_envelope: Details of certificate, if it exists already.
:type certificate_envelope: ~azure.mgmt.web.v2020_12_01.models.Certificate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Certificate, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_12_01.models.Certificate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Certificate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(certificate_envelope, 'Certificate')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Certificate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
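    # Hypothetical usage sketch for the PUT operation above; the `client`
    # variable and the Certificate fields shown are assumptions for
    # illustration only:
    #
    #     envelope = _models.Certificate(location="westeurope", password="<pfx password>")
    #     cert = await client.certificates.create_or_update("my-rg", "my-cert", envelope)
    #
    # The envelope is serialized with the 'Certificate' mapping and sent as
    # the request body, as implemented above.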
async def delete(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> None:
"""Delete a certificate.
Description for Delete a certificate.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
async def update(
self,
resource_group_name: str,
name: str,
certificate_envelope: "_models.CertificatePatchResource",
**kwargs: Any
) -> "_models.Certificate":
"""Create or update a certificate.
Description for Create or update a certificate.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate.
:type name: str
:param certificate_envelope: Details of certificate, if it exists already.
:type certificate_envelope: ~azure.mgmt.web.v2020_12_01.models.CertificatePatchResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Certificate, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_12_01.models.Certificate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Certificate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(certificate_envelope, 'CertificatePatchResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Certificate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
| mit | -2,230,951,434,480,266,200 | 47.700224 | 174 | 0.638798 | false |
HingeChat/HingeChat | src/hingechat/qt/qChatWidget.py | 1 | 8829 | import re
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QFontMetrics
from PyQt5.QtWidgets import QHBoxLayout
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QSplitter
from PyQt5.QtWidgets import QTextBrowser
from PyQt5.QtWidgets import QTextEdit
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QWidget
from src.hingechat.qt import qtUtils
from src.hinge.utils import *
class QChatWidget(QWidget):
def __init__(self, chat_window, nick, parent=None):
QWidget.__init__(self, parent)
self.chat_window = chat_window
self.nick = nick
self.disabled = False
self.cleared = False
self.url_regex = re.compile(URL_REGEX)
self.chat_log = QTextBrowser()
self.chat_log.setOpenExternalLinks(True)
self.chat_input = QTextEdit()
self.chat_input.textChanged.connect(self.chatInputTextChanged)
self.send_button = QPushButton("Send")
self.send_button.clicked.connect(self.sendMessage)
# Set the min height for the chatlog and a matching fixed height for the send button
chat_input_font_metrics = QFontMetrics(self.chat_input.font())
self.chat_input.setMinimumHeight(chat_input_font_metrics.lineSpacing() * 3)
self.send_button.setFixedHeight(chat_input_font_metrics.lineSpacing() * 3)
hbox = QHBoxLayout()
hbox.addWidget(self.chat_input)
hbox.addWidget(self.send_button)
# Put the chatinput and send button in a wrapper widget so they may be added to the splitter
chat_input_wrapper = QWidget()
chat_input_wrapper.setLayout(hbox)
        chat_input_wrapper.setMinimumHeight(int(chat_input_font_metrics.lineSpacing() * 3.7))  # PyQt expects an int here
# Put the chat log and chat input into a splitter so the user can resize them at will
splitter = QSplitter(Qt.Vertical)
splitter.addWidget(self.chat_log)
splitter.addWidget(chat_input_wrapper)
splitter.setSizes([int(parent.height()), 1])
hbox = QHBoxLayout()
hbox.addWidget(splitter)
self.setLayout(hbox)
self.typing_timer = QTimer()
self.typing_timer.setSingleShot(True)
self.typing_timer.timeout.connect(self.stoppedTyping)
def setRemoteNick(self, nick):
self.nick = nick
def chatInputTextChanged(self):
        # Check if the text change was caused by the text box being cleared, to avoid sending an invalid typing status
if self.cleared:
self.cleared = False
return
if str(self.chat_input.toPlainText())[-1:] == '\n':
self.sendMessage()
else:
# Start a timer to check for the user stopping typing
self.typing_timer.start(TYPING_TIMEOUT)
self.sendTypingStatus(TYPING_START)
def stoppedTyping(self):
self.typing_timer.stop()
if str(self.chat_input.toPlainText()) == '':
self.sendTypingStatus(TYPING_STOP_WITHOUT_TEXT)
else:
self.sendTypingStatus(TYPING_STOP_WITH_TEXT)
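    # Note: the two handlers above implement the widget's typing-status flow:
    # every text change that is not a newline submission or programmatic
    # clearing restarts typing_timer and sends TYPING_START; once
    # TYPING_TIMEOUT ms pass with no further input, stoppedTyping sends
    # TYPING_STOP_WITH_TEXT or TYPING_STOP_WITHOUT_TEXT depending on whether
    # the input box still contains text.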
def sendMessage(self):
        if self.disabled:
            return
self.typing_timer.stop()
text = str(self.chat_input.toPlainText())[:-1]
# Don't send empty messages
if text == '':
return
# Convert URLs into clickable links
text = self.__linkify(text)
# Add the message to the message queue to be sent
self.chat_window.client.getSession(self.remote_id).sendChatMessage(text)
# Clear the chat input
        self.cleared = True  # flag read in chatInputTextChanged
self.chat_input.clear()
self.appendMessage(text, MSG_SENDER)
def sendTypingStatus(self, status):
self.chat_window.client.getSession(self.remote_id).sendTypingMessage(status)
def showNowChattingMessage(self, nick):
self.nick = nick
self.remote_id = self.chat_window.client.getClientId(self.nick)
self.appendMessage("You are now securely chatting with " + self.nick + " :)",
MSG_SERVICE, show_timestamp_and_nick=False)
self.appendMessage("It's a good idea to verify the communcation is secure by selecting "
"\"authenticate buddy\" in the options menu.", MSG_SERVICE, show_timestamp_and_nick=False)
self.addNickButton = QPushButton('Add', self)
self.addNickButton.setGeometry(584, 8, 31, 23)
self.addNickButton.clicked.connect(self.addNickScreen)
self.addNickButton.show()
def addUser(self, user):
nick = str(user.text()).lower()
# Validate the given nick
nickStatus = utils.isValidNick(nick)
if nickStatus == errors.VALID_NICK:
# TODO: Group chats
pass
elif nickStatus == errors.INVALID_NICK_CONTENT:
QMessageBox.warning(self, errors.TITLE_INVALID_NICK, errors.INVALID_NICK_CONTENT)
elif nickStatus == errors.INVALID_NICK_LENGTH:
QMessageBox.warning(self, errors.TITLE_INVALID_NICK, errors.INVALID_NICK_LENGTH)
elif nickStatus == errors.INVALID_EMPTY_NICK:
QMessageBox.warning(self, errors.TITLE_EMPTY_NICK, errors.EMPTY_NICK)
def addNickScreen(self):
self.chat_log.setEnabled(False)
self.chat_input.setEnabled(False)
self.send_button.setEnabled(False)
self.addNickButton.hide()
self.addUserText = QLabel("Enter a username to add a user to the group chat.", self)
self.addUserText.setGeometry(200, 20, 300, 100)
self.addUserText.show()
self.user = QLineEdit(self)
self.user.setGeometry(200, 120, 240, 20)
        self.user.returnPressed.connect(lambda: self.addUser(self.user))
self.user.show()
self.addUserButton = QPushButton('Add User', self)
self.addUserButton.setGeometry(250, 150, 150, 25)
self.addUserButton.clicked.connect(lambda: self.addUser(self.user))
self.addUserButton.show()
self.cancel = QPushButton('Cancel', self)
self.cancel.setGeometry(298, 210, 51, 23)
self.cancel.clicked.connect(lambda: self.chat_log.setEnabled(True))
self.cancel.clicked.connect(lambda: self.chat_input.setEnabled(True))
self.cancel.clicked.connect(lambda: self.send_button.setEnabled(True))
self.cancel.clicked.connect(self.addUserText.hide)
self.cancel.clicked.connect(self.user.hide)
self.cancel.clicked.connect(self.addUserButton.hide)
self.cancel.clicked.connect(self.addNickButton.show)
self.cancel.clicked.connect(self.cancel.hide)
self.cancel.show()
def appendMessage(self, message, source, show_timestamp_and_nick=True):
color = self.__getColor(source)
if show_timestamp_and_nick:
timestamp = '<font color="' + color + '">(' + getTimestamp() + ') <strong>' + \
(self.chat_window.client.nick if source == MSG_SENDER else self.nick) + \
':</strong></font> '
else:
timestamp = ''
# If the user has scrolled up (current value != maximum), do not move the scrollbar
# to the bottom after appending the message
shouldScroll = True
scrollbar = self.chat_log.verticalScrollBar()
        if scrollbar.value() != scrollbar.maximum() and source != MSG_SENDER:
shouldScroll = False
self.chat_log.append(timestamp + message)
# Move the vertical scrollbar to the bottom of the chat log
if shouldScroll:
scrollbar.setValue(scrollbar.maximum())
def __linkify(self, text):
matches = self.url_regex.findall(text)
for match in matches:
text = text.replace(match[0], '<a href="%s">%s</a>' % (match[0], match[0]))
return text
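    # Example of the substitution performed by __linkify above, assuming
    # URL_REGEX yields the full URL as the first group of each match:
    #   "see http://example.com"
    #   -> 'see <a href="http://example.com">http://example.com</a>'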
def __getColor(self, source):
if source == MSG_SENDER:
if qtUtils.is_light_theme:
return '#0000CC'
else:
return '#6666FF'
elif source == MSG_RECEIVER:
if qtUtils.is_light_theme:
return '#CC0000'
else:
return '#CC3333'
else:
if qtUtils.is_light_theme:
return '#000000'
else:
return '#FFFFFF'
def disable(self):
self.disabled = True
self.chat_input.setReadOnly(True)
def enable(self):
self.disabled = False
self.chat_input.setReadOnly(False)
| lgpl-3.0 | 7,082,114,969,052,042,000 | 36.220779 | 117 | 0.619209 | false |
JiahuiZHONG/Internship_Thread | tests/scripts/thread-cert/Cert_5_6_08_ContextManagement.py | 1 | 4303 | #!/usr/bin/python
#
# Copyright (c) 2016, Nest Labs, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import pexpect
import time
import unittest
import node
LEADER = 1
ROUTER = 2
ED = 3
class Cert_5_6_8_ContextManagement(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,4):
self.nodes[i] = node.Node(i)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[LEADER].set_context_reuse_delay(10)
self.nodes[ROUTER].set_panid(0xface)
self.nodes[ROUTER].set_mode('rsdn')
self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER].enable_whitelist()
self.nodes[ED].set_panid(0xface)
self.nodes[ED].set_mode('rsn')
self.nodes[ED].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED].enable_whitelist()
def tearDown(self):
for node in self.nodes.itervalues():
node.stop()
del self.nodes
def test(self):
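        """Exercise 6LoWPAN context management (Thread cert test 5.6.8).
        The steps below add and remove on-mesh prefixes on the router and,
        after each network data change, ping the leader's matching '200x::'
        addresses from the end device, so that prefix contexts can expire and
        be reused once the leader's context reuse delay (set to 10 seconds in
        setUp) has elapsed.
        """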
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
time.sleep(3)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[ED].start()
time.sleep(3)
self.assertEqual(self.nodes[ED].get_state(), 'child')
self.nodes[ROUTER].add_prefix('2001::/64', 'pvcrs')
self.nodes[ROUTER].register_netdata()
time.sleep(2)
addrs = self.nodes[LEADER].get_addrs()
for addr in addrs:
if addr[0:3] == '200':
self.nodes[ED].ping(addr)
self.nodes[ROUTER].remove_prefix('2001::/64')
self.nodes[ROUTER].register_netdata()
time.sleep(5)
addrs = self.nodes[LEADER].get_addrs()
for addr in addrs:
if addr[0:3] == '200':
self.nodes[ED].ping(addr)
self.nodes[ROUTER].add_prefix('2002::/64', 'pvcrs')
self.nodes[ROUTER].register_netdata()
time.sleep(5)
addrs = self.nodes[LEADER].get_addrs()
for addr in addrs:
if addr[0:3] == '200':
self.nodes[ED].ping(addr)
time.sleep(5)
self.nodes[ROUTER].add_prefix('2003::/64', 'pvcrs')
self.nodes[ROUTER].register_netdata()
time.sleep(5)
addrs = self.nodes[LEADER].get_addrs()
for addr in addrs:
if addr[0:3] == '200':
self.nodes[ED].ping(addr)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -5,238,427,176,629,124,000 | 35.159664 | 78 | 0.647223 | false |
scherroman/mugen | mugen/location_utility.py | 1 | 2143 | from typing import List, Tuple
"""
Module for Location & Interval manipulation
"""
def intervals_from_locations(locations: List[float]) -> List[float]:
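    """
    Calculates the interval preceding each location: the first interval is
    measured from zero, each following one from the previous location
    Ex) 5, 10, 15
    intervals == 5, 5, 5
    Returns
    -------
    A list of intervals
    """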
intervals = []
previous_location = None
for index, location in enumerate(locations):
if index == 0:
intervals.append(location)
else:
intervals.append(location - previous_location)
previous_location = location
return intervals
def locations_from_intervals(intervals: List[float]) -> List[float]:
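    """
    Calculates the location at which each interval ends, accumulating the
    intervals starting from zero (the inverse of intervals_from_locations)
    Ex) 5, 5, 5
    locations == 5, 10, 15
    Returns
    -------
    A list of locations
    """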
locations = []
running_duration = 0
    for interval in intervals:
        running_duration += interval
        locations.append(running_duration)
return locations
def start_end_locations_from_locations(locations: List[float]) -> Tuple[List[float], List[float]]:
"""
Calculates the start and end times of each location
Ex) 5, 10, 15
start_times == 5, 10, 15
end_times == 10, 15, 15
Returns
-------
A tuple of start and end times
"""
start_locations = []
end_locations = []
for index, location in enumerate(locations):
start_time = location
if index == len(locations) - 1:
end_time = location
else:
end_time = locations[index + 1]
start_locations.append(start_time)
end_locations.append(end_time)
return start_locations, end_locations
def start_end_locations_from_intervals(intervals: List[float]) -> Tuple[List[float], List[float]]:
"""
Calculates the start and end times of each interval
Ex) 5, 10, 15
start_times == 0, 5, 10
end_times == 5, 10, 15
Returns
-------
A tuple of start and end times
"""
start_locations = []
end_locations = []
running_duration = 0
for index, duration in enumerate(intervals):
start_time = running_duration
end_time = start_time + duration
start_locations.append(start_time)
end_locations.append(end_time)
running_duration += duration
return start_locations, end_locations
| mit | 7,078,705,883,654,456,000 | 23.918605 | 98 | 0.615492 | false |
adbrebs/spynet | models/max_pool_3d.py | 1 | 3198 |
from theano import tensor
from theano.tensor.signal.downsample import DownsampleFactorMax
def max_pool_3d(input, ds, ignore_border=False):
"""
Takes as input a N-D tensor, where N >= 3. It downscales the input by
the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ds[0],ds[1],ds[2]) (depth, height, width)
Arguments:
input (N-D theano tensor of input images): input images. Max pooling will be done over the 3 last dimensions.
ds (tuple of length 3): factor by which to downscale. (2,2,2) will halve the video in each dimension.
ignore_border (boolean): When True, (5,5,5) input with ds=(2,2,2)
will generate a (2,2,2) output. (3,3,3) otherwise.
"""
if input.ndim < 3:
raise NotImplementedError('max_pool_3d requires a dimension >= 3')
# extract nr dimensions
vid_dim = input.ndim
# max pool in two different steps, so we can use the 2d implementation of
# downsamplefactormax. First maxpool frames as usual.
# Then maxpool the depth dimension. Shift the depth dimension to the third
# position, so rows and cols are in the back
# extract dimensions
frame_shape = input.shape[-2:]
# count the number of "leading" dimensions, store as dmatrix
batch_size = tensor.prod(input.shape[:-2])
batch_size = tensor.shape_padright(batch_size,1)
# store as 4D tensor with shape: (batch_size,1,height,width)
new_shape = tensor.cast(tensor.join(0, batch_size,
tensor.as_tensor([1,]),
frame_shape), 'int32')
input_4D = tensor.reshape(input, new_shape, ndim=4)
# downsample mini-batch of videos in rows and cols
op = DownsampleFactorMax((ds[1],ds[2]), ignore_border)
output = op(input_4D)
# restore to original shape
outshape = tensor.join(0, input.shape[:-2], output.shape[-2:])
out = tensor.reshape(output, outshape, ndim=input.ndim)
# now maxpool depth
# output (depth, rows, cols), reshape so that depth is in the back
shufl = (list(range(vid_dim-3)) + [vid_dim-2]+[vid_dim-1]+[vid_dim-3])
input_depth = out.dimshuffle(shufl)
# reset dimensions
vid_shape = input_depth.shape[-2:]
# count the number of "leading" dimensions, store as dmatrix
batch_size = tensor.prod(input_depth.shape[:-2])
batch_size = tensor.shape_padright(batch_size,1)
# store as 4D tensor with shape: (batch_size,1,width,depth)
new_shape = tensor.cast(tensor.join(0, batch_size,
tensor.as_tensor([1,]),
vid_shape), 'int32')
input_4D_depth = tensor.reshape(input_depth, new_shape, ndim=4)
# downsample mini-batch of videos in depth
op = DownsampleFactorMax((1,ds[0]), ignore_border)
outdepth = op(input_4D_depth)
# output
# restore to original shape (xxx, rows, cols, depth)
outshape = tensor.join(0, input_depth.shape[:-2], outdepth.shape[-2:])
shufl = (list(range(vid_dim-3)) + [vid_dim-1]+[vid_dim-3]+[vid_dim-2])
return tensor.reshape(outdepth, outshape, ndim=input.ndim).dimshuffle(shufl) | bsd-2-clause | 608,790,142,119,456,300 | 42.821918 | 117 | 0.641026 | false |
chrismedrela/scriptcraft | scriptcraft/client.py | 1 | 76856 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import ConfigParser
try:
import cPickle as pickle
except:
import pickle
import itertools
import math
import os.path
from Queue import Queue
import random
import time
import threading
from Tkinter import *
import tkColorChooser
import tkFileDialog
import tkFont
import tkMessageBox
import tkSimpleDialog
# Imports from PIL
import Image, ImageTk # it overrides Tkinter.Image so it must be after Tkinter
# import statement
# We have to explicitly statically import PIL plugins so py2exe know that they
# are necessary.
import PngImagePlugin
import GifImagePlugin
from scriptcraft import direction
from scriptcraft.gamemap import GameMap
from scriptcraft.gamestate import (actions, Game, DEFAULT_GAME_CONFIGURATION,
Language, Program, STAR_PROGRAM, Unit,
NoFreeStartPosition, Tree, MineralDeposit,
load_game_map, InvalidGameMapData, cmds)
from scriptcraft.gamesession import (GameSession, SystemConfiguration,
AlreadyExecuteGame)
from scriptcraft.utils import *
class GameViewer(Canvas):
"""GameViewer is canvas widget to display a scriptcraft Game
instance. It provides scrolling and zooming the map and selecting
fields.
About selecting:
When a mouse motion is detected and the selection (the selection is the
field under the cursor) changed then <<selection-changed>> event is
emitted. You can find out which field is selected by checking
GameViewer.selection_position which is (x, y) tuple or None.
When a left mouse button is being pressed then <<field-selected>> event is
emitted (it doesn't matter if the mouse is inside or outside map). You can
check position of clicked field by getting
GameViewer.selection_position. If there is a double click, then
<<field-double-clicked>> event is also emitted.
You can set pointer at any valid position by calling
set_pointer_position. Pointer is a special selection.
There is also second pointer. Its color can be changed. Use
set_pointer_2_position and set_pointer_2_color methods.
Layers:
layer-1 -- ground
layer-1.5 -- grid of ground
layer-2 -- arrows
layer-3 -- units, trees, objects on map, texts over units
layer-4 -- pointer
interface
"""
SCROLLING_SENSITIVITY = 2**(1/2.0) # in (1, +inf); greater means faster scrolling
TILE_WIDTH = 64
TILE_HEIGHT = 32
GROUND_TILE_WIDTH = 32
GROUND_TILE_HEIGHT = 32
GROUND_TILES_IN_ROW = 4
GROUND_TILES_IN_COLUMN = 4
GROUND_TYPE_TO_NAME = {
0:'ground-black',
1:'ground-dirt',
2:'ground-grass',
3:'ground-rock',
4:'ground-stones',
5:'ground-flowers',
6:'ground-hardearth',
7:'ground-tiles',
8:'ground-sand',
}
MAX_ZOOM = 1.0
MIN_ZOOM = 1.0/4
CORNER_TEXT_POSITION = (15, 20) # position at screen
CORNER_TEXT_FONT_OPTIONS = {'size':12,
'weight':'bold'}
CORNER_TEXT_COLOR = 'red'
CORNER_TEXT_INITIAL_TEXT = ''
FREQUENCY_OF_UPDATING_ANIMATIONS = 50 # ms
LOADING_INDICATOR_POSITION = (-45, 15)
LOADING_INDICATOR_SPEED = int(-360*1.5) # degrees per second
FREQUENCY_OF_CHECKING_QUERY = 100 # ms
COLOR_OF_GROUND_IMITATION = '#336633'
GRID_COLOR = '#555555'
POINTER_ROTATION_SPEED = int(-360*2.5) # degrees per second
POINTER_SIZE = (80, 40)
POINTER_COLORS = [
('white', '#ffffff'),
('red', '#ff0000'),
('green', '#00ff00'),
('darkblue', '#0000aa'),
]
POINTER_2_ROTATION_SPEED = int(-360*1.2)
POINTER_2_SIZE = (64, 32)
def __init__(self, master):
Canvas.__init__(self, master, width=800, height=600, bg='black')
self.pack(expand=YES, fill=BOTH)
# To enable receiving wheel rolling events under windows, we
# need this action before bindings:
self.focus_set()
# bindings
self.bind('<B1-Motion>',
self._mouse_motion_with_button_pressed_callback)
self.bind('<Motion>', self._mouse_motion_callback)
self.bind('<ButtonRelease-1>', self._release_callback)
self.bind('<MouseWheel>', self._roll_wheel_callback)
self.bind('<Button-4>', self._roll_wheel_callback)
self.bind('<Button-5>', self._roll_wheel_callback)
self.bind('<Button-1>', self._click_callback)
self.bind('<Double-Button-1>', self._double_click_callback)
self.bind("<Configure>", self._resized_callback)
# own attributes
self._zoom = 1.0
self._delta = (-5.0, 0.0)
self._game = None
self._scaled_images_cache = {}
self._ground_image_cache = None
self._ground_tiles_cache = {}
self._last_mouse_position = None # None unless button pressed
self._click_position = None
self.selection_position = None # None or (x, y)
self._trees_ids_by_position = {}
self._queue = Queue()
self._compute_ground_image_flag = False
# corner text
self._corner_text_id = self.create_text(
GameViewer.CORNER_TEXT_POSITION[0],
GameViewer.CORNER_TEXT_POSITION[1],
anchor=NW, text=GameViewer.CORNER_TEXT_INITIAL_TEXT,
font=tkFont.Font(**GameViewer.CORNER_TEXT_FONT_OPTIONS),
fill=GameViewer.CORNER_TEXT_COLOR,
tag=['interface'])
# loading indicator
image = self._get_image('loading')
self._loading_image = ImageTk.PhotoImage(image)
self._loading_indicator_id = self.create_image(
self._loading_indicator_position[0],
self._loading_indicator_position[1],
image=self._loading_image,
state=HIDDEN, anchor=NW,
tags=['interface'])
self._loading_indicator_turned_on = False
self._update_loading_indicator()
# load pointer images
self._pointer_images_by_color = {}
image = self._get_image('loading')
alpha = image.split()[-1]
for color_name, color in GameViewer.POINTER_COLORS:
colored = Image.new('RGBA', image.size, color)
colored.putalpha(alpha)
self._pointer_images_by_color[color_name] = colored
# pointer
image = self._pointer_images_by_color['white']
self._pointer_image = ImageTk.PhotoImage(image)
self._pointer_position = None
self._pointer_id = self.create_image(
0, 0,
image=self._pointer_image,
state=HIDDEN, anchor=NW,
tags=['layer-2', 'game'])
self._update_pointer()
# second pointer
self._pointer_2_image = None
self._pointer_2_color = 'white'
self._pointer_2_position = None
self._pointer_2_id = self.create_image(
0, 0,
image=self._pointer_2_image,
state=HIDDEN, anchor=NW,
tags=['layer-2', 'game'])
self._update_pointer_2()
# run checking queue
self._check_queue()
@log_on_enter('set game in game viewer', mode='only time')
def set_game(self, game):
""" Attribute game should be scriptcraft game instance or
None.
In this method game instance passed during previous
call is used. The previous game instance cannot be modified
since the previous call!
Use set_game(None) and set_game(new_game) to force redrawing
ground and delete current selection.
"""
previous_game = self._game
self._game = game
if previous_game:
self.delete('non-cached')
if not game:
# reset queue
self._queue = Queue()
self._compute_ground_image_flag = False
# selection position
self._set_selection_position(None, force_emitting=True)
# force redrawing ground during next set_game call
self._ground_image_cache = None
if 'ground' in self._scaled_images_cache:
del self._scaled_images_cache['ground']
# reset zoom and delta
self._zoom = 1.0
self._delta = (-5.0, 0.0)
# hide loading indicator
self.show_loading_indicator(False)
# other stuff
self._trees_ids_by_position.clear()
self.delete('tree')
else:
# selection position
self._set_selection_position(self.selection_position,
force_emitting=True)
# draw game
self._draw_game(game, old_game=previous_game)
def set_corner_text(self, text):
self.itemconfigure(self._corner_text_id,
text=text)
def show_loading_indicator(self, state):
assert isinstance(state, bool)
self._loading_indicator_turned_on = state
state = NORMAL if state else HIDDEN
self.itemconfig(self._loading_indicator_id,
state=state)
def set_pointer_position(self, position_or_None):
self._pointer_position = position_or_None
def set_pointer_2_position(self, position_or_None):
self._pointer_2_position = position_or_None
def set_pointer_2_color(self, color):
assert color in self._pointer_images_by_color, 'unknown color'
self._pointer_2_color = color
@property
def _loading_indicator_position(self):
x, y = GameViewer.LOADING_INDICATOR_POSITION
width, height = self.winfo_width(), self.winfo_height()
result = (x if x >= 0 else width+x,
y if y >= 0 else height+y)
return result
def _update_loading_indicator(self):
if self._loading_indicator_turned_on:
angle = time.time()*GameViewer.LOADING_INDICATOR_SPEED % 360
image = self._get_image('loading')
image = image.rotate(angle, resample=Image.BICUBIC)
self._loading_image = ImageTk.PhotoImage(image)
self.itemconfig(self._loading_indicator_id,
image=self._loading_image)
self.master.after(GameViewer.FREQUENCY_OF_UPDATING_ANIMATIONS,
self._update_loading_indicator)
def _update_pointer(self):
if self._pointer_position is not None:
angle = time.time()*GameViewer.POINTER_ROTATION_SPEED % 360
image = self._get_image('pointer')
image = image.rotate(angle, resample=Image.BICUBIC)
size = GameViewer.POINTER_SIZE
size = (size[0]*self._zoom,
size[1]*self._zoom)
size = tuple(map(int, size))
image = image.resize(size)
self._pointer_image = ImageTk.PhotoImage(image)
self.itemconfig(self._pointer_id, state=NORMAL,
image=self._pointer_image)
pos = self._to_screen_coordinate(self._pointer_position)
x, y = self._to_image_position('pointer', pos)
self.coords(self._pointer_id, x, y)
else:
self.itemconfig(self._pointer_id, state=HIDDEN)
self.master.after(GameViewer.FREQUENCY_OF_UPDATING_ANIMATIONS,
self._update_pointer)
def _update_pointer_2(self):
if self._pointer_2_position is not None:
angle = time.time()*GameViewer.POINTER_2_ROTATION_SPEED % 360
image = self._pointer_images_by_color[self._pointer_2_color]
image = image.rotate(angle, resample=Image.BICUBIC)
size = GameViewer.POINTER_2_SIZE
size = (size[0]*self._zoom,
size[1]*self._zoom)
size = tuple(map(int, size))
image = image.resize(size)
self._pointer_2_image = ImageTk.PhotoImage(image)
self.itemconfig(self._pointer_2_id, state=NORMAL,
image=self._pointer_2_image)
pos = self._to_screen_coordinate(self._pointer_2_position)
x, y = self._to_image_position('pointer2', pos)
self.coords(self._pointer_2_id, x, y)
else:
self.itemconfig(self._pointer_2_id, state=HIDDEN)
self.master.after(GameViewer.FREQUENCY_OF_UPDATING_ANIMATIONS,
self._update_pointer_2)
def _draw_game(self, game, old_game):
# draw imitation of ground
size = self._game.game_map.size
points = [(0, 0), (0, size[1]), (size[0], size[1]), (size[0], 0)]
points = [self._to_screen_coordinate(pos) for pos in points]
points = [coord for pos in points for coord in pos]
self.create_polygon(points,
fill=GameViewer.COLOR_OF_GROUND_IMITATION,
tags=['game', 'non-cached', 'layer-1'])
# draw ground
self._draw_ground()
# remove deleted trees
tree_positions = [position for (position, obj)
in self._game.game_map._objs.items()
if isinstance(obj, Tree)]
tree_positions = set(tree_positions)
if old_game is not None:
old_tree_positions = [
position for (position, obj)
in old_game.game_map._objs.items()
if isinstance(obj, Tree)]
old_tree_positions = set(old_tree_positions)
else:
old_tree_positions = set()
deleted_trees = old_tree_positions - tree_positions
for position in deleted_trees:
self.delete(self._trees_ids_by_position[position])
del self._trees_ids_by_position[position]
# draw objects
def draw_arrow(source, destination, type='red'):
assert type in ('red', 'blue')
delta = map(lambda (a, b): a-b, zip(destination,
source))
d = direction.FROM_RAY[tuple(delta)]
direction_name = direction.TO_FULL_NAME[d]
self._draw('arrow-%s-%s' % (type, direction_name),
source, layer=2)
objs = sorted(self._game.game_map._objs.items(),
key=lambda (pos, obj): pos[0]+pos[1])
for position, obj in objs:
if isinstance(obj, Tree): # draw tree
if position not in old_tree_positions:
name = 'tree%s' % obj.type
id_ = self._draw(name, position, layer=3, cached=True,
extra_tags=['tree'])
self._trees_ids_by_position[position] = id_
                self.tag_raise(self._trees_ids_by_position[position])
elif isinstance(obj, MineralDeposit): # draw minerals
if obj.minerals:
self._draw('minerals', position, layer=3)
else:
self._draw('minerals-ex', position, layer=3)
elif isinstance(obj, Unit): # draw unit
unit = obj
# build sprite name
if unit.type.main_name == '4': # base
sprite_name = 'base'
elif unit.type.main_name == '5': # miner
storage_state = 'loaded' if unit.minerals else 'empty'
direction_name = direction.TO_FULL_NAME[unit.direction]
sprite_name = 'miner-%s-%s' % \
(storage_state, direction_name)
elif unit.type.main_name == '6':
direction_name = direction.TO_FULL_NAME[unit.direction]
sprite_name = 'tank-%s' % direction_name
else:
assert False, 'oops, unknown unit type %r' % unit.type
# draw the unit
self._draw(sprite_name, position, layer=3)
# draw label for the unit
x, y = self._to_screen_coordinate(position)
color = '#' + "%02x%02x%02x" % unit.player.color
font = self._get_font_for_current_zoom()
# this operation costs a lot [optimization]
self.create_text(x, y, fill=color, text=unit.player.name,
font=font, tags=['layer-3', 'game', 'text',
'non-cached'],
state=NORMAL if font else HIDDEN)
# draw arrows indicating executing action (or fire explosion)
if isinstance(unit.action, actions.MoveAction):
draw_arrow(unit.action.source,
unit.action.destination,
type='blue')
if isinstance(unit.action, actions.GatherAction):
draw_arrow(unit.position,
unit.action.source)
elif isinstance(unit.action, actions.StoreAction):
destination_unit = self._game.units_by_IDs[
unit.action.storage_ID]
destination = destination_unit.position
draw_arrow(unit.position, destination)
elif isinstance(unit.action, actions.FireAction):
self._draw('explosion', unit.action.destination, layer=3)
# draw lines (debug)
def draw_grid():
line_color = GameViewer.GRID_COLOR
for x in xrange(0, game.game_map.size[1] + 1):
start_position = (0, x)
end_position = (game.game_map.size[0], x)
start_position = self._to_screen_coordinate(start_position)
end_position = self._to_screen_coordinate(end_position)
self.create_line(*(start_position + end_position),
fill=line_color,
tag=['layer-1.5', 'game', 'non-cached'])
for y in xrange(0, game.game_map.size[0] + 1):
start_position = (y, 0)
end_position = (y, game.game_map.size[1])
start_position = self._to_screen_coordinate(start_position)
end_position = self._to_screen_coordinate(end_position)
self.create_line(*(start_position + end_position),
fill=line_color,
tag=['layer-1.5', 'game', 'non-cached'])
# draw grid
draw_grid()
# sort layers
self._sort_layers()
def _sort_layers(self):
self.tag_raise('layer-1')
self.tag_raise('layer-1.5')
self.tag_raise('layer-2')
self.tag_raise('layer-3')
self.tag_raise('layer-4')
self.tag_raise('interface')
def _draw_ground(self):
if self._ground_image_cache:
self._draw('ground', (0, 0), layer=1)
self.tag_lower('layer-1')
elif not self._compute_ground_image_flag:
target = lambda: self._compute_ground_image_asynch(self._queue)
thread = threading.Thread(target=target)
thread.start()
def _compute_ground_image_asynch(self, queue):
self._get_ground_image()
queue.put('ready')
self._compute_ground_image_flag = False
def _check_queue(self):
if not self._queue.empty():
command = self._queue.get_nowait()
assert command == 'ready'
self._draw_ground()
self.master.after(GameViewer.FREQUENCY_OF_CHECKING_QUERY,
self._check_queue)
@memoized
def _gradient(self, align):
assert align in ('ns', 'we')
gradient = Image.new('L', (255, 1))
for x in range(255):
gradient.putpixel((254-x, 0), x)
gradient = gradient.resize((255, 255))
if align == 'ns':
gradient = gradient.rotate(45-180, expand=True)
elif align == 'we':
gradient = gradient.rotate(-45, expand=True)
gradient = gradient.resize((GameViewer.TILE_WIDTH+2,
GameViewer.TILE_HEIGHT+2))
return gradient
def _draw(self, name, position, layer, state=NORMAL,
extra_tags=None, cached=False):
""" Draw sprite with name 'name' at position 'position' in
game coordinates."""
extra_tags = extra_tags or []
tags = [name, 'layer-%s' % layer, 'game']
if not cached:
tags.append('non-cached')
position = self._to_screen_coordinate(position)
x, y = self._to_image_position(name, position)
image = self._get_scaled_sprite(name)
id_ = self.create_image(x, y, image=image, anchor=NW,
state=state, tags=tags+extra_tags)
return id_
def _get_font_for_current_zoom(self):
size = int(12.2*self._zoom)
if size < 9:
if size >= 6:
return tkFont.Font(size=9)
else:
return None
else:
return tkFont.Font(size=size)
@memoized
def _get_image(self, name):
""" Return (PIL.)Image instance. """
path = 'graphic/%s.png' % name
image = Image.open(datafile_path(path))
return image
def _get_ground_tile(self, name, (x, y)):
x %= GameViewer.GROUND_TILES_IN_ROW
y %= GameViewer.GROUND_TILES_IN_COLUMN
key = (name, (x, y))
if key not in self._ground_tiles_cache:
start_point_x = x*GameViewer.GROUND_TILE_WIDTH
start_point_y = y*GameViewer.GROUND_TILE_HEIGHT
image = self._get_image(name) # '.'+name for testing
image = image.convert('RGBA')
box = (start_point_x, start_point_y,
GameViewer.GROUND_TILE_WIDTH+start_point_x,
GameViewer.GROUND_TILE_HEIGHT+start_point_y)
croped = image.crop(box)
rotated = croped.rotate(-45, expand=True,
resample=Image.BICUBIC)
scaled = rotated.resize((GameViewer.TILE_WIDTH+2,
GameViewer.TILE_HEIGHT+2))
self._ground_tiles_cache[key] = scaled
return self._ground_tiles_cache[key]
@log_on_enter('GameViewer._get_ground_image', mode='only time')
def _get_ground_image(self):
""" Return (PIL.)Image instance. """
if self._ground_image_cache is None: # then compute it and cache
log('computing ground image')
def blend(image_nw, image_ne, image_se, image_sw,
gradient_ns, gradient_we):
if image_nw == image_ne == image_se == image_sw:
return image_nw
image_w = (Image.composite(image_nw, image_sw, gradient_ns)
if image_nw != image_sw
else image_nw)
image_e = (Image.composite(image_ne, image_se, gradient_ns)
if image_ne != image_se
else image_ne)
return Image.composite(image_w, image_e, gradient_we)
gradient_ns = self._gradient('ns')
gradient_we = self._gradient('we')
size = self._game.game_map.size
image_size = (GameViewer.TILE_WIDTH/2.0*(size[0]+size[1]+2),
GameViewer.TILE_HEIGHT/2.0*(size[0]+size[1]+2))
result = Image.new('RGB', map(int, image_size))
game_map = self._game.game_map
for (x, y) in itertools.product(xrange(-1, size[0]),
xrange(-1, size[1])):
ground_type_nw = game_map[x, y].ground_type or 0
ground_type_ne = game_map[x+1, y].ground_type or 0
ground_type_se = game_map[x+1, y+1].ground_type or 0
ground_type_sw = game_map[x, y+1].ground_type or 0
tile_name_nw = GameViewer.GROUND_TYPE_TO_NAME[ground_type_nw]
tile_name_ne = GameViewer.GROUND_TYPE_TO_NAME[ground_type_ne]
tile_name_se = GameViewer.GROUND_TYPE_TO_NAME[ground_type_se]
tile_name_sw = GameViewer.GROUND_TYPE_TO_NAME[ground_type_sw]
tile_nw = self._get_ground_tile(tile_name_nw, (x, y))
tile_ne = self._get_ground_tile(tile_name_ne, (x, y))
tile_se = self._get_ground_tile(tile_name_se, (x, y))
tile_sw = self._get_ground_tile(tile_name_sw, (x, y))
tile = blend(tile_nw, tile_ne, tile_se, tile_sw,
gradient_ns, gradient_we)
box = [GameViewer.TILE_WIDTH/2.0*(x-y+size[1]),
GameViewer.TILE_HEIGHT/2.0*(x+y+2)]
result.paste(tile, tuple(map(int, box)), tile)
self._ground_image_cache = result
return self._ground_image_cache
def _get_scaled_sprite(self, name):
""" Return (PIL.)ImageTk scaled by self._zoom factor. """
# if cached, return cached value
key = name
image = self._scaled_images_cache.get(key, None)
if image:
return image
# otherwise compute, cache and return
if name == 'ground':
image = self._get_ground_image()
elif name.startswith('pointer-'):
_, color = name.split('-')
image = self._pointer_images_by_color[color]
else:
image = self._get_image(name)
width, height = image.size
delta = 0 if name == 'ground' else 2
new_width, new_height = (int(width*self._zoom)+delta,
int(height*self._zoom)+delta)
if width != new_width: # resize if it's necessary
image = image.resize((new_width, new_height), Image.NEAREST)
image = ImageTk.PhotoImage(image)
# no problem with bug connected with reference count --
# caching keeps image reference
self._scaled_images_cache[key] = image
return image
def _to_screen_coordinate(self, (x, y), delta=None, zoom=None):
""" From game coordinates. """
zoom = zoom or self._zoom
delta = delta or self._delta
return (32*zoom*(x-y-2*delta[0]),
16*zoom*(x+y-2*delta[1]))
def _to_game_coordinate(self, (x, y), delta=None, zoom=None):
""" From screen coordinates. """
zoom = zoom or self._zoom
delta = delta or self._delta
return (x/64.0/zoom + y/32.0/zoom \
+ delta[0] + delta[1],
-x/64.0/zoom + y/32.0/zoom \
- delta[0] + delta[1])
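    # Worked example for the two transforms above (zoom=1, delta=(0, 0)):
    # game (0, 0) -> screen (0, 0); game (1, 0) -> screen (32, 16);
    # game (0, 1) -> screen (-32, 16). Conversely,
    # _to_game_coordinate((32, 16), delta=(0, 0), zoom=1) gives (1.0, 0.0),
    # so the two methods are mutually inverse for a fixed zoom and delta.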
def _to_image_position(self, image_name, (x, y)):
""" From screen coordinaties. """
if image_name == 'ground':
dx = GameViewer.TILE_WIDTH/2.0 * (self._game.game_map.size[1]+1)
dy = GameViewer.TILE_HEIGHT/2.0
else:
switch = {
'tank' : (22, 0),
'miner' : (18, 3),
'base' : (31, 13),
'minerals' : (20, 10),
'tree1' : (10, 45),
'tree2' : (20, 25),
'tree3' : (20, 33),
'tree4' : (15, 25),
'tree5' : (18, 15),
'tree6' : (22, 18),
'arrow' : (32, 0),
'pointer' : (GameViewer.POINTER_SIZE[0]/2, 4),
'pointer2' : (GameViewer.POINTER_2_SIZE[0]/2, 0),
'explosion' : (10, -5),}
first_part = image_name.split('-', 1)[0]
dx, dy = switch[first_part]
return x-dx*self._zoom, y-dy*self._zoom
def _set_zoom(self, zoom, (XS, YS)):
""" Set zoom. The point (XS, YS) in screen coordinate doesn't
move."""
# bound zoom
zoom = max(zoom, GameViewer.MIN_ZOOM)
zoom = min(zoom, GameViewer.MAX_ZOOM)
if zoom == self._zoom:
# zoom hasn't been changed
return
# It clears cache of scaled images. Due to reference count bug
# all images will be removed from memory!
# compute new self._delta and self._zoom
xS, yS = self._to_game_coordinate((XS, YS))
delta = [-XS/64.0/zoom + xS/2.0 - yS/2.0,
-YS/32.0/zoom + xS/2.0 + yS/2.0]
self._zoom, old_zoom = zoom, self._zoom
cleared_delta = self._clear_delta(delta)
self._delta = cleared_delta
delta_delta = (cleared_delta[0]-delta[0],
cleared_delta[1]-delta[1])
# scale all images
with log_on_enter('GameViewer._set_zoom: rescaling images',
mode='only time'):
names = self._scaled_images_cache.keys()
self._scaled_images_cache = {} # clear cache
for name in names:
image = self._get_scaled_sprite(name)
self.itemconfigure(name, image=image)
# scale all texts
font = self._get_font_for_current_zoom()
self.itemconfigure('text', font=font,
state = NORMAL if font else HIDDEN)
# move all images
factor = zoom/old_zoom
self.scale('game', XS, YS, factor, factor)
self.move('game',
-delta_delta[0]*64.0*self._zoom,
-delta_delta[1]*32.0*self._zoom)
def _clear_delta(self, delta):
if not self._game:
return delta
size = self.winfo_width(), self.winfo_height()
center_of_screen = (size[0]/2, size[1]/2)
map_width = self._game.game_map.size[0]
map_height = self._game.game_map.size[1]
pos = self._to_game_coordinate(center_of_screen, delta=delta)
if (0 <= pos[0] < map_width and
0 <= pos[1] < map_height):
return delta
# If we are here it means that the delta is invalid.
# 1. Find valid position
pos = (min(map_width, max(0, pos[0])),
min(map_height, max(0, pos[1])))
# 2. Find delta which fullfils the condition:
# _to_screen_coordinate(pos) == center_of_screen
delta = (-(center_of_screen[0]/32.0/self._zoom - pos[0] + pos[1])/2.0,
-(center_of_screen[1]/16.0/self._zoom - pos[0] - pos[1])/2.0)
return delta
def _set_selection_position(self, value, force_emitting=False):
old_selection = self.selection_position
self.selection_position = value
if old_selection != value or force_emitting:
self.event_generate('<<selection-changed>>')
def _roll_wheel_callback(self, event):
if self._game:
delta = 0
if event.num == 5: # respond Linux wheel event
delta -= 1
elif event.num == 4: # -//-
delta += 1
else: # respond Windows wheel event
delta += event.delta // 120
factor = GameViewer.SCROLLING_SENSITIVITY**delta
self._set_zoom(self._zoom*factor, (event.x, event.y))
def _mouse_motion_with_button_pressed_callback(self, event):
# scrolling map
if self._game and self._last_mouse_position:
with log_on_enter('moving everything', mode='only time'):
dx, dy = (event.x - self._last_mouse_position[0],
event.y - self._last_mouse_position[1])
delta = (self._delta[0] - dx/64.0/self._zoom,
self._delta[1] - dy/32.0/self._zoom)
delta = self._clear_delta(delta)
dx, dy = ((self._delta[0]-delta[0])*64.0*self._zoom,
(self._delta[1]-delta[1])*32.0*self._zoom)
self._delta = delta
self.move('game', dx, dy)
self._last_mouse_position = (event.x, event.y)
def _mouse_motion_callback(self, event):
if not self._game:
return
# info about field/unit under mouse -- update corner text
pos = self._to_game_coordinate((event.x, event.y))
pos = tuple(map(lambda x: int(math.floor(x)), pos))
if self._game.game_map[pos].valid_position:
self._set_selection_position(pos)
else:
self._set_selection_position(None)
def _click_callback(self, event):
if self._game:
self._click_position = (event.x, event.y)
def _double_click_callback(self, event):
if self._game:
self.event_generate('<<field-double-clicked>>')
def _release_callback(self, event):
self._last_mouse_position = None
if self._click_position:
release_position = (event.x, event.y)
if self._click_position == release_position:
self._single_click_callback(event)
def _single_click_callback(self, event):
if self._game:
click_position = self._to_game_coordinate((event.x, event.y))
integer_click_position = map(lambda i: int(math.floor(i)),
click_position)
integer_click_position = tuple(integer_click_position)
# generate event even click position is outside map
self.event_generate('<<field-selected>>')
def _resized_callback(self, event):
# update delta
delta = self._clear_delta(self._delta)
dx, dy = ((self._delta[0]-delta[0])*64.0*self._zoom,
(self._delta[1]-delta[1])*32.0*self._zoom)
self._delta = delta
self.move('game', dx, dy)
# update loading indicator's position
self.coords(
self._loading_indicator_id,
self._loading_indicator_position[0],
self._loading_indicator_position[1])
class Scrolled(Frame):
"""Example:
>>> scroll = Scrolled(master)
>>> label = Label(scroll, text='Label')
>>> scroll.set_widget(label) # ==> label packed
>>> scroll.pack()
"""
def __init__(self, *args, **kwargs):
Frame.__init__(self, *args, **kwargs)
def set_widget(self, widget):
scroll = Scrollbar(self)
scroll.pack(side=RIGHT, fill=Y)
scroll.config(command=widget.yview)
widget.configure(yscrollcommand=scroll.set)
widget.pack(side=LEFT, fill=BOTH, expand=1)
class UnitInfoWindow(tkSimpleDialog.Dialog):
LANGUAGES = [None, STAR_PROGRAM]
LANGUAGES += (lang for lang in Language.ALL)
LANGUAGE_NAMES = tuple(('brak programu' if lang is None else
'star program' if lang is STAR_PROGRAM else
Language.TO_NAME[lang])
for lang in LANGUAGES)
FONT_ATTRS = {
'family':'Courier New',
'size':8,
}
CODE_FONT_ATTRS = dict(FONT_ATTRS)
CODE_FONT_ATTRS['size'] = 10
def __init__(self, master, program,
maybe_compilation_status,
maybe_run_status,
ok_callback):
self._program = program
self._maybe_compilation_status = maybe_compilation_status
self._maybe_run_status = maybe_run_status
self._ok_callback = ok_callback
tkSimpleDialog.Dialog.__init__(self, master)
def buttonbox(self):
tkSimpleDialog.Dialog.buttonbox(self)
self.unbind('<Return>') # so we can use enter in code textarea
def body(self, master):
left_box = Frame(master)
separator = Frame(master, width=2, bd=1, relief=SUNKEN)
right_box = Frame(master)
self._create_program_editor_part(left_box)
self._create_compilation_part(right_box)
self._add_horizontal_separator(right_box)
self._create_execution_part(right_box)
left_box.pack(side=LEFT, fill=BOTH, expand=1)
separator.pack(side=LEFT, fill=Y, padx=15, pady=15)
right_box.pack(side=RIGHT, fill=BOTH, expand=1)
master.pack(fill=BOTH, expand=1)
self.geometry('800x600')
self.attributes('-fullscreen', '1')
def _add_horizontal_separator(self, master):
separator = Frame(master, height=2, bd=1, relief=SUNKEN)
separator.pack(fill=X, padx=15, pady=15)
def _create_program_editor_part(self, master):
box = Frame(master)
self._create_language_label(box)
self._create_language_listbox(box)
box.pack(fill=BOTH)
self._create_code_label(master)
self._create_code_textarea(master)
def _create_language_label(self, master):
self._language_label = Label(master, text='Język: ')
self._language_label.pack(side=LEFT)
def _create_language_listbox(self, master):
scroll = Scrolled(master)
language_list = StringVar(value=UnitInfoWindow.LANGUAGE_NAMES)
self._language_listbox = Listbox(scroll,
selectmode=SINGLE,
height=5,
width=20,
exportselection=0, # to keep selection highlighted
listvariable=language_list)
language_index = UnitInfoWindow.LANGUAGES.index(
self._program.language
if isinstance(self._program, Program)
else self._program)
self._language_listbox.select_set(language_index)
scroll.set_widget(self._language_listbox)
scroll.pack(side=LEFT, fill=BOTH, expand=1)
def _create_code_label(self, master):
self._code_label = Label(master, text='Kod: ', anchor=W)
self._code_label.pack(fill=BOTH)
def _create_code_textarea(self, master):
scroll = Scrolled(master)
self._code_textarea = Text(
scroll, height=1, width=1,
font=tkFont.Font(**UnitInfoWindow.CODE_FONT_ATTRS))
text = (""
if self._program in (None, STAR_PROGRAM)
else self._program.code)
self._code_textarea.insert('1.0', text)
scroll.set_widget(self._code_textarea)
scroll.pack(fill=BOTH, expand=1)
def _create_compilation_part(self, master):
self._create_compilation_label(master)
box = Frame(master)
self._create_compilation_output_label(box)
self._create_compilation_error_output_label(box)
box.pack(fill=BOTH)
box = Frame(master)
self._create_compilation_output_textarea(box)
self._create_compilation_error_output_textarea(box)
box.pack(fill=BOTH, expand=1)
def _create_compilation_label(self, master):
text = u"Kompilacja: "
if self._maybe_compilation_status is None:
text += u"brak informacji"
else:
text += u"czas %.2f s" % self._maybe_compilation_status.execution_time
if self._maybe_compilation_status.killed:
text += u" (zabity)"
text += u"."
self._compilation_label = Label(master, text=text, anchor=W)
self._compilation_label.pack(fill=BOTH)
def _create_compilation_output_label(self, master):
label = Label(master, text="Standardowe wyjście: ")
label.pack(side=LEFT, fill=BOTH, expand=1)
def _create_compilation_error_output_label(self, master):
label = Label(master, text="Standardowe wyjście błędów: ")
label.pack(side=LEFT, fill=BOTH, expand=1)
def _create_compilation_output_textarea(self, master):
scroll = Scrolled(master)
self._compilation_output_area = Text(
scroll, height=1, width=1,
font=tkFont.Font(**UnitInfoWindow.FONT_ATTRS))
text = (self._maybe_compilation_status.output
if self._maybe_compilation_status
else "")
self._compilation_output_area.insert('1.0', text)
self._compilation_output_area.configure(state=DISABLED)
scroll.set_widget(self._compilation_output_area)
scroll.pack(side=LEFT, fill=BOTH, expand=1)
def _create_compilation_error_output_textarea(self, master):
scroll = Scrolled(master)
self._compilation_error_output_area = Text(
scroll, height=1, width=1,
font=tkFont.Font(**UnitInfoWindow.FONT_ATTRS))
text = (self._maybe_compilation_status.error_output
if self._maybe_compilation_status
else "")
self._compilation_error_output_area.insert('1.0', text)
self._compilation_error_output_area.configure(state=DISABLED)
scroll.set_widget(self._compilation_error_output_area)
scroll.pack(side=LEFT, fill=BOTH, expand=1)
def _create_execution_part(self, master):
self._create_execution_label(master)
self._create_execution_input_label(master)
self._create_execution_input_area(master)
box = Frame(master)
self._create_execution_output_label(box)
self._create_execution_error_output_label(box)
box.pack(fill=BOTH)
box = Frame(master)
self._create_execution_output_textarea(box)
self._create_execution_error_output_textarea(box)
box.pack(fill=BOTH, expand=1)
def _create_execution_label(self, master):
text = u"Wykonanie: "
if self._maybe_run_status is None:
text += u"program nie został wykonany"
else:
text += u"czas %.2f s" % self._maybe_run_status.execution_time
if self._maybe_run_status.killed:
text += u" (zabity)"
text += u"."
self._execution_label = Label(master, text=text, anchor=W)
self._execution_label.pack(fill=BOTH)
def _create_execution_input_label(self, master):
label = Label(master, text='Standardowe wejście: ')
label.pack(fill=BOTH)
def _create_execution_input_area(self, master):
scroll = Scrolled(master)
self._execution_input_area = Text(
scroll, height=1, width=1,
font=tkFont.Font(**UnitInfoWindow.FONT_ATTRS))
text = (self._maybe_run_status.input
if self._maybe_run_status
else "")
self._execution_input_area.insert('1.0', text)
self._execution_input_area.configure(state=DISABLED)
scroll.set_widget(self._execution_input_area)
scroll.pack(fill=BOTH, expand=1)
def _create_execution_output_label(self, master):
label = Label(master, text="Standardowe wyjście: ")
label.pack(side=LEFT, fill=BOTH, expand=1)
def _create_execution_error_output_label(self, master):
label = Label(master, text="Standardowe wyjście błędów: ")
label.pack(side=LEFT, fill=BOTH, expand=1)
def _create_execution_output_textarea(self, master):
scroll = Scrolled(master)
self._execution_output_area = Text(
scroll, height=1, width=1,
font=tkFont.Font(**UnitInfoWindow.FONT_ATTRS))
text = (self._maybe_run_status.output
if self._maybe_run_status
else "")
self._execution_output_area.insert('1.0', text)
self._execution_output_area.configure(state=DISABLED)
scroll.set_widget(self._execution_output_area)
scroll.pack(side=LEFT, fill=BOTH, expand=1)
def _create_execution_error_output_textarea(self, master):
scroll = Scrolled(master)
self._execution_error_output_area = Text(
scroll, height=1, width=1,
font=tkFont.Font(**UnitInfoWindow.FONT_ATTRS))
text = (self._maybe_run_status.error_output
if self._maybe_run_status
else "")
self._execution_error_output_area.insert('1.0', text)
self._execution_error_output_area.configure(state=DISABLED)
scroll.set_widget(self._execution_error_output_area)
scroll.pack(side=LEFT, fill=BOTH, expand=1)
def apply(self):
language_index = int(self._language_listbox.curselection()[0])
language = UnitInfoWindow.LANGUAGES[language_index]
program = (language if language in (None, STAR_PROGRAM) else
Program(language, self._code_textarea.get('1.0', END)))
self._ok_callback(program)
class ClientApplication(Frame):
CONFIGURATION_FILE = 'configuration.ini'
FREQUENCY_OF_CHECKING_QUERY = 50 # ms
TIME_BETWEEN_TICS = 100 # ms
MAPS_DIRECTORY = 'maps'
GAMES_DIRECTORY = 'games'
MENU_GAME_LABEL = "Gra"
NEW_GAME_LABEL = "Stwórz nową grę..."
SAVE_GAME_LABEL = "Zapisz grę"
LOAD_GAME_LABEL = "Wczytaj grę..."
ADD_PLAYER_LABEL = "Dodaj nowego gracza..."
SET_PROGRAM_LABEL = "Ustaw program zaznaczonej jednostce..."
SET_STAR_PROGRAM_LABEL = "Ustaw star program zaznaczonej jednostce"
DELETE_PROGRAM_LABEL = "Usuń program zaznaczonej jednostce"
TIC_LABEL = "Symuluj jedną turę gry"
TIC_IN_LOOP_LABEL = "Symulacja gry w pętli"
QUIT_LABEL = "Wyjdź"
MENU_ABOUT_LABEL = "O grze"
CHOOSE_MAP_FILE = 'Wybierz mapę'
CHOOSE_DIRECTORY_FOR_NEW_GAME = "Wybierz folder dla nowej gry"
TITLE_CREATE_NEW_GAME = 'Stwórz nową grę'
CANNOT_CREATE_NEW_GAME = 'Nie można stworzyć nowej gry.'
CANNOT_OPEN_FILE = ('Nie można otworzyć pliku '
'(być może nie masz wystarczających uprawnień).')
MAP_FILE_IS_CORRUPTED = 'Plik mapy jest uszkodzony.'
CANNOT_CREATE_FOLDER = 'Nie można utworzyć folderu gry.'
FILE_WITH_THE_SAME_NAME_EXISTS = ('Nie można utworzyć folderu gry ponieważ '
'istnieje już plik o takiej samej nazwie.')
IO_ERROR_DURING_READING = 'Wystąpił błąd podczas czytania pliku.'
TITLE_SAVE_GAME = 'Zapisz grę'
CANNOT_SAVE_GAME = 'Nie można zapisać gry.'
IO_ERROR_DURING_SAVING = 'Wystąpił błąd podczas zapisywania pliku.'
TITLE_LOAD_GAME = 'Wczytaj grę'
CANNOT_LOAD_GAME = 'Nie można wczytać gry.'
TITLE_CREATE_PLAYER = 'Dodaj nowego gracza'
ENTER_NEW_PLAYER_NAME = 'Wpisz nazwę nowego gracza.'
TITLE_CREATE_PLAYER_CHOOSE_COLOR = 'Wybierz kolor dla nowego gracza.'
CANNOT_CREATE_PLAYER = 'Nie można dodać nowego gracza.'
NO_FREE_START_POSITION = \
'Wszystkie pozycje startowe na mapie są już zajęte.'
TITLE_CHOOSE_SOURCE_FILE = 'Wybierz plik źródłowy'
TITLE_SET_PROGRAM = 'Ustaw program'
CANNOT_SET_PROGRAM = 'Nie można ustawić programu.'
UNKNOWN_SOURCE_FILE_EXTENSION = 'Nieznane rozszerzenie pliku źródłowego.'
TITLE_ARE_YOU_SURE = 'Czy jesteś pewien?'
WARNING_CURRENT_GAME_WILL_BE_LOST = \
'Czy jesteś pewien? Aktualna gra zostanie bezpowrotnie utracona.'
TITLE_QUIT_PROGRAM = 'Wyjdź z programu'
QUIT_PROGRAM_QUESTION = 'Czy na pewno chcesz wyjść z programu?'
ABOUT_TITLE = 'O grze'
ABOUT_CONTENT = ('Scriptcraft - gra programistyczna.\n\n'
'Właścicielem grafiki i map jest Marek Szykuła. '
'Nie mogą być one kopiowane ani rozpowszechniane. \n\n'
'Kod źródłowy jest na licencji GPLv3 '
'i może być rozpowszechniany i kopiowany.')
TITLE_INVALID_CONFIGURATION_FILE = 'Niepoprawny plik konfiguracji'
INVALID_CONFIGURATION_FILE = ('Nie można wczytać ustawień z pliku '
'konfiguracji. Aplikacja zostanie '
'zamknięta. Sprawdź zawartość pliku "' + \
CONFIGURATION_FILE + \
'".')
DIRECTION_TO_NAME = {
direction.N : u'północ',
direction.W : u'zachód',
direction.S : u'południe',
direction.E : u'wschód',
}
MAP_FILE_TYPES = (
('Plik mapy', '*.map'),
('Wszystkie pliki', '*')
)
DEFAULT_PLAYER_COLORS = (
(178, 146, 0),
(128, 0, 0),
(0, 255, 220),
(255, 0, 255),
(0, 0, 255),
(0, 200, 0),
(255, 255, 0),
        (255, 0, 0), # the last color listed is handed out first (free_colors.pop())
)
# initializing --------------------------------------------------------
def __init__(self, master):
Frame.__init__(self, master)
self._tic_in_loop = BooleanVar(False)
self._init_gui()
self._game = None
self._game_session = None
self._queue = Queue()
self._master = master
self._pointed_unit_id = None
self._check_queue()
self._load_configuration_file()
if len(sys.argv) == 2 and sys.argv[1].lower() == '--test':
self._load_testing_game()
@log_on_enter('load game for testing')
def _load_testing_game(self):
filename = datafile_path('maps/small.map')
# create game_map
#game_map = load_game_map(open(filename, 'r').read())
def generate_simple_map():
import random
size = 96
game_map = GameMap((size, size), [(10, 10), (53, 10), (10, 53), (53, 53)])
number_of_trees = 0
for x in xrange(size):
for y in xrange(size):
p = 0.0
if (6 <= x <= 14 or 49 <= x <= 57 or
6 <= y <= 14 or 49 <= y <= 57):
p = 0.0
if (random.random() < p):
number_of_trees += 1
game_map[x, y].place_object(Tree())
game_map[x, y].change_ground(random.randint(1, 8))
log('map size: %d, number of fields: %d' % (size, size**2))
log('number of trees: %d' % number_of_trees)
return game_map
game = None
#game = Game(generate_simple_map(), DEFAULT_GAME_CONFIGURATION)
# create game and game session
session = GameSession(
directory='scriptcraft/.tmp',
system_configuration=self.system_configuration,
game=game)
self.set_game_session(session)
game = session.game
# modify game (set programs)
def set_program(unit_id, filename):
program = Program(Language.PYTHON,
open('scriptcraft/.tmp/'+filename).read())
game.set_program(game.units_by_IDs[unit_id], program)
try:
set_program(8, 'build_tank.py')
for i in xrange(3,7):
set_program(i, 'move_randomly.py')
for i in xrange(9,13):
set_program(i, 'move_randomly.py')
except Exception:
log_exception('cannot set program for testing game')
self._set_game(game)
def _check_queue(self):
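        # _tic() hands self._queue to the game session, which signals that the
        # new game state is ready by putting 'ready' on the queue (presumably
        # from outside the Tkinter callback chain).  Tk widgets should only be
        # touched from the GUI thread, so we poll the queue with master.after()
        # instead of blocking on it.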
if not self._queue.empty():
command = self._queue.get_nowait()
assert command == 'ready'
self._set_game(self._game_session.game)
if self._tic_in_loop.get():
self.master.after(ClientApplication.TIME_BETWEEN_TICS,
self._tic)
else:
self._game_viewer.show_loading_indicator(False)
self.master.after(ClientApplication.FREQUENCY_OF_CHECKING_QUERY,
self._check_queue)
def _load_configuration_file(self):
try:
filename = datafile_path(ClientApplication.CONFIGURATION_FILE)
self.system_configuration = SystemConfiguration(filename)
except (IOError, ValueError, ConfigParser.Error) as ex:
log_exception('invalid configuration file')
self._warning(
ClientApplication.TITLE_INVALID_CONFIGURATION_FILE,
ClientApplication.INVALID_CONFIGURATION_FILE
)
global root
root.destroy()
def _init_gui(self):
self.pack(expand=YES, fill=BOTH)
global root
root.protocol("WM_DELETE_WINDOW", self._quit_callback)
self._game_viewer = GameViewer(self)
self._game_viewer.bind('<<selection-changed>>',
self._selection_changed_callback)
self._game_viewer.bind('<<field-selected>>',
self._field_selected_callback)
self._game_viewer.bind('<Button-3>',
self._command_ordered_callback)
self._game_viewer.bind('<<field-double-clicked>>',
self._field_double_clicked_callback)
self._create_menubar()
self._create_keyboard_shortcuts()
def _create_menubar(self):
menubar = Menu(self)
self._game_menu = Menu(menubar, tearoff=0)
menubar.add_cascade(label=ClientApplication.MENU_GAME_LABEL,
menu=self._game_menu)
self._game_menu.add_command(
label=ClientApplication.NEW_GAME_LABEL,
command=self._new_game_callback)
self._game_menu.add_command(
label=ClientApplication.SAVE_GAME_LABEL,
command=self._save_game_callback,
state=DISABLED)
self._game_menu.add_command(
label=ClientApplication.LOAD_GAME_LABEL,
command=self._load_game_callback)
self._game_menu.add_separator()
self._game_menu.add_command(
label=ClientApplication.ADD_PLAYER_LABEL,
command=self._add_player_callback,
state=DISABLED)
self._game_menu.add_command(
label=ClientApplication.DELETE_PROGRAM_LABEL,
command=self._delete_program_callback,
state=DISABLED)
self._game_menu.add_command(
label=ClientApplication.SET_PROGRAM_LABEL,
command=self._set_program_callback,
state=DISABLED)
self._game_menu.add_command(
label=ClientApplication.SET_STAR_PROGRAM_LABEL,
command=self._set_star_program_callback,
state=DISABLED)
self._game_menu.add_command(
label=ClientApplication.TIC_LABEL,
command=self._tic_callback,
state=DISABLED)
self._game_menu.add_checkbutton(
label=ClientApplication.TIC_IN_LOOP_LABEL,
command=lambda: self._tic_in_loop_callback(switch=False),
state=DISABLED,
variable=self._tic_in_loop)
self._game_menu.add_separator()
self._game_menu.add_command(
label=ClientApplication.QUIT_LABEL,
command=self._quit_callback)
menubar.add_command(label=ClientApplication.MENU_ABOUT_LABEL,
command=self._about_callback)
global root
root.config(menu=menubar)
def _create_keyboard_shortcuts(self):
# new game
self._game_menu.entryconfigure(
ClientApplication.NEW_GAME_LABEL,
accelerator="Ctrl+N")
args = ("<Control-n>", lambda w: self._new_game_callback())
self.bind(*args)
self._game_viewer.bind(*args)
self._game_menu.bind(*args)
# save game
self._game_menu.entryconfigure(
ClientApplication.SAVE_GAME_LABEL,
accelerator="Ctrl+S")
args = ("<Control-s>", lambda w: self._save_game_callback())
self.bind(*args)
self._game_viewer.bind(*args)
self._game_menu.bind(*args)
# load game
self._game_menu.entryconfigure(
ClientApplication.LOAD_GAME_LABEL,
accelerator="Ctrl+O")
args = ("<Control-o>", lambda w: self._load_game_callback())
self.bind(*args)
self._game_viewer.bind(*args)
self._game_menu.bind(*args)
# add player
self._game_menu.entryconfigure(
ClientApplication.ADD_PLAYER_LABEL,
accelerator="Ctrl+A")
args = ("<Control-a>", lambda w: self._add_player_callback())
self.bind(*args)
self._game_viewer.bind(*args)
self._game_menu.bind(*args)
# tic item
self._game_menu.entryconfigure(
ClientApplication.TIC_LABEL,
accelerator="T")
args = ("<t>", lambda w: self._tic_callback())
self.bind(*args)
self._game_viewer.bind(*args)
self._game_menu.bind(*args)
# tic in loop item
self._game_menu.entryconfigure(
ClientApplication.TIC_IN_LOOP_LABEL,
accelerator='spacja')
args = ("<space>", lambda w: self._tic_in_loop_callback(switch=True))
self.bind(*args)
self._game_viewer.bind(*args)
self._game_menu.bind(*args)
# quit program
self._game_menu.entryconfigure(
ClientApplication.QUIT_LABEL,
accelerator="Ctrl+Q")
args = ("<Control-q>", lambda w: self._quit_callback())
self.bind(*args)
self._game_viewer.bind(*args)
self._game_menu.bind(*args)
# callbacks ----------------------------------------------------------
@log_on_enter('use case: new game', lvl='info')
def _new_game_callback(self):
if not self._ask_if_delete_current_game_if_exists():
return
map_filename = tkFileDialog.askopenfilename(
title=ClientApplication.CHOOSE_MAP_FILE,
filetypes=ClientApplication.MAP_FILE_TYPES,
initialdir=datafile_path(ClientApplication.MAPS_DIRECTORY),
parent=self,
)
if not map_filename:
return
directory = tkFileDialog.askdirectory(
title=ClientApplication.CHOOSE_DIRECTORY_FOR_NEW_GAME,
initialdir=datafile_path(ClientApplication.GAMES_DIRECTORY),
mustexist=False,
parent=self,
)
if is_it_py2exe_distribution():
directory = directory.replace('/', '\\')
if not directory:
return
try:
stream = open(map_filename, 'r')
except IOError as ex:
log_exception('io error during opening stream to map file')
self._warning(ClientApplication.TITLE_CREATE_NEW_GAME,
ClientApplication.CANNOT_CREATE_NEW_GAME + ' ' + \
ClientApplication.CANNOT_OPEN_FILE)
return
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
log_exception('cannot create directory for a game')
self._warning(ClientApplication.TITLE_CREATE_NEW_GAME,
ClientApplication.CANNOT_CREATE_NEW_GAME + ' ' + \
ClientApplication.CANNOT_CREATE_FOLDER)
return
else:
if not os.path.isdir(directory):
self._warning(ClientApplication.TITLE_CREATE_NEW_GAME,
ClientApplication.CANNOT_CREATE_NEW_GAME + ' ' + \
ClientApplication.FILE_WITH_THE_SAME_NAME_EXISTS)
return
try:
game_map = load_game_map(stream.read())
except InvalidGameMapData as ex:
log_exception('invalid game map data')
self._warning(ClientApplication.TITLE_CREATE_NEW_GAME,
ClientApplication.CANNOT_CREATE_NEW_GAME + ' ' + \
ClientApplication.MAP_FILE_IS_CORRUPTED)
except IOError as ex:
log_exception('io error during loading map file')
self._warning(ClientApplication.TITLE_CREATE_NEW_GAME,
ClientApplication.CANNOT_CREATE_NEW_GAME + ' ' + \
ClientApplication.IO_ERROR_DURING_READING)
else:
game = Game(game_map, DEFAULT_GAME_CONFIGURATION)
game_session = GameSession(directory,
self.system_configuration,
game=game)
self.set_game_session(game_session)
finally:
stream.close()
@log_on_enter('use case: save game', mode='time', lvl='info')
def _save_game_callback(self):
try:
self._game_session.save()
except IOError as ex:
            log_exception('io error during saving game')
self._warning(ClientApplication.TITLE_SAVE_GAME,
ClientApplication.CANNOT_SAVE_GAME + ' ' + \
ClientApplication.IO_ERROR_DURING_SAVING)
@log_on_enter('use case: load game', lvl='info')
def _load_game_callback(self):
if not self._ask_if_delete_current_game_if_exists():
return
directory = tkFileDialog.askdirectory(
title=ClientApplication.TITLE_LOAD_GAME,
initialdir=datafile_path(ClientApplication.GAMES_DIRECTORY),
mustexist=True,
parent=self,
)
if is_it_py2exe_distribution():
directory = directory.replace('/', '\\')
if not directory:
return
try:
game_session = GameSession(directory, self.system_configuration)
except IOError as ex:
log_exception('io error during loading game')
self._warning(ClientApplication.TITLE_LOAD_GAME,
ClientApplication.CANNOT_LOAD_GAME + ' ' + \
ClientApplication.IO_ERROR_DURING_READING)
except pickle.UnpicklingError as ex:
log_exception('pickle error during loading game')
self._warning(ClientApplication.TITLE_LOAD_GAME,
ClientApplication.CANNOT_LOAD_GAME + ' ' + \
ClientApplication.MAP_FILE_IS_CORRUPTED)
else:
self.set_game_session(game_session)
@log_on_enter('use case: add player', lvl='info')
def _add_player_callback(self):
if self._game is None:
return
name = tkSimpleDialog.askstring(
title=ClientApplication.TITLE_CREATE_PLAYER,
prompt=ClientApplication.ENTER_NEW_PLAYER_NAME,
parent=self)
if name is None:
return
color = self._reserve_color()
try:
self._game_session.new_player_with_units(name, color)
except NoFreeStartPosition:
self._warning(ClientApplication.TITLE_CREATE_PLAYER,
ClientApplication.CANNOT_CREATE_PLAYER + ' ' + \
ClientApplication.NO_FREE_START_POSITION)
else:
self._set_game(self._game)
@log_on_enter('use case: set program', lvl='info')
def _set_program_callback(self):
stream = tkFileDialog.askopenfile(
title=ClientApplication.TITLE_CHOOSE_SOURCE_FILE,
mode='r',
parent=self)
if stream is None:
return
filename = stream.name
if filename.endswith('.cpp'):
language = Language.CPP
elif filename.endswith('.py'):
language = Language.PYTHON
else:
self._warning(ClientApplication.TITLE_SET_PROGRAM,
ClientApplication.CANNOT_SET_PROGRAM + ' ' + \
ClientApplication.UNKNOWN_SOURCE_FILE_EXTENSION)
return
field = self._game.game_map[self._game_viewer._pointer_position]
unit = field.maybe_object
program = Program(language=language, code=stream.read())
self._game_session.set_program(unit, program)
@log_on_enter('use case: set star program', lvl='info')
def _set_star_program_callback(self):
field = self._game.game_map[self._game_viewer._pointer_position]
unit = field.maybe_object
self._game_session.set_program(unit, STAR_PROGRAM)
@log_on_enter('use case: delete program', lvl='info')
def _delete_program_callback(self):
field = self._game.game_map[self._game_viewer._pointer_position]
unit = field.maybe_object
self._game_session.set_program(unit, None)
@log_on_enter('use case: tic', mode='time', lvl='info')
def _tic_callback(self):
self._tic()
@log_on_enter('use case: switch tic in loop', lvl='info')
def _tic_in_loop_callback(self, switch):
if switch:
self._tic_in_loop.set(not self._tic_in_loop.get())
if self._tic_in_loop.get():
self._tic()
@log_on_enter('use case: quit', lvl='info')
def _quit_callback(self):
if not self._ask_if_quit_program():
return
global root
root.destroy()
@log_on_enter('use case: about game', lvl='info')
def _about_callback(self):
tkMessageBox.showinfo(
title=ClientApplication.ABOUT_TITLE,
message=ClientApplication.ABOUT_CONTENT,
parent=self)
def _selection_changed_callback(self, event):
# first, update the corner text
pos = self._game_viewer.selection_position
if pos is None:
text = u" "
else:
field = self._game.game_map[pos]
obj = field.maybe_object
if obj is None:
obj_info = u""
elif isinstance(obj, Tree):
obj_info = u"Drzewa."
elif isinstance(obj, MineralDeposit):
obj_info = u"Złoża minerałów (%d jednostek minerałów)." % obj.minerals
elif isinstance(obj, Unit):
# type of the unit
if obj.type.main_name == '4': # base
obj_info = u"Baza (%d minerałów)" % obj.minerals
elif obj.type.main_name == '5': # miner
state = (u'pełny' if obj.minerals else u'pusty')
obj_info = u"Zbieracz minerałów (%s)" % state
elif obj.type.main_name == '6': # tank
obj_info = u"Czołg"
else:
                    assert False, 'oops, unknown unit type %r' % obj.type
# player
obj_info += u' gracza %s.' % obj.player.name
# command
if isinstance(obj.command, cmds.StopCommand):
command_info = u'stop'
elif isinstance(obj.command, cmds.MoveCommand):
d = ClientApplication.DIRECTION_TO_NAME[obj.command.direction]
command_info = u'idź na %s' % d
elif isinstance(obj.command, cmds.ComplexMoveCommand):
command_info = u'idź do (%d, %d)' \
% obj.command.destination
elif isinstance(obj.command, cmds.ComplexGatherCommand):
command_info = u'zbieraj minerały z (%d, %d)' \
% obj.command.destination
elif isinstance(obj.command, cmds.FireCommand):
command_info = u'ogień na (%d, %d)' \
% obj.command.destination
elif isinstance(obj.command, cmds.ComplexAttackCommand):
command_info = u'atak na (%d, %d)' \
% obj.command.destination
elif isinstance(obj.command, cmds.BuildCommand):
command_info = u'buduj "%s"' \
% obj.command.unit_type_name
obj_info += u' Komenda: %s.' % command_info
else:
assert False, 'oops, unknown object on map %r' % obj
field_info = u"Pole (%d, %d)." % (pos[0], pos[1])
text = u" ".join([field_info, obj_info])
self._game_viewer.set_corner_text(text)
self._refresh_game_menu_items_state()
# second, update second pointer
self._update_pointer_2()
def _field_selected_callback(self, event):
pos = self._game_viewer.selection_position
if pos is None:
self._game_viewer.set_pointer_position(None)
else:
obj = self._game.game_map[pos].maybe_object
if obj and isinstance(obj, Unit):
self._pointed_unit_id = obj.ID
self._game_viewer.set_pointer_position(pos)
else:
self._pointed_unit_id = None
self._game_viewer.set_pointer_position(None)
self._update_pointer_2()
def _field_double_clicked_callback(self, event):
if self._game is None:
return
pos = self._game_viewer._pointer_position
if pos is None:
return
unit = self._game.game_map[pos].maybe_object
assert unit is not None
# because self._game_viewer._pointer_position is set to sth other than
# None only if there is an object on pointed field
def ok_callback(program):
if unit.ID in self._game_session.units_by_IDs:
self._game_session.set_program(unit, program)
window = UnitInfoWindow(self,
program=unit.program,
maybe_compilation_status=unit.maybe_last_compilation_status,
maybe_run_status=unit.maybe_run_status,
ok_callback=ok_callback)
def _command_ordered_callback(self, event):
command = self._command_for_pointed_unit()
clicked_pos = self._game_viewer._pointer_position
if clicked_pos is None:
return
clicked_obj = self._game.game_map[clicked_pos].maybe_object
pointed_pos = self._game_viewer.selection_position
if clicked_obj is None or not isinstance(clicked_obj, Unit):
return
unit = clicked_obj
if command == '':
self._game_session.set_program(unit, Program(Language.OUTPUT, ''))
return
command = {
'move' : 'MOVE %(x)d %(y)d',
'attack' : 'ATTACK %(x)d %(y)d',
'gather' : 'GATHER %(x)d %(y)d',
}[command]
command = command % {'x':pointed_pos[0], 'y':pointed_pos[1]}
self._game_session.set_program(unit, Program(Language.OUTPUT, command))
self._pointed_unit_id = None
self._game_viewer.set_pointer_2_position(None)
self._game_viewer.set_pointer_position(None)
def _update_pointer_2(self):
command = self._command_for_pointed_unit()
if command == '':
self._game_viewer.set_pointer_2_position(None)
return
pos = self._game_viewer.selection_position
self._game_viewer.set_pointer_2_position(pos)
color = {
'attack' : 'red',
'gather' : 'darkblue',
'move' : 'green',
}[command]
self._game_viewer.set_pointer_2_color(color)
def _command_for_pointed_unit(self):
""" Or '' if there is no pointed unit """
pos = self._game_viewer._pointer_position # selected (clicked) field
if pos is None:
return ''
clicked_unit = self._game.game_map[pos].maybe_object
if not clicked_unit or not isinstance(clicked_unit, Unit):
return ''
pointed_position = self._game_viewer.selection_position # field under cursor
if pointed_position is None:
return ''
selected_obj = self._game.game_map[pointed_position].maybe_object
if (clicked_unit.type.can_attack and
clicked_unit.type.movable):
selected_own_unit = (selected_obj is not None and
isinstance(selected_obj, Unit) and
selected_obj.player == clicked_unit.player)
if selected_own_unit:
return 'move'
else:
return 'attack'
elif (clicked_unit.type.has_storage and
clicked_unit.type.movable and
selected_obj is not None and
isinstance(selected_obj, MineralDeposit)):
return 'gather'
elif (clicked_unit.type.movable
and pos != pointed_position):
return 'move'
return ''
# other methods -------------------------------------------------------
def _tic(self):
try:
self._game_viewer.show_loading_indicator(True)
self._game_session.tic(self._queue)
except AlreadyExecuteGame as ex:
log('already execute game')
@log_on_enter('set game session')
def set_game_session(self, game_session):
self._game_session = game_session
self._set_game(None)
self._tic_in_loop.set(False)
self._queue = Queue()
if game_session:
self._set_game(game_session.game)
def _set_game(self, game):
""" Call it if game instance was changed and you want to make
the application up to date."""
# set game.free_colors
if game is not None and not hasattr(game, 'free_colors'):
if self._game is None or not hasattr(self._game, 'free_colors'):
game.free_colors = \
list(ClientApplication.DEFAULT_PLAYER_COLORS)
else:
game.free_colors = self._game.free_colors
# track pointed unit
self._game_viewer.set_pointer_position(None)
if game and self._pointed_unit_id:
unit = game.units_by_IDs.get(self._pointed_unit_id, None)
if unit:
self._game_viewer.set_pointer_position(unit.position)
# other stuff
self._game = game
self._game_viewer.set_game(game)
self._refresh_game_menu_items_state()
def _reserve_color(self):
if self._game.free_colors:
return self._game.free_colors.pop()
else:
rand = lambda: random.randint(0, 255)
return (rand(), rand(), rand())
def _print_info_about_field_at(self, position):
field = self._game.game_map[position]
print "\nSelected position: (%d, %d)" % position
print "Field: %s" % str(field)
if isinstance(field.maybe_object, Unit):
unit = field.maybe_object
print "Unit: %s" % (unit,)
print "Compilation: %s" % (unit.maybe_last_compilation_status,)
print "Executing: %s" % (unit.maybe_run_status,)
def _refresh_game_menu_items_state(self):
has_game = self._game is not None
obj = (self._game.game_map[self._game_viewer._pointer_position].maybe_object
if has_game and self._game_viewer._pointer_position is not None
else None)
has_unit = (self._game is not None and
self._game_viewer._pointer_position is not None and
isinstance(obj, Unit))
state = NORMAL if has_game else DISABLED
entries = [ClientApplication.ADD_PLAYER_LABEL,
ClientApplication.SAVE_GAME_LABEL,
ClientApplication.TIC_LABEL,
ClientApplication.TIC_IN_LOOP_LABEL]
for entry in entries:
self._game_menu.entryconfigure(entry, state=state)
state = NORMAL if has_unit else DISABLED
entries = [ClientApplication.SET_PROGRAM_LABEL,
ClientApplication.SET_STAR_PROGRAM_LABEL,
ClientApplication.DELETE_PROGRAM_LABEL]
for entry in entries:
self._game_menu.entryconfigure(entry, state=state)
def _warning(self, title, text):
tkMessageBox.showwarning(title, text, parent=self)
def _ask_if_delete_current_game_if_exists(self):
if self._game:
return tkMessageBox.askyesno(ClientApplication.TITLE_ARE_YOU_SURE,
ClientApplication.WARNING_CURRENT_GAME_WILL_BE_LOST,
icon=tkMessageBox.WARNING,
parent=self
)
else:
return True
def _ask_if_quit_program(self):
return tkMessageBox.askyesno(
ClientApplication.TITLE_QUIT_PROGRAM,
ClientApplication.QUIT_PROGRAM_QUESTION,
icon=tkMessageBox.WARNING,
parent=self
)
def run_with_profiling():
# profile run function
filename = '.stats'
import cProfile
cProfile.run('run()', filename)
import pstats
p = pstats.Stats(filename)
p.strip_dirs()
p.sort_stats('cumulative')
p.dump_stats(filename)
p.print_stats(25)
def run():
# prevent "errors occured" message box in py2exe distribution
turn_off_standard_streams_if_it_is_py2exe_distribution()
# run it!
global root, app
init_logging('debug')
try:
root = Tk()
root.report_callback_exception = log_error_callback
app = ClientApplication(master=root)
app.mainloop()
except Exception as ex:
log_exception('Unhandled exception outside tkinter!')
finally:
shutdown_logging()
if __name__ == "__main__":
run() # replace with run_with_profiling to enable profiling
| gpl-3.0 | -163,926,196,609,843,170 | 37.956345 | 92 | 0.559653 | false |
mikedh/trimesh | trimesh/path/exchange/load.py | 1 | 2622 | import os
from .dxf import _dxf_loaders
from .svg_io import svg_to_path
from ..path import Path
from . import misc
from ... import util
def load_path(file_obj, file_type=None, **kwargs):
"""
    Load a file into a Path object.
    Parameters
    -----------
    file_obj : One of the following:
         - Path, Path2D, or Path3D object
         - open file object (dxf or svg)
         - file name (dxf or svg)
         - shapely.geometry.Polygon
         - shapely.geometry.MultiLineString
         - dict with kwargs for the Path constructor
         - (n, 2, (2|3)) float, line segments
    file_type : str
        Type of file; only required if an open
        file object is passed.
    Returns
    ---------
    path : Path, Path2D, or Path3D object
        Data as a native trimesh Path object
"""
if isinstance(file_obj, Path):
        # we were passed a Path object already,
        # so return it unchanged
return file_obj
elif util.is_file(file_obj):
        # for open file objects use loaders
kwargs.update(path_loaders[file_type](
file_obj, file_type=file_type))
elif util.is_string(file_obj):
        # strings are treated as file paths and opened
with open(file_obj, 'rb') as file_file_obj:
# get the file type from the extension
file_type = os.path.splitext(file_obj)[-1][1:].lower()
# call the loader
kwargs.update(path_loaders[file_type](
file_file_obj, file_type=file_type))
elif util.is_instance_named(file_obj, 'Polygon'):
# convert from shapely polygons to Path2D
kwargs.update(misc.polygon_to_path(file_obj))
elif util.is_instance_named(file_obj, 'MultiLineString'):
# convert from shapely LineStrings to Path2D
kwargs.update(misc.linestrings_to_path(file_obj))
elif isinstance(file_obj, dict):
# load as kwargs
from ...exchange.load import load_kwargs
return load_kwargs(file_obj)
elif util.is_sequence(file_obj):
# load as lines in space
kwargs.update(misc.lines_to_path(file_obj))
else:
raise ValueError('Not a supported object type!')
from ...exchange.load import load_kwargs
return load_kwargs(kwargs)
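# Illustrative usage sketch (hypothetical file names; any of the input types
# listed in the docstring above work the same way):
#   path = load_path('drawing.dxf')                      # from a file name
#   path = load_path(open('drawing.svg', 'rb'),          # from an open file
#                    file_type='svg')
#   path = load_path(some_shapely_polygon)               # from a Polygon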
def path_formats():
"""
Get a list of supported path formats.
Returns
------------
loaders : list of str
Extensions of loadable formats, ie:
['svg', 'dxf']
"""
return list(path_loaders.keys())
path_loaders = {'svg': svg_to_path}
path_loaders.update(_dxf_loaders)
| mit | 7,173,151,525,337,611,000 | 29.847059 | 66 | 0.615179 | false |
jakdot/pyactr | tutorials/u7_simplecompilation.py | 1 | 1254 | """
Testing a simple case of production compilation. The model below also demonstrates utility learning.
"""
import warnings
import pyactr as actr
class Compilation1(object):
"""
Model testing compilation -- basic cases.
"""
def __init__(self, **kwargs):
actr.chunktype("state", "starting ending")
self.m = actr.ACTRModel(**kwargs)
self.m.goal.add(actr.makechunk(nameofchunk="start", typename="state", starting=1))
self.m.productionstring(name="one", string="""
=g>
isa state
starting =x
ending ~=x
==>
=g>
isa state
ending =x""", utility=2)
self.m.productionstring(name="two", string="""
=g>
isa state
starting =x
ending =x
==>
=g>
isa state
starting =x
ending 4""")
if __name__ == "__main__":
warnings.simplefilter("ignore")
mm = Compilation1(production_compilation=True, utility_learning=True)
model = mm.m
sim = model.simulation(realtime=True)
sim.run(0.5)
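    # If both rules fired during the 0.5 s run, production compilation should
    # have produced a combined rule named "one and two"; the print below
    # displays it (assuming compilation had time to occur in this short run).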
print(model.productions["one and two"])
| gpl-3.0 | 8,944,572,004,527,267,000 | 24.591837 | 133 | 0.53429 | false |
duyet-website/api.duyet.net | lib/docs/conf.py | 1 | 8187 | # -*- coding: utf-8 -*-
#
# Faker documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 11 11:25:48 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'faker.build_docs',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Faker'
copyright = u'2014, Daniele Faraglia'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7.5'
# The full version, including alpha/beta/rc tags.
release = '0.7.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Fakerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Faker.tex', u'Faker Documentation',
u'Daniele Faraglia', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'faker', u'Faker Documentation',
[u'Daniele Faraglia'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Faker', u'Faker Documentation',
u'Daniele Faraglia', 'Faker', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | 5,187,688,238,013,529,000 | 30.367816 | 79 | 0.706119 | false |
coyotevz/nobix-app | nbs/models/misc.py | 1 | 3865 | # -*- coding: utf-8 -*-
from datetime import datetime
from sqlalchemy.ext.declarative import declared_attr
from nbs.models import db
class TimestampMixin(object):
created = db.Column(db.DateTime, default=datetime.now)
modified = db.Column(db.DateTime, default=datetime.now,
onupdate=datetime.now)
@staticmethod
def stamp_modified(mapper, connection, target):
if db.object_session(target).is_modified(target):
target.modified = datetime.now()
@classmethod
def __declare_last__(cls):
db.event.listen(cls, 'before_update', cls.stamp_modified)
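# Illustrative sketch (hypothetical model, not part of this module): a model
# opts in to automatic timestamps simply by mixing TimestampMixin in, e.g.
#   class Note(TimestampMixin, db.Model):
#       __tablename__ = 'note'
#       id = db.Column(db.Integer, primary_key=True)
# 'created' is filled on insert and 'modified' is refreshed on update by the
# 'before_update' listener registered in __declare_last__.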
class RefEntityMixin(object):
@declared_attr
def entity_id(cls):
return db.Column('entity_id', db.Integer, db.ForeignKey('entity.id'),
nullable=False)
@declared_attr
def entity(cls):
name = cls.__name__.lower()
return db.relationship('Entity',
backref=db.backref(name, lazy='joined'),
lazy='joined')
class Address(RefEntityMixin, db.Model):
"""Stores addresses information"""
__tablename__ = 'address'
id = db.Column(db.Integer, primary_key=True)
address_type = db.Column(db.Unicode)
street = db.Column(db.Unicode(128), nullable=False)
city = db.Column(db.Unicode(64))
province = db.Column(db.Unicode(32), nullable=False)
postal_code = db.Column(db.Unicode(32))
    def __str__(self):
retval = self.street
if self.city:
retval += ", {}".format(self.city)
retval += ", {}".format(self.province)
if self.postal_code:
retval += " ({})".format(self.postal_code)
return retval
def __repr__(self):
return "<Address '{}' of '{}: {}'>".format(
str(self),
self.entity.entity_type,
self.entity.full_name
)
class Phone(RefEntityMixin, db.Model):
"""Model to store phone information"""
__tablename__ = 'phone'
id = db.Column(db.Integer, primary_key=True)
phone_type = db.Column(db.Unicode)
prefix = db.Column(db.Unicode(8))
number = db.Column(db.Unicode, nullable=False)
extension = db.Column(db.Unicode(5))
def __str__(self):
retval = self.phone_type+': ' if self.phone_type else ''
if self.prefix:
retval += "({})".format(self.prefix)
retval += self.number
if self.extension:
retval += " ext: {}".format(self.extension)
return retval
def __repr__(self):
return "<Phone '{}' of '{}: {}'>".format(
str(self),
self.entity.entity_type,
self.entity.full_name
)
class Email(RefEntityMixin, db.Model):
"""Model to store email information"""
__tablename__ = 'email'
id = db.Column(db.Integer, primary_key=True)
email_type = db.Column(db.Unicode(50))
email = db.Column(db.Unicode(50), nullable=False)
def __str__(self):
retval = self.email_type + ': ' if self.email_type else ''
retval += self.email
return retval
def __repr__(self):
return "<Email '{}' of '{}: {}'>".format(
str(self),
self.entity.entity_type,
self.entity.full_name
)
class ExtraField(RefEntityMixin, db.Model):
"""Model to store information of additional data"""
__tablename__ = 'extra_field'
id = db.Column(db.Integer, primary_key=True)
field_name = db.Column(db.Unicode(50), nullable=False)
field_value = db.Column(db.Unicode(50), nullable=False)
def __str__(self):
return self.field_name + ': ' + self.field_value
def __repr__(self):
return "<ExtraField '{}' of '{}: {}'>".format(
str(self),
self.entity.entity_type,
self.entity.full_name
)
| mit | 6,016,156,342,892,989,000 | 28.280303 | 77 | 0.574386 | false |
pablodiguerero/asterisk.api | migrations/versions/4_add_physical_users_.py | 1 | 1968 | """empty message
Revision ID: a374e36d0888
Revises: 4a6559da7594
Create Date: 2017-05-21 22:53:53.490856
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm import Session
from models import physical
from models import user
# revision identifiers, used by Alembic.
revision = '4_add_physical_users'
down_revision = '3_modify_user_fields'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('crm_physical',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('fam', sa.String(length=255), nullable=True),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('otch', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('crm_users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('login', sa.String(length=255), nullable=False),
sa.Column('password', sa.LargeBinary(), nullable=False),
sa.Column('access_level', sa.Integer(), server_default='10', nullable=False),
sa.Column('is_active', sa.Boolean(), server_default='f', nullable=False),
sa.ForeignKeyConstraint(['id'], ['crm_physical.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('login')
)
op.add_column('crm_users', sa.Column('sip_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'crm_users', 'asterisk_sip_users', ['sip_id'], ['id'], ondelete='SET NULL')
# ### end Alembic commands ###
bind = op.get_bind()
session = Session(bind)
phys = physical.Physical("Администратор")
phys.user = user.User("admin", "admin")
phys.user.access_level = user.User.BOSS
session.add(phys)
session.commit()
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('crm_users')
op.drop_table('crm_physical')
# ### end Alembic commands ###
| mit | 1,240,968,507,135,020,300 | 31.583333 | 107 | 0.670588 | false |
wesm/ibis | dev/merge-pr.py | 1 | 8184 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to
# Apache.
# usage: ./apache-pr-merge.py (see config env vars below)
#
# Lightly modified from version of this script in incubator-parquet-format
from __future__ import print_function
from requests.auth import HTTPBasicAuth
import requests
import os
import subprocess
import sys
import textwrap
from six.moves import input
import six
IBIS_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]
PROJECT_NAME = 'ibis'
print("IBIS_HOME = " + IBIS_HOME)
# Remote name with the PR
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "upstream")
# Remote name where results pushed
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "upstream")
GITHUB_BASE = "https://github.com/cloudera/" + PROJECT_NAME + "/pull"
GITHUB_API_BASE = "https://api.github.com/repos/cloudera/" + PROJECT_NAME
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
os.chdir(IBIS_HOME)
auth_required = False
if auth_required:
GITHUB_USERNAME = os.environ['GITHUB_USER']
import getpass
GITHUB_PASSWORD = getpass.getpass('Enter github.com password for %s:'
% GITHUB_USERNAME)
def get_json_auth(url):
auth = HTTPBasicAuth(GITHUB_USERNAME, GITHUB_PASSWORD)
req = requests.get(url, auth=auth)
return req.json()
get_json = get_json_auth
else:
def get_json_no_auth(url):
req = requests.get(url)
return req.json()
get_json = get_json_no_auth
def fail(msg):
print(msg)
clean_up()
sys.exit(-1)
def run_cmd(cmd):
if isinstance(cmd, six.string_types):
cmd = cmd.split(' ')
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
# this avoids hiding the stdout / stderr of failed processes
print('Command failed: %s' % cmd)
print('With output:')
print('--------------')
print(e.output)
print('--------------')
raise e
if isinstance(output, six.binary_type):
output = output.decode('utf-8')
return output
def continue_maybe(prompt):
result = input("\n%s (y/n): " % prompt)
if result.lower() != "y":
fail("Okay, exiting")
original_head = run_cmd("git rev-parse HEAD")[:8]
def clean_up():
print("Restoring head pointer to %s" % original_head)
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
print("Deleting local branch %s" % branch)
run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref):
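    # Overall flow: fetch the PR head and the target branch into temporary
    # local branches, squash-merge the PR into the target, build a commit
    # message crediting every distinct author and listing the squashed
    # commits, push the result to PUSH_REMOTE_NAME, then clean up.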
pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num,
target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num,
pr_branch_name))
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref,
target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
had_conflicts = False
try:
run_cmd(['git', 'merge', pr_branch_name, '--squash'])
except Exception as e:
msg = ("Error merging: %s\nWould you like to "
"manually fix-up this merge?" % e)
continue_maybe(msg)
msg = ("Okay, please fix any conflicts and 'git add' "
"conflicting files... Finished?")
continue_maybe(msg)
had_conflicts = True
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x),
reverse=True)
primary_author = distinct_authors[0]
commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%h [%an] %s']).split("\n\n")
merge_message_flags = []
merge_message_flags += ["-m", title]
if body is not None:
merge_message_flags += ["-m", '\n'.join(textwrap.wrap(body))]
authors = "\n".join(["Author: %s" % a for a in distinct_authors])
merge_message_flags += ["-m", authors]
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = ("This patch had conflicts when merged, "
"resolved by\nCommitter: %s <%s>" %
(committer_name, committer_email))
merge_message_flags += ["-m", message]
# The string "Closes #%s" string is required for GitHub to correctly close
# the PR
merge_message_flags += [
"-m",
"Closes #%s from %s and squashes the following commits:"
% (pr_num, pr_repo_desc)]
for c in commits:
merge_message_flags += ["-m", c]
run_cmd(['git', 'commit',
'--no-verify', # do not run commit hooks
'--author="%s"' % primary_author] +
merge_message_flags)
continue_maybe("Merge complete (local ref %s). Push to %s?" % (
target_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name,
target_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % pr_num)
print("Merge hash: %s" % merge_hash)
return merge_hash
branches = get_json("%s/branches" % GITHUB_API_BASE)
branch_names = filter(lambda x: x.startswith("branch-"),
[x['name'] for x in branches])
pr_num = input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
if pr["merged"] is True:
print("Pull request {0} has already been merged, assuming "
"you want to backport".format(pr_num))
merge_commit_desc = run_cmd([
'git', 'log', '--merges', '--first-parent',
'--grep=pull request #%s' % pr_num, '--oneline']).split("\n")[0]
if merge_commit_desc == "":
fail("Couldn't find any merge commit for #{0}"
", you may need to update HEAD.".format(pr_num))
merge_hash = merge_commit_desc[:7]
message = merge_commit_desc[8:]
print("Found: %s" % message)
sys.exit(0)
if not bool(pr["mergeable"]):
msg = ("Pull request {0} is not mergeable in its current form.\n"
"Continue? (experts only!)".format(pr_num))
continue_maybe(msg)
print("\n=== Pull Request #%s ===" % pr_num)
print("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref)
| apache-2.0 | -6,758,902,495,747,892,000 | 31.86747 | 78 | 0.601417 | false |
vinoth3v/In | In/core/valuator.py | 1 | 7873 | import re
from In.core.object_meta import ObjectMetaBase
class ValuatorContainer(dict):
def __missing__(self, key):
vcls = IN.register.get_class(key, 'Valuator')
obj = vcls()
self[key] = obj
return obj
class ValuatorEngine:
	'''Valuator class that validates values based on validation rules.
Instance available as IN.valuator
'''
# dict of all Valuator instances
valuators = ValuatorContainer()
def validate(self, value, rule): # rule is ['type', args] or [[], [], []]
'''
#TODO: allow per false error message
rule = [
'And', [
['Length', '>', 6, 'The value length should be greater than 6.'],
['Not', [['Num']],
['Or', [
['Email', 'Invalid email address.'],
['Domain'],
['Url', 'Invalid Url.'],
]],
]],
]
'''
if not rule: # empty list
return [True]
try:
firstitem = rule[0]
item_type = type(firstitem)
if item_type is str: # ['type', args]
args = rule[1:]
result = self.valuators[firstitem].validate(value, *args)
if not result[0]:
#return [False, args[-1]] # last item is error message
return result
elif item_type is list: # [[], [], []]
for subrule in rule:
result = self.validate(value, subrule) # recursive
if not result[0]:
return result
except Exception as e:
IN.logger.debug()
return [False, str(e)]
return [True]
def __getattr__(self, key):
		# look up lazily and cache under the requested name for future access
		val = self.valuators[key]
		setattr(self, key, val)
		return val
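# Illustrative usage sketch (assumes the IN application is initialised so that
# IN.valuator holds a ValuatorEngine instance, e.g. via the commented-out
# __In_app_init__ hook at the bottom of this module):
#   rule = [
#       ['NotEmpty', 'A value is required.'],
#       ['Email', 'Invalid email address.'],
#   ]
#   result = IN.valuator.validate('someone@example.com', rule)
#   # result is [True] on success, or [False, 'message'] for the first rule
#   # that fails (a plain nested list behaves like "all rules must pass").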
class ValuatorMeta(ObjectMetaBase):
__class_type_base_name__ = 'ValuatorBase'
__class_type_name__ = 'Valuator'
class ValuatorBase(dict, metaclass = ValuatorMeta):
	'''Base class of all IN valuators.
'''
__allowed_children__ = None
__default_child__ = None
ops = {
'=' : lambda l, al, ml: l == al,
'==' : lambda l, al, ml: l == al,
'!=' : lambda l, al, ml: l != al,
'>' : lambda l, al, ml: l > al,
'<' : lambda l, al, ml: l < al,
'>=' : lambda l, al, ml: l >= al,
'<=' : lambda l, al, ml: l <= al,
'<>' : lambda l, al, ml: al < l > ml,
'><' : lambda l, al, ml: al > l < ml,
}
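	# Note on the range operators: '<>' is true when the value is greater than
	# both bounds (al < l > ml) and '><' when it is smaller than both
	# (al > l < ml); the simple comparison operators ignore the ml argument.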
def validate(self, value):
'''return value should be a list like [False, 'Error message.'] or [True]
'''
return [True]
@IN.register('Valuator', type = 'Valuator')
class Valuator(ValuatorBase):
	'''Base class of all IN valuators.
'''
pass
class And(Valuator):
pass
class Or(Valuator):
pass
class Not(Valuator):
def validate(self, value, rule, message = ''):
'''not validator'''
result = IN.valuator.validate(value, rule[0])
not_result = not result[0]
return [not_result, message]
class Empty(Valuator):
def validate(self, value, message = ''):
		# return an explicit [True] / [False, message] rather than the raw value
return [False, message] if value else [True]
class NotEmpty(Valuator):
def validate(self, value, message = ''):
		# return an explicit [True] / [False, message] rather than the raw value
return [False, message] if not value else [True]
class Length(Valuator):
def validate(self, value, length = 0, op = '=', mlength = 0, message = ''):
try:
			# look up the comparison in the ops table (avoids a chain of ifs)
result = self.ops[op](len(value), length, mlength)
result = [result or False, message]
return result
except KeyError:
IN.logger.debug()
return [False, message] # always false
class Equal(Valuator):
def validate(self, value, tvalue, op = '=', mvalue = 0, message = ''):
try:
			# look up the comparison in the ops table (avoids a chain of ifs)
result = self.ops[op](value, tvalue, mvalue)
result = [result or False, message]
return result
except KeyError:
IN.logger.debug()
return [False, message] # always false
class Regx(Valuator):
	'''Valuator rule class that uses regular expressions.'''
re_compiled = {} # we dont want to compile again
def get_regx(self, regx):
try:
return self.re_compiled[regx]
except KeyError:
self.re_compiled[regx] = re.compile(regx)
return self.re_compiled[regx]
def validate(self, value, regx, message = ''):
result = self.get_regx(regx).match(value)
return [result, message]
class Domain(Regx):
regex_host = r'(?:(?:[a-zA-Z0-9][a-zA-Z0-9\-]*)?[a-zA-Z0-9])'
def validate(self, domain, message = ''):
false_message = [False, message]
dlen = len(domain)
if dlen < 4 or dlen > 255 or domain.endswith('.') or '.' not in domain:
return false_message
try:
domain = domain.encode('idna').decode('ascii')
except Exception:
return false_message
try:
domain.encode('ascii').decode('idna')
except Exception:
return false_message
reg = self.regex_host + r'(?:\.' + self.regex_host + r')*'
m = re.match(reg + "$", domain)
if not m:
return false_message
return [True]
class Email(Regx):
regex = re.compile(r'^[A-Za-z0-9\.\+_-]')
atext = r'a-zA-Z0-9_\.\-' # !#\$%&\'\*\+/=\?\^`\{\|\}~
atext_utf8 = atext + r"\u0080-\U0010FFFF"
regex_local = re.compile(''.join(('[', atext, ']+(?:\\.[', atext, ']+)*$')))
regex_local_utf8 = re.compile(''.join(('[', atext_utf8, ']+(?:\\.[', atext_utf8, ']+)*$')))
def validate(self, value, message = ''):
parts = value.split('@')
if len(parts) != 2:
return [False, message]
local = self.validate_local(parts[0])
if not local:
return [False, message]
# check domain part
domain_result = IN.valuator.validate(parts[1], ['Domain', message])
if not domain_result[0]:
return domain_result
return [True] # valid
def validate_local(self, local):
# check nabar name part
if not local or len(local) > 64 or '..' in local:
return False
m = re.match(self.regex_local, local) # ASCII
if m: # True
return True
else:
# unicode
m = re.match(self.regex_local_utf8, local)
if m:
return True
else:
return False
class Url(Regx):
def validate(self, value, message = ''):
return True
class Alpha(Valuator):
def validate(self, value, message = ''):
return [str.isalpha(value), message]
class AlphaNum(Valuator):
def validate(self, value, message = ''):
return [str.isalnum(value), message]
class Digit(Valuator):
def validate(self, value, message = ''):
return [str.isdigit(value), message]
class Decimal(Valuator):
def validate(self, value, message = ''):
return [str.isdecimal(value), message]
class Lower(Valuator):
def validate(self, value, message = ''):
return [str.islower(value), message]
class Upper(Valuator):
def validate(self, value, message = ''):
return [str.isupper(value), message]
class Numeric(Valuator):
def validate(self, value, message = ''):
return [str.isnumeric(value), message]
class Space(Valuator):
	'''Check whether the value contains only whitespace characters.'''
def validate(self, value, message = ''):
return [str.isspace(value), message]
class Startswith(Valuator):
def validate(self, value, start, message = ''):
return [str(value).startswith(start), message]
class Endswith(Valuator):
def validate(self, value, start, message = ''):
return [str(value).endswith(start), message]
class In(Valuator):
def validate(self, value, itr, message = ''):
return [value in itr, message]
class INPath(Valuator):
'''Check whether this string is a valid IN route.'''
def validate(self, value, message = ''):
return True
class NabarRole(Valuator):
'''Check whether nabar has this role.'''
def validate(self, value, message = ''):
return True
class NabarAccess(Valuator):
	'''Check whether nabar has this access permission.'''
def validate(self, value):
return True
class Callback(Valuator):
'''call the Callback to valuate.'''
def validate(self, value, message = ''):
return True
#@IN.hook
#def __In_app_init__(app):
### set the valuator
#IN.valuator = ValuatorEngine()
| apache-2.0 | 3,151,906,794,283,461,600 | 23.396774 | 92 | 0.61133 | false |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/heatmapgl/legendgrouptitle/_font.py | 1 | 8487 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "heatmapgl.legendgrouptitle"
_path_str = "heatmapgl.legendgrouptitle.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.heatmapgl.lege
ndgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.heatmapgl.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmapgl.legendgrouptitle.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| mit | 5,632,017,995,005,265,000 | 36.387665 | 82 | 0.559797 | false |
danhooper/sandbox | pinball/attract.py | 1 | 2386 | from procgame import *
class Mode(game.Mode):
def __init__(self, game):
super(Mode, self).__init__(game, 1)
highscore_categories = []
cat = highscore.HighScoreCategory()
cat.game_data_key = "HighScores"
cat.titles = [
"Grand Champion",
"High Score 1",
"High Score 2",
"High Score 3",
"High Score 4"
]
highscore_categories.append(cat)
for category in highscore_categories:
category.load_from_game(game)
frame_proc = dmd.Animation().load('dmd/P-ROC.dmd').frames[0]
layer_proc = dmd.FrameLayer(opaque=True, frame=frame_proc)
layer_th = dmd.TextLayer(128/2, 7, game.font_jazz, "center",
opaque=True).set_text("Town Hall")
layer_presents = dmd.TextLayer(128/2, 7, game.font_jazz, "center",
opaque=True).set_text("Presents")
layer_name = dmd.TextLayer(128/2, 7, game.font_jazz, "center",
opaque=True).set_text("TBD")
layer_high_scores = []
for frame in highscore.generate_highscore_frames(highscore_categories):
layer_high_scores.append(dmd.FrameLayer(opaque=True, frame=frame))
self.layer = dmd.ScriptedLayer(128, 32, [
{ "layer": None, "seconds": 10.0 },
{ "layer": layer_proc, "seconds": 3.0 },
{ "layer": layer_th, "seconds": 3.0 },
{ "layer": layer_presents, "seconds": 3.0 },
{ "layer": layer_name, "seconds": 3.0 },
{ "layer": layer_high_scores[0], "seconds": 3.0 },
{ "layer": layer_high_scores[1], "seconds": 3.0 },
{ "layer": layer_high_scores[2], "seconds": 3.0 },
{ "layer": layer_high_scores[3], "seconds": 3.0 },
{ "layer": layer_high_scores[4], "seconds": 3.0 },
])
def mode_stopped(self):
self.layer.script_index = 0
self.frame_start_time = None
self.is_new_script_item = True
def sw_enter_active(self, sw):
self.game.modes.add(self.game.service_mode)
return True
def sw_exit_active(self, sw):
return True
def sw_startButton_active(self, sw):
self.game.modes.remove(self)
self.game.modes.add(self.game.mode.base)
return True
| mit | -5,484,256,992,289,313,000 | 38.766667 | 79 | 0.536463 | false |
userzimmermann/robotframework-python3 | src/robot/utils/text.py | 1 | 3235 | # Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .charwidth import get_char_width
from .misc import seq2str2
from .unic import unic
_MAX_ASSIGN_LENGTH = 200
_MAX_ERROR_LINES = 40
_MAX_ERROR_LINE_LENGTH = 78
_ERROR_CUT_EXPLN = ' [ Message content over the limit has been removed. ]'
def cut_long_message(msg):
lines = msg.splitlines()
lengths = _count_line_lengths(lines)
if sum(lengths) <= _MAX_ERROR_LINES:
return msg
start = _prune_excess_lines(lines, lengths)
end = _prune_excess_lines(lines, lengths, from_end=True)
return '\n'.join(start + [_ERROR_CUT_EXPLN] + end)
def _prune_excess_lines(lines, lengths, from_end=False):
if from_end:
lines.reverse()
lengths.reverse()
ret = []
total = 0
# Use // (explicit int div) for Python 3 compatibility:
limit = _MAX_ERROR_LINES//2
for line, length in zip(lines[:limit], lengths[:limit]):
if total + length >= limit:
ret.append(_cut_long_line(line, total, from_end))
break
total += length
ret.append(line)
if from_end:
ret.reverse()
return ret
def _cut_long_line(line, used, from_end):
# Use // (explicit int div) for Python 3 compatibility:
available_lines = _MAX_ERROR_LINES//2 - used
available_chars = available_lines * _MAX_ERROR_LINE_LENGTH - 3
if len(line) > available_chars:
if not from_end:
line = line[:available_chars] + '...'
else:
line = '...' + line[-available_chars:]
return line
def _count_line_lengths(lines):
return [ _count_virtual_line_length(line) for line in lines ]
def _count_virtual_line_length(line):
if not line:
return 1
lines, remainder = divmod(len(line), _MAX_ERROR_LINE_LENGTH)
return lines if not remainder else lines + 1
def format_assign_message(variable, value, cut_long=True):
value = unic(value) if variable.startswith('$') else seq2str2(value)
if cut_long and len(value) > _MAX_ASSIGN_LENGTH:
value = value[:_MAX_ASSIGN_LENGTH] + '...'
return '%s = %s' % (variable, value)
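# Illustrative note (not part of the original module): with the default
# _MAX_ASSIGN_LENGTH of 200, a short scalar assignment is formatted unchanged,
# e.g. format_assign_message('${x}', 'abc') returns '${x} = abc', while longer
# values are truncated with a trailing '...'.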
def get_console_length(text):
return sum(get_char_width(char) for char in text)
def pad_console_length(text, width):
if width < 5:
width = 5
diff = get_console_length(text) - width
if diff > 0:
text = _lose_width(text, diff+3) + '...'
return _pad_width(text, width)
def _pad_width(text, width):
more = width - get_console_length(text)
return text + ' ' * more
def _lose_width(text, diff):
lost = 0
while lost < diff:
lost += get_console_length(text[-1])
text = text[:-1]
return text
| apache-2.0 | 7,472,979,966,690,661,000 | 30.715686 | 77 | 0.643586 | false |
chromium/chromium | build/util/lib/common/chrome_test_server_spawner.py | 6 | 16817 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A "Test Server Spawner" that handles killing/stopping per-test test servers.
It's used to accept requests from the device to spawn and kill instances of the
chrome test server on the host.
"""
# pylint: disable=W0702
import json
import logging
import os
import select
import struct
import subprocess
import sys
import threading
import time
from six.moves import BaseHTTPServer, urllib
SERVER_TYPES = {
'http': '',
'ftp': '-f',
'ws': '--websocket',
}
_DIR_SOURCE_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir,
os.pardir))
_logger = logging.getLogger(__name__)
# Path that are needed to import necessary modules when launching a testserver.
os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s'
% (os.path.join(_DIR_SOURCE_ROOT, 'third_party'),
os.path.join(_DIR_SOURCE_ROOT, 'third_party', 'tlslite'),
os.path.join(_DIR_SOURCE_ROOT, 'net', 'tools', 'testserver')))
# The timeout (in seconds) of starting up the Python test server.
_TEST_SERVER_STARTUP_TIMEOUT = 10
def _GetServerTypeCommandLine(server_type):
"""Returns the command-line by the given server type.
Args:
server_type: the server type to be used (e.g. 'http').
Returns:
A string containing the command-line argument.
"""
if server_type not in SERVER_TYPES:
raise NotImplementedError('Unknown server type: %s' % server_type)
return SERVER_TYPES[server_type]
class PortForwarder:
def Map(self, port_pairs):
pass
def GetDevicePortForHostPort(self, host_port):
"""Returns the device port that corresponds to a given host port."""
return host_port
def WaitHostPortAvailable(self, port):
"""Returns True if |port| is available."""
return True
def WaitPortNotAvailable(self, port):
"""Returns True if |port| is not available."""
return True
def WaitDevicePortReady(self, port):
"""Returns whether the provided port is used."""
return True
def Unmap(self, device_port):
"""Unmaps specified port"""
pass
class TestServerThread(threading.Thread):
"""A thread to run the test server in a separate process."""
def __init__(self, ready_event, arguments, port_forwarder):
"""Initialize TestServerThread with the following argument.
Args:
ready_event: event which will be set when the test server is ready.
arguments: dictionary of arguments to run the test server.
      port_forwarder: PortForwarder instance used to map the test server port
        to the device.
"""
threading.Thread.__init__(self)
self.wait_event = threading.Event()
self.stop_event = threading.Event()
self.ready_event = ready_event
self.ready_event.clear()
self.arguments = arguments
self.port_forwarder = port_forwarder
self.test_server_process = None
self.is_ready = False
self.host_port = self.arguments['port']
self.host_ocsp_port = 0
assert isinstance(self.host_port, int)
# The forwarder device port now is dynamically allocated.
self.forwarder_device_port = 0
self.forwarder_ocsp_device_port = 0
# Anonymous pipe in order to get port info from test server.
self.pipe_in = None
self.pipe_out = None
self.process = None
self.command_line = []
def _WaitToStartAndGetPortFromTestServer(self):
"""Waits for the Python test server to start and gets the port it is using.
The port information is passed by the Python test server with a pipe given
by self.pipe_out. It is written as a result to |self.host_port|.
Returns:
Whether the port used by the test server was successfully fetched.
"""
assert self.host_port == 0 and self.pipe_out and self.pipe_in
(in_fds, _, _) = select.select([self.pipe_in, ], [], [],
_TEST_SERVER_STARTUP_TIMEOUT)
if len(in_fds) == 0:
_logger.error('Failed to wait to the Python test server to be started.')
return False
# First read the data length as an unsigned 4-byte value. This
# is _not_ using network byte ordering since the Python test server packs
# size as native byte order and all Chromium platforms so far are
# configured to use little-endian.
# TODO(jnd): Change the Python test server and local_test_server_*.cc to
# use a unified byte order (either big-endian or little-endian).
data_length = os.read(self.pipe_in, struct.calcsize('=L'))
if data_length:
(data_length,) = struct.unpack('=L', data_length)
assert data_length
if not data_length:
_logger.error('Failed to get length of server data.')
return False
server_data_json = os.read(self.pipe_in, data_length)
if not server_data_json:
_logger.error('Failed to get server data.')
return False
_logger.info('Got port json data: %s', server_data_json)
parsed_server_data = None
try:
parsed_server_data = json.loads(server_data_json)
except ValueError:
pass
if not isinstance(parsed_server_data, dict):
_logger.error('Failed to parse server_data: %s' % server_data_json)
return False
if not isinstance(parsed_server_data.get('port'), int):
_logger.error('Failed to get port information from the server data.')
return False
self.host_port = parsed_server_data['port']
self.host_ocsp_port = parsed_server_data.get('ocsp_port', 0)
return self.port_forwarder.WaitPortNotAvailable(self.host_port)
def _GenerateCommandLineArguments(self):
"""Generates the command line to run the test server.
Note that all options are processed by following the definitions in
testserver.py.
"""
if self.command_line:
return
args_copy = dict(self.arguments)
# Translate the server type.
type_cmd = _GetServerTypeCommandLine(args_copy.pop('server-type'))
if type_cmd:
self.command_line.append(type_cmd)
# Use a pipe to get the port given by the instance of Python test server
# if the test does not specify the port.
assert self.host_port == args_copy['port']
if self.host_port == 0:
(self.pipe_in, self.pipe_out) = os.pipe()
self.command_line.append('--startup-pipe=%d' % self.pipe_out)
# Pass the remaining arguments as-is.
    for key, values in args_copy.items():  # items() works on both Python 2 and 3
if not isinstance(values, list):
values = [values]
for value in values:
if value is None:
self.command_line.append('--%s' % key)
else:
self.command_line.append('--%s=%s' % (key, value))
def _CloseUnnecessaryFDsForTestServerProcess(self):
# This is required to avoid subtle deadlocks that could be caused by the
# test server child process inheriting undesirable file descriptors such as
# file lock file descriptors.
    for fd in range(0, 1024):  # range() works on both Python 2 and 3
if fd != self.pipe_out:
try:
os.close(fd)
except:
pass
def run(self):
_logger.info('Start running the thread!')
self.wait_event.clear()
self._GenerateCommandLineArguments()
command = [sys.executable,
os.path.join(_DIR_SOURCE_ROOT, 'net', 'tools', 'testserver',
'testserver.py')] + self.command_line
_logger.info('Running: %s', command)
# Disable PYTHONUNBUFFERED because it has a bad interaction with the
# testserver. Remove once this interaction is fixed.
unbuf = os.environ.pop('PYTHONUNBUFFERED', None)
# Pass _DIR_SOURCE_ROOT as the child's working directory so that relative
# paths in the arguments are resolved correctly.
self.process = subprocess.Popen(
command, preexec_fn=self._CloseUnnecessaryFDsForTestServerProcess,
cwd=_DIR_SOURCE_ROOT)
if unbuf:
os.environ['PYTHONUNBUFFERED'] = unbuf
if self.process:
if self.pipe_out:
self.is_ready = self._WaitToStartAndGetPortFromTestServer()
else:
self.is_ready = self.port_forwarder.WaitPortNotAvailable(self.host_port)
if self.is_ready:
port_map = [(0, self.host_port)]
if self.host_ocsp_port:
port_map.extend([(0, self.host_ocsp_port)])
self.port_forwarder.Map(port_map)
self.forwarder_device_port = \
self.port_forwarder.GetDevicePortForHostPort(self.host_port)
if self.host_ocsp_port:
self.forwarder_ocsp_device_port = \
self.port_forwarder.GetDevicePortForHostPort(self.host_ocsp_port)
# Check whether the forwarder is ready on the device.
self.is_ready = self.forwarder_device_port and \
self.port_forwarder.WaitDevicePortReady(self.forwarder_device_port)
# Wake up the request handler thread.
self.ready_event.set()
# Keep thread running until Stop() gets called.
self.stop_event.wait()
if self.process.poll() is None:
self.process.kill()
# Wait for process to actually terminate.
# (crbug.com/946475)
self.process.wait()
self.port_forwarder.Unmap(self.forwarder_device_port)
self.process = None
self.is_ready = False
if self.pipe_out:
os.close(self.pipe_in)
os.close(self.pipe_out)
self.pipe_in = None
self.pipe_out = None
_logger.info('Test-server has died.')
self.wait_event.set()
def Stop(self):
"""Blocks until the loop has finished.
Note that this must be called in another thread.
"""
if not self.process:
return
self.stop_event.set()
self.wait_event.wait()
class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler used to process http GET/POST request."""
def _SendResponse(self, response_code, response_reason, additional_headers,
contents):
"""Generates a response sent to the client from the provided parameters.
Args:
response_code: number of the response status.
response_reason: string of reason description of the response.
additional_headers: dict of additional headers. Each key is the name of
the header, each value is the content of the header.
contents: string of the contents we want to send to client.
"""
self.send_response(response_code, response_reason)
self.send_header('Content-Type', 'text/html')
# Specify the content-length as without it the http(s) response will not
# be completed properly (and the browser keeps expecting data).
self.send_header('Content-Length', len(contents))
for header_name in additional_headers:
self.send_header(header_name, additional_headers[header_name])
self.end_headers()
self.wfile.write(contents)
self.wfile.flush()
def _StartTestServer(self):
"""Starts the test server thread."""
_logger.info('Handling request to spawn a test server.')
    # headers.get() is available on both the Python 2 and Python 3 message objects.
    content_type = self.headers.get('content-type')
if content_type != 'application/json':
raise Exception('Bad content-type for start request.')
    content_length = self.headers.get('content-length')
if not content_length:
content_length = 0
try:
content_length = int(content_length)
except:
raise Exception('Bad content-length for start request.')
_logger.info(content_length)
test_server_argument_json = self.rfile.read(content_length)
_logger.info(test_server_argument_json)
if len(self.server.test_servers) >= self.server.max_instances:
self._SendResponse(400, 'Invalid request', {},
'Too many test servers running')
return
ready_event = threading.Event()
new_server = TestServerThread(ready_event,
json.loads(test_server_argument_json),
self.server.port_forwarder)
new_server.setDaemon(True)
new_server.start()
ready_event.wait()
if new_server.is_ready:
response = {'port': new_server.forwarder_device_port,
'message': 'started'};
if new_server.forwarder_ocsp_device_port:
response['ocsp_port'] = new_server.forwarder_ocsp_device_port
self._SendResponse(200, 'OK', {}, json.dumps(response))
_logger.info('Test server is running on port %d forwarded to %d.' %
(new_server.forwarder_device_port, new_server.host_port))
port = new_server.forwarder_device_port
assert port not in self.server.test_servers
self.server.test_servers[port] = new_server
else:
new_server.Stop()
self._SendResponse(500, 'Test Server Error.', {}, '')
_logger.info('Encounter problem during starting a test server.')
def _KillTestServer(self, params):
"""Stops the test server instance."""
try:
port = int(params['port'][0])
except ValueError:
port = None
if port == None or port <= 0:
self._SendResponse(400, 'Invalid request.', {}, 'port must be specified')
return
if port not in self.server.test_servers:
self._SendResponse(400, 'Invalid request.', {},
"testserver isn't running on port %d" % port)
return
server = self.server.test_servers.pop(port)
_logger.info('Handling request to kill a test server on port: %d.', port)
server.Stop()
# Make sure the status of test server is correct before sending response.
if self.server.port_forwarder.WaitHostPortAvailable(port):
self._SendResponse(200, 'OK', {}, 'killed')
_logger.info('Test server on port %d is killed', port)
else:
# We expect the port to be free, but nothing stops the system from
# binding something else to that port, so don't throw error.
# (crbug.com/946475)
self._SendResponse(200, 'OK', {}, '')
_logger.warn('Port %s is not free after killing test server.' % port)
def log_message(self, format, *args):
# Suppress the default HTTP logging behavior if the logging level is higher
# than INFO.
    if _logger.getEffectiveLevel() <= logging.INFO:
      BaseHTTPServer.BaseHTTPRequestHandler.log_message(self, format, *args)
def do_POST(self):
parsed_path = urllib.parse.urlparse(self.path)
action = parsed_path.path
_logger.info('Action for POST method is: %s.', action)
if action == '/start':
self._StartTestServer()
else:
self._SendResponse(400, 'Unknown request.', {}, '')
_logger.info('Encounter unknown request: %s.', action)
def do_GET(self):
parsed_path = urllib.parse.urlparse(self.path)
action = parsed_path.path
params = urllib.parse.parse_qs(parsed_path.query, keep_blank_values=1)
_logger.info('Action for GET method is: %s.', action)
for param in params:
_logger.info('%s=%s', param, params[param][0])
if action == '/kill':
self._KillTestServer(params)
elif action == '/ping':
# The ping handler is used to check whether the spawner server is ready
# to serve the requests. We don't need to test the status of the test
# server when handling ping request.
self._SendResponse(200, 'OK', {}, 'ready')
_logger.info('Handled ping request and sent response.')
else:
self._SendResponse(400, 'Unknown request', {}, '')
_logger.info('Encounter unknown request: %s.', action)
class SpawningServer(object):
"""The class used to start/stop a http server."""
def __init__(self, test_server_spawner_port, port_forwarder, max_instances):
self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
SpawningServerRequestHandler)
self.server_port = self.server.server_port
_logger.info('Started test server spawner on port: %d.', self.server_port)
self.server.port_forwarder = port_forwarder
self.server.test_servers = {}
self.server.max_instances = max_instances
def _Listen(self):
_logger.info('Starting test server spawner.')
self.server.serve_forever()
def Start(self):
"""Starts the test server spawner."""
listener_thread = threading.Thread(target=self._Listen)
listener_thread.setDaemon(True)
listener_thread.start()
def Stop(self):
"""Stops the test server spawner.
Also cleans the server state.
"""
self.CleanupState()
self.server.shutdown()
def CleanupState(self):
"""Cleans up the spawning server state.
This should be called if the test server spawner is reused,
to avoid sharing the test server instance.
"""
if self.server.test_servers:
_logger.warning('Not all test servers were stopped.')
for port in self.server.test_servers:
_logger.warning('Stopping test server on port %d' % port)
self.server.test_servers[port].Stop()
self.server.test_servers = {}
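# Illustrative usage sketch (not part of the original file), assuming the no-op
# PortForwarder defined above is sufficient (i.e. host ports are reachable
# directly):
#
#   spawner = SpawningServer(0, PortForwarder(), max_instances=4)
#   spawner.Start()
#   _logger.info('Spawner listening on port %d', spawner.server_port)
#   ...
#   spawner.Stop()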
| bsd-3-clause | 3,188,136,761,027,053,600 | 34.553911 | 80 | 0.662068 | false |
onyxfish/votersdaily_web | api/couchdb/log_views.py | 1 | 2166 | import couchdb
from couchdb.design import ViewDefinition
"""
This module defines a collection of functions which accept a CouchDB database
as an argument, are named with a 'make_views_*' convention, and return a list
of generated CouchDB ViewDefinitions.
The 'syncviews' management command dynamically executes each of these functions
to compile a list of all CouchDB views.
"""
def make_views_all_documents(event_db):
"""
Generate a view that includes all documents.
"""
all_view_map_function = \
'''
function(doc) {
emit(doc.access_datetime, doc)
}
'''
return [ViewDefinition('api', 'all', all_view_map_function)]
def make_views_error_documents(event_db):
"""
    Generate a view that includes only documents whose result is not "success".
"""
error_view_map_function = \
'''
function(doc) {
if (doc.result != "success") {
emit(doc.access_datetime, doc)
}
}
'''
return [ViewDefinition('api', 'errors', error_view_map_function)]
def get_parser_list(event_db):
"""
Return a list of unique parser names in the database.
"""
parser_list_map_function = \
'''
function(doc) {
emit(doc.parser_name, null);
}
'''
parser_list_reduce_function = \
'''
function(keys, values) {
return null;
}
'''
return [
e.key for e in event_db.query(
parser_list_map_function,
parser_list_reduce_function,
group=True)]
def make_views_parser_lists(event_db):
"""
Return a list of views, one for each parser, using templated view
functions.
"""
parser_names = get_parser_list(event_db)
parser_view_map_function = \
'''
function(doc) {
if (doc.parser_name == "%(parser_name)s") {
emit(doc.parser_name, doc)
}
}
'''
return [
ViewDefinition('api', name,
parser_view_map_function % { 'parser_name': name })
for name in parser_names] | gpl-3.0 | 4,641,316,289,698,557,000 | 23.908046 | 78 | 0.548476 | false |
pr-omethe-us/PyKED | pyked/chemked.py | 1 | 44185 | """
Main ChemKED module
"""
# Standard libraries
from os.path import exists
from collections import namedtuple
from warnings import warn
from copy import deepcopy
import xml.etree.ElementTree as etree
import xml.dom.minidom as minidom
from itertools import chain
import numpy as np
# Local imports
from .validation import schema, OurValidator, yaml, Q_
from .converters import datagroup_properties, ReSpecTh_to_ChemKED
VolumeHistory = namedtuple('VolumeHistory', ['time', 'volume'])
VolumeHistory.__doc__ = 'Time history of the volume in an RCM experiment. Deprecated, to be removed after PyKED 0.4' # noqa: E501
VolumeHistory.time.__doc__ = '(`~numpy.ndarray`): the time during the experiment'
VolumeHistory.volume.__doc__ = '(`~numpy.ndarray`): the volume during the experiment'
TimeHistory = namedtuple('TimeHistory', ['time', 'quantity', 'type'])
TimeHistory.__doc__ = 'Time history of the quantity in an RCM experiment'
TimeHistory.time.__doc__ = '(`~numpy.ndarray`): the time during the experiment'
TimeHistory.quantity.__doc__ = '(`~numpy.ndarray`): the quantity of interest during the experiment'
TimeHistory.type.__doc__ = """\
(`str`): the type of time history represented. Possible options are:
* volume
* temperature
* pressure
* piston position
* light emission
* OH emission
* absorption
"""
RCMData = namedtuple(
'RCMData',
['compressed_pressure', 'compressed_temperature', 'compression_time', 'stroke',
'clearance', 'compression_ratio']
)
RCMData.__doc__ = 'Data fields specific to rapid compression machine experiments'
RCMData.compressed_pressure.__doc__ = '(`~pint.Quantity`) The pressure at the end of compression'
RCMData.compressed_temperature.__doc__ = """\
(`~pint.Quantity`) The temperature at the end of compression"""
RCMData.compression_time.__doc__ = '(`~pint.Quantity`) The duration of the compression stroke'
RCMData.stroke.__doc__ = '(`~pint.Quantity`) The length of the stroke'
RCMData.clearance.__doc__ = """\
(`~pint.Quantity`) The clearance between piston face and end wall at the end of compression"""
RCMData.compression_ratio.__doc__ = '(`~pint.Quantity`) The volumetric compression ratio'
Reference = namedtuple('Reference',
['volume', 'journal', 'doi', 'authors', 'detail', 'year', 'pages'])
Reference.__doc__ = 'Information about the article or report where the data can be found'
Reference.volume.__doc__ = '(`str`) The journal volume'
Reference.journal.__doc__ = '(`str`) The name of the journal'
Reference.doi.__doc__ = '(`str`) The Digital Object Identifier of the article'
Reference.authors.__doc__ = '(`list`) The list of authors of the article'
Reference.detail.__doc__ = '(`str`) Detail about where the data can be found in the article'
Reference.year.__doc__ = '(`str`) The year the article was published'
Reference.pages.__doc__ = '(`str`) The pages in the journal where the article was published'
Apparatus = namedtuple('Apparatus', ['kind', 'institution', 'facility'])
Apparatus.__doc__ = 'Information about the experimental apparatus used to generate the data'
Apparatus.kind.__doc__ = '(`str`) The kind of experimental apparatus'
Apparatus.institution.__doc__ = '(`str`) The institution where the experiment is located'
Apparatus.facility.__doc__ = '(`str`) The particular experimental facility at the location'
Composition = namedtuple('Composition', 'species_name InChI SMILES atomic_composition amount')
Composition.__doc__ = 'Detail of the initial composition of the mixture for the experiment'
Composition.species_name.__doc__ = '(`str`) The name of the species'
Composition.InChI.__doc__ = '(`str`) The InChI identifier for the species'
Composition.SMILES.__doc__ = '(`str`) The SMILES identifier for the species'
Composition.atomic_composition.__doc__ = '(`dict`) The atomic composition of the species'
Composition.amount.__doc__ = '(`~pint.Quantity`) The amount of this species'
class ChemKED(object):
"""Main ChemKED class.
The ChemKED class stores information about the contents of a ChemKED database
file. It stores each datapoint associated with the database and provides access
    to the reference information, versions, and file authors.
Arguments:
yaml_file (`str`, optional): The filename of the YAML database in ChemKED format.
        dict_input (`dict`, optional): A dictionary with the parsed output of a YAML file in ChemKED
format.
skip_validation (`bool`, optional): Whether validation of the ChemKED should be done. Must
be supplied as a keyword-argument.
Attributes:
datapoints (`list`): List of `DataPoint` objects storing each datapoint in the database.
reference (`~collections.namedtuple`): Attributes include ``volume``, ``journal``, ``doi``,
``authors``, ``detail``, ``year``, and ``pages`` describing the reference from which the
datapoints are derived.
apparatus (`~collections.namedtuple`): Attributes include ``kind`` of experimental
apparatus, and the ``institution`` and ``facility`` where the experimental apparatus is
located.
chemked_version (`str`): Version of the ChemKED database schema used in this file.
        experiment_type (`str`): Type of experimental data contained in this database.
        file_authors (`list`): Information about the authors of the ChemKED database file.
file_version (`str`): Version of the ChemKED database file.
_properties (`dict`): Original dictionary read from ChemKED database file, meant for
internal use.
"""
def __init__(self, yaml_file=None, dict_input=None, *, skip_validation=False):
if yaml_file is not None:
with open(yaml_file, 'r') as f:
self._properties = yaml.safe_load(f)
elif dict_input is not None:
self._properties = dict_input
else:
raise NameError("ChemKED needs either a YAML filename or dictionary as input.")
if not skip_validation:
self.validate_yaml(self._properties)
self.datapoints = []
for point in self._properties['datapoints']:
self.datapoints.append(DataPoint(point))
self.reference = Reference(
volume=self._properties['reference'].get('volume'),
journal=self._properties['reference'].get('journal'),
doi=self._properties['reference'].get('doi'),
authors=self._properties['reference'].get('authors'),
detail=self._properties['reference'].get('detail'),
year=self._properties['reference'].get('year'),
pages=self._properties['reference'].get('pages'),
)
self.apparatus = Apparatus(
kind=self._properties['apparatus'].get('kind'),
institution=self._properties['apparatus'].get('institution'),
facility=self._properties['apparatus'].get('facility'),
)
for prop in ['chemked-version', 'experiment-type', 'file-authors', 'file-version']:
setattr(self, prop.replace('-', '_'), self._properties[prop])
@classmethod
def from_respecth(cls, filename_xml, file_author='', file_author_orcid=''):
"""Construct a ChemKED instance directly from a ReSpecTh file.
Arguments:
filename_xml (`str`): Filename of the ReSpecTh-formatted XML file to be imported
file_author (`str`, optional): File author to be added to the list generated from the
XML file
file_author_orcid (`str`, optional): ORCID for the file author being added to the list
of file authors
Returns:
`ChemKED`: Instance of the `ChemKED` class containing the data in ``filename_xml``.
Examples:
>>> ck = ChemKED.from_respecth('respecth_file.xml')
>>> ck = ChemKED.from_respecth('respecth_file.xml', file_author='Bryan W. Weber')
>>> ck = ChemKED.from_respecth('respecth_file.xml', file_author='Bryan W. Weber',
file_author_orcid='0000-0000-0000-0000')
"""
properties = ReSpecTh_to_ChemKED(filename_xml, file_author, file_author_orcid,
validate=False)
return cls(dict_input=properties)
def validate_yaml(self, properties):
"""Validate the parsed YAML file for adherance to the ChemKED format.
Arguments:
properties (`dict`): Dictionary created from the parsed YAML file
Raises:
`ValueError`: If the YAML file cannot be validated, a `ValueError` is raised whose
string contains the errors that are present.
"""
validator = OurValidator(schema)
if not validator.validate(properties):
for key, value in validator.errors.items():
if any(['unallowed value' in v for v in value]):
print(('{key} has an illegal value. Allowed values are {values} and are case '
'sensitive.').format(key=key, values=schema[key]['allowed']))
raise ValueError(validator.errors)
def get_dataframe(self, output_columns=None):
"""Get a Pandas DataFrame of the datapoints in this instance.
Arguments:
output_columns (`list`, optional): List of strings specifying the columns to include
in the output DataFrame. The default is `None`, which outputs all of the
columns. Options include (not case sensitive):
* ``Temperature``
* ``Pressure``
* ``Ignition Delay``
* ``Composition``
* ``Equivalence Ratio``
* ``Reference``
* ``Apparatus``
* ``Experiment Type``
* ``File Author``
* ``File Version``
* ``ChemKED Version``
In addition, specific fields from the ``Reference`` and ``Apparatus`` attributes can
be included by specifying the name after a colon. These options are:
* ``Reference:Volume``
* ``Reference:Journal``
* ``Reference:DOI``
* ``Reference:Authors``
* ``Reference:Detail``
* ``Reference:Year``
* ``Reference:Pages``
* ``Apparatus:Kind``
* ``Apparatus:Facility``
* ``Apparatus:Institution``
Only the first author is printed when ``Reference`` or ``Reference:Authors`` is
selected because the whole author list may be quite long.
Note:
If the Composition is selected as an output type, the composition specified in the
`DataPoint` is used. No attempt is made to convert to a consistent basis; mole fractions
will remain mole fractions, mass fractions will remain mass fractions, and mole percent
will remain mole percent. Therefore, it is possible to end up with more than one type of
composition specification in a given column. However, if the composition is included
in the resulting dataframe, the type of each composition will be specified by the "Kind"
field in each row.
Examples:
>>> df = ChemKED(yaml_file).get_dataframe()
>>> df = ChemKED(yaml_file).get_dataframe(['Temperature', 'Ignition Delay'])
Returns:
`~pandas.DataFrame`: Contains the information regarding each point in the ``datapoints``
attribute
"""
import pandas as pd
valid_labels = [a.replace('_', ' ') for a in self.__dict__
if not (a.startswith('__') or a.startswith('_'))
]
valid_labels.remove('datapoints')
valid_labels.extend(
['composition', 'ignition delay', 'temperature', 'pressure', 'equivalence ratio']
)
ref_index = valid_labels.index('reference')
valid_labels[ref_index:ref_index + 1] = ['reference:' + a for a in Reference._fields]
app_index = valid_labels.index('apparatus')
valid_labels[app_index:app_index + 1] = ['apparatus:' + a for a in Apparatus._fields]
species_list = list(set(chain(*[list(d.composition.keys()) for d in self.datapoints])))
if output_columns is None or len(output_columns) == 0:
col_labels = valid_labels
comp_index = col_labels.index('composition')
col_labels[comp_index:comp_index + 1] = species_list + ['Composition:Kind']
else:
output_columns = [a.lower() for a in output_columns]
col_labels = []
for col in output_columns:
if col in valid_labels or col in ['reference', 'apparatus']:
col_labels.append(col)
else:
raise ValueError('{} is not a valid output column choice'.format(col))
if 'composition' in col_labels:
comp_index = col_labels.index('composition')
col_labels[comp_index:comp_index + 1] = species_list + ['Composition:Kind']
if 'reference' in col_labels:
ref_index = col_labels.index('reference')
col_labels[ref_index:ref_index + 1] = ['reference:' + a for a in Reference._fields]
if 'apparatus' in col_labels:
app_index = col_labels.index('apparatus')
col_labels[app_index:app_index + 1] = ['apparatus:' + a for a in Apparatus._fields]
data = []
for d in self.datapoints:
row = []
d_species = list(d.composition.keys())
for col in col_labels:
if col in species_list:
if col in d_species:
row.append(d.composition[col].amount)
else:
row.append(Q_(0.0, 'dimensionless'))
elif 'reference' in col or 'apparatus' in col:
split_col = col.split(':')
if split_col[1] == 'authors':
row.append(getattr(getattr(self, split_col[0]), split_col[1])[0]['name'])
else:
row.append(getattr(getattr(self, split_col[0]), split_col[1]))
elif col in ['temperature', 'pressure', 'ignition delay', 'equivalence ratio']:
row.append(getattr(d, col.replace(' ', '_')))
elif col == 'file authors':
row.append(getattr(self, col.replace(' ', '_'))[0]['name'])
elif col == 'Composition:Kind':
row.append(d.composition_type)
else:
row.append(getattr(self, col.replace(' ', '_')))
data.append(row)
col_labels = [a.title() for a in col_labels]
columns = pd.Index(col_labels)
return pd.DataFrame(data=data, columns=columns)
def write_file(self, filename, *, overwrite=False):
"""Write new ChemKED YAML file based on object.
Arguments:
filename (`str`): Filename for target YAML file
overwrite (`bool`, optional): Whether to overwrite file with given name if present.
Must be supplied as a keyword-argument.
Raises:
            `OSError`: If ``filename`` is already present and ``overwrite`` is not ``True``.
Example:
>>> dataset = ChemKED(yaml_file)
>>> dataset.write_file(new_yaml_file)
"""
# Ensure file isn't already present
if exists(filename) and not overwrite:
raise OSError(filename + ' already present. Specify "overwrite=True" '
'to overwrite, or rename.'
)
with open(filename, 'w') as yaml_file:
yaml.dump(self._properties, yaml_file)
def convert_to_ReSpecTh(self, filename):
"""Convert ChemKED record to ReSpecTh XML file.
This converter uses common information in a ChemKED file to generate a
ReSpecTh XML file. Note that some information may be lost, as ChemKED stores
some additional attributes.
Arguments:
filename (`str`): Filename for output ReSpecTh XML file.
Example:
>>> dataset = ChemKED(yaml_file)
>>> dataset.convert_to_ReSpecTh(xml_file)
"""
root = etree.Element('experiment')
file_author = etree.SubElement(root, 'fileAuthor')
file_author.text = self.file_authors[0]['name']
# right now ChemKED just uses an integer file version
file_version = etree.SubElement(root, 'fileVersion')
major_version = etree.SubElement(file_version, 'major')
major_version.text = str(self.file_version)
minor_version = etree.SubElement(file_version, 'minor')
minor_version.text = '0'
respecth_version = etree.SubElement(root, 'ReSpecThVersion')
major_version = etree.SubElement(respecth_version, 'major')
major_version.text = '1'
minor_version = etree.SubElement(respecth_version, 'minor')
minor_version.text = '0'
# Only ignition delay currently supported
exp = etree.SubElement(root, 'experimentType')
if self.experiment_type == 'ignition delay':
exp.text = 'Ignition delay measurement'
else:
raise NotImplementedError('Only ignition delay type supported for conversion.')
reference = etree.SubElement(root, 'bibliographyLink')
citation = ''
for author in self.reference.authors:
citation += author['name'] + ', '
citation += (self.reference.journal + ' (' + str(self.reference.year) + ') ' +
str(self.reference.volume) + ':' + self.reference.pages + '. ' +
self.reference.detail
)
reference.set('preferredKey', citation)
reference.set('doi', self.reference.doi)
apparatus = etree.SubElement(root, 'apparatus')
kind = etree.SubElement(apparatus, 'kind')
kind.text = self.apparatus.kind
common_properties = etree.SubElement(root, 'commonProperties')
# ChemKED objects have no common properties once loaded. Check for properties
# among datapoints that tend to be common
common = []
composition = self.datapoints[0].composition
# Composition type *has* to be the same
composition_type = self.datapoints[0].composition_type
if not all(dp.composition_type == composition_type for dp in self.datapoints):
raise NotImplementedError('Error: ReSpecTh does not support varying composition '
'type among datapoints.'
)
if all([composition == dp.composition for dp in self.datapoints]):
# initial composition is common
common.append('composition')
prop = etree.SubElement(common_properties, 'property')
prop.set('name', 'initial composition')
for species_name, species in composition.items():
component = etree.SubElement(prop, 'component')
species_link = etree.SubElement(component, 'speciesLink')
species_link.set('preferredKey', species_name)
if species.InChI is not None:
species_link.set('InChI', species.InChI)
amount = etree.SubElement(component, 'amount')
amount.set('units', composition_type)
amount.text = str(species.amount.magnitude)
# If multiple datapoints present, then find any common properties. If only
# one datapoint, then composition should be the only "common" property.
if len(self.datapoints) > 1:
for prop_name in datagroup_properties:
attribute = prop_name.replace(' ', '_')
quantities = [getattr(dp, attribute, False) for dp in self.datapoints]
# All quantities must have the property in question and all the
# values must be equal
if all(quantities) and quantities.count(quantities[0]) == len(quantities):
common.append(prop_name)
prop = etree.SubElement(common_properties, 'property')
prop.set('description', '')
prop.set('name', prop_name)
prop.set('units', str(quantities[0].units))
value = etree.SubElement(prop, 'value')
value.text = str(quantities[0].magnitude)
# Ignition delay can't be common, unless only a single datapoint.
datagroup = etree.SubElement(root, 'dataGroup')
datagroup.set('id', 'dg1')
datagroup_link = etree.SubElement(datagroup, 'dataGroupLink')
datagroup_link.set('dataGroupID', '')
datagroup_link.set('dataPointID', '')
property_idx = {}
labels = {'temperature': 'T', 'pressure': 'P',
'ignition delay': 'tau', 'pressure rise': 'dP/dt',
}
for prop_name in datagroup_properties:
attribute = prop_name.replace(' ', '_')
# This can't be hasattr because properties are set to the value None
# if no value is specified in the file, so the attribute always exists
prop_indices = [i for i, dp in enumerate(self.datapoints)
if getattr(dp, attribute) is not None
]
if prop_name in common or not prop_indices:
continue
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
prop.set('name', prop_name)
units = str(getattr(self.datapoints[prop_indices[0]], attribute).units)
prop.set('units', units)
idx = 'x{}'.format(len(property_idx) + 1)
property_idx[idx] = {'name': prop_name, 'units': units}
prop.set('id', idx)
prop.set('label', labels[prop_name])
# Need to handle datapoints with possibly different species in the initial composition
if 'composition' not in common:
for dp in self.datapoints:
for species in dp.composition.values():
# Only add new property for species not already considered
has_spec = any([species.species_name in d.values()
for d in property_idx.values()
])
if not has_spec:
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
idx = 'x{}'.format(len(property_idx) + 1)
property_idx[idx] = {'name': species.species_name}
prop.set('id', idx)
prop.set('label', '[' + species.species_name + ']')
prop.set('name', 'composition')
prop.set('units', self.datapoints[0].composition_type)
species_link = etree.SubElement(prop, 'speciesLink')
species_link.set('preferredKey', species.species_name)
if species.InChI is not None:
species_link.set('InChI', species.InChI)
for dp in self.datapoints:
datapoint = etree.SubElement(datagroup, 'dataPoint')
for idx, val in property_idx.items():
# handle regular properties a bit differently than composition
if val['name'] in datagroup_properties:
value = etree.SubElement(datapoint, idx)
quantity = getattr(dp, val['name'].replace(' ', '_')).to(val['units'])
value.text = str(quantity.magnitude)
else:
# composition
for item in dp.composition.values():
if item.species_name == val['name']:
value = etree.SubElement(datapoint, idx)
value.text = str(item.amount.magnitude)
# See https://stackoverflow.com/a/16097112 for the None.__ne__
history_types = ['volume_history', 'temperature_history', 'pressure_history',
'piston_position_history', 'light_emission_history',
'OH_emission_history', 'absorption_history']
time_histories = [getattr(dp, p) for dp in self.datapoints for p in history_types]
time_histories = list(filter(None.__ne__, time_histories))
if len(self.datapoints) > 1 and len(time_histories) > 1:
raise NotImplementedError('Error: ReSpecTh files do not support multiple datapoints '
'with a time history.')
elif len(time_histories) > 0:
for dg_idx, hist in enumerate(time_histories):
if hist.type not in ['volume', 'temperature', 'pressure']:
warn('The time-history type {} is not supported by ReSpecTh for '
'ignition delay experiments'.format(hist.type))
continue
datagroup = etree.SubElement(root, 'dataGroup')
datagroup.set('id', 'dg{}'.format(dg_idx))
datagroup_link = etree.SubElement(datagroup, 'dataGroupLink')
datagroup_link.set('dataGroupID', '')
datagroup_link.set('dataPointID', '')
# Time history has two properties: time and quantity.
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
prop.set('name', 'time')
prop.set('units', str(hist.time.units))
time_idx = 'x{}'.format(len(property_idx) + 1)
property_idx[time_idx] = {'name': 'time'}
prop.set('id', time_idx)
prop.set('label', 't')
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
prop.set('name', hist.type)
prop.set('units', str(hist.quantity.units))
quant_idx = 'x{}'.format(len(property_idx) + 1)
property_idx[quant_idx] = {'name': hist.type}
prop.set('id', quant_idx)
prop.set('label', 'V')
for time, quantity in zip(hist.time, hist.quantity):
datapoint = etree.SubElement(datagroup, 'dataPoint')
value = etree.SubElement(datapoint, time_idx)
value.text = str(time.magnitude)
value = etree.SubElement(datapoint, quant_idx)
value.text = str(quantity.magnitude)
ign_types = [getattr(dp, 'ignition_type', False) for dp in self.datapoints]
# All datapoints must have the same ignition target and type
if all(ign_types) and ign_types.count(ign_types[0]) == len(ign_types):
# In ReSpecTh files all datapoints must share ignition type
ignition = etree.SubElement(root, 'ignitionType')
if ign_types[0]['target'] in ['pressure', 'temperature']:
ignition.set('target', ign_types[0]['target'][0].upper())
else:
# options left are species
ignition.set('target', self.datapoints[0].ignition_type['target'])
if ign_types[0]['type'] == 'd/dt max extrapolated':
ignition.set('type', 'baseline max intercept from d/dt')
else:
ignition.set('type', self.datapoints[0].ignition_type['type'])
else:
raise NotImplementedError('Different ignition targets or types for multiple datapoints '
'are not supported in ReSpecTh.')
et = etree.ElementTree(root)
et.write(filename, encoding='utf-8', xml_declaration=True)
# now do a "pretty" rewrite
xml = minidom.parse(filename)
xml_string = xml.toprettyxml(indent=' ')
with open(filename, 'w') as f:
f.write(xml_string)
print('Converted to ' + filename)
class DataPoint(object):
"""Class for a single datapoint.
The `DataPoint` class stores the information associated with a single data point in the dataset
parsed from the `ChemKED` YAML input.
Arguments:
properties (`dict`): Dictionary adhering to the ChemKED format for ``datapoints``
Attributes:
        composition (`dict`): Dictionary mapping species names to Composition
            namedtuples describing each species and its amount
ignition_delay (pint.Quantity): The ignition delay of the experiment
temperature (pint.Quantity): The temperature of the experiment
pressure (pint.Quantity): The pressure of the experiment
pressure_rise (pint.Quantity, optional): The amount of pressure rise during the induction
period of a shock tube experiment.
compression_time (pint.Quantity, optional): The compression time for an RCM experiment.
compressed_pressure (pint.Quantity, optional): The pressure at the end of compression for
an RCM experiment.
compressed_temperature (pint.Quantity, optional): The temperature at the end of compression
for an RCM experiment.
first_stage_ignition_delay (pint.Quantity, optional): The first stage ignition delay of the
experiment.
ignition_type (`dict`): Dictionary with the ignition target and type.
volume_history (`~collections.namedtuple`, optional): The volume history of the reactor
during an RCM experiment.
pressure_history (`~collections.namedtuple`, optional): The pressure history of the reactor
during an experiment.
temperature_history (`~collections.namedtuple`, optional): The temperature history of the
reactor during an experiment.
piston_position_history (`~collections.namedtuple`, optional): The piston position history
of the reactor during an RCM experiment.
light_emission_history (`~collections.namedtuple`, optional): The light emission history
of the reactor during an experiment.
OH_emission_history (`~collections.namedtuple`, optional): The OH emission history of the
reactor during an experiment.
absorption_history (`~collections.namedtuple`, optional): The absorption history of the
reactor during an experiment.
"""
value_unit_props = [
'ignition-delay', 'first-stage-ignition-delay', 'temperature', 'pressure',
'pressure-rise',
]
rcm_data_props = [
'compressed-pressure', 'compressed-temperature', 'compression-time', 'stroke', 'clearance',
'compression-ratio'
]
def __init__(self, properties):
for prop in self.value_unit_props:
if prop in properties:
quant = self.process_quantity(properties[prop])
setattr(self, prop.replace('-', '_'), quant)
else:
setattr(self, prop.replace('-', '_'), None)
if 'rcm-data' in properties:
orig_rcm_data = properties['rcm-data']
rcm_props = {}
for prop in self.rcm_data_props:
if prop in orig_rcm_data:
quant = self.process_quantity(orig_rcm_data[prop])
rcm_props[prop.replace('-', '_')] = quant
else:
rcm_props[prop.replace('-', '_')] = None
self.rcm_data = RCMData(**rcm_props)
else:
self.rcm_data = None
self.composition_type = properties['composition']['kind']
composition = {}
for species in properties['composition']['species']:
species_name = species['species-name']
amount = self.process_quantity(species['amount'])
InChI = species.get('InChI')
SMILES = species.get('SMILES')
atomic_composition = species.get('atomic-composition')
composition[species_name] = Composition(
species_name=species_name, InChI=InChI, SMILES=SMILES,
atomic_composition=atomic_composition, amount=amount)
setattr(self, 'composition', composition)
self.equivalence_ratio = properties.get('equivalence-ratio')
self.ignition_type = deepcopy(properties.get('ignition-type'))
if 'time-histories' in properties and 'volume-history' in properties:
raise TypeError('time-histories and volume-history are mutually exclusive')
if 'time-histories' in properties:
for hist in properties['time-histories']:
if hasattr(self, '{}_history'.format(hist['type'].replace(' ', '_'))):
raise ValueError('Each history type may only be specified once. {} was '
'specified multiple times'.format(hist['type']))
time_col = hist['time']['column']
time_units = hist['time']['units']
quant_col = hist['quantity']['column']
quant_units = hist['quantity']['units']
if isinstance(hist['values'], list):
values = np.array(hist['values'])
else:
# Load the values from a file
values = np.genfromtxt(hist['values']['filename'], delimiter=',')
time_history = TimeHistory(
time=Q_(values[:, time_col], time_units),
quantity=Q_(values[:, quant_col], quant_units),
type=hist['type'],
)
setattr(self, '{}_history'.format(hist['type'].replace(' ', '_')), time_history)
if 'volume-history' in properties:
warn('The volume-history field should be replaced by time-histories. '
'volume-history will be removed after PyKED 0.4',
DeprecationWarning)
time_col = properties['volume-history']['time']['column']
time_units = properties['volume-history']['time']['units']
volume_col = properties['volume-history']['volume']['column']
volume_units = properties['volume-history']['volume']['units']
values = np.array(properties['volume-history']['values'])
self.volume_history = VolumeHistory(
time=Q_(values[:, time_col], time_units),
volume=Q_(values[:, volume_col], volume_units),
)
history_types = ['volume', 'temperature', 'pressure', 'piston_position', 'light_emission',
'OH_emission', 'absorption']
for h in history_types:
if not hasattr(self, '{}_history'.format(h)):
setattr(self, '{}_history'.format(h), None)
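    # Illustrative note (not part of the original source): each entry of
    # 'time-histories' is expected to look roughly like the dictionary below;
    # 'values' may instead be {'filename': 'trace.csv'} to load from a CSV file.
    #
    #   {'type': 'volume',
    #    'time': {'units': 's', 'column': 0},
    #    'quantity': {'units': 'cm**3', 'column': 1},
    #    'values': [[0.0, 565.2], [1.0e-3, 564.6]]}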
def process_quantity(self, properties):
"""Process the uncertainty information from a given quantity and return it
"""
quant = Q_(properties[0])
if len(properties) > 1:
unc = properties[1]
uncertainty = unc.get('uncertainty', False)
upper_uncertainty = unc.get('upper-uncertainty', False)
lower_uncertainty = unc.get('lower-uncertainty', False)
uncertainty_type = unc.get('uncertainty-type')
if uncertainty_type == 'relative':
if uncertainty:
quant = quant.plus_minus(float(uncertainty), relative=True)
elif upper_uncertainty and lower_uncertainty:
warn('Asymmetric uncertainties are not supported. The '
'maximum of lower-uncertainty and upper-uncertainty '
'has been used as the symmetric uncertainty.')
uncertainty = max(float(upper_uncertainty), float(lower_uncertainty))
quant = quant.plus_minus(uncertainty, relative=True)
else:
raise ValueError('Either "uncertainty" or "upper-uncertainty" and '
'"lower-uncertainty" need to be specified.')
elif uncertainty_type == 'absolute':
if uncertainty:
uncertainty = Q_(uncertainty)
quant = quant.plus_minus(uncertainty.to(quant.units).magnitude)
elif upper_uncertainty and lower_uncertainty:
warn('Asymmetric uncertainties are not supported. The '
'maximum of lower-uncertainty and upper-uncertainty '
'has been used as the symmetric uncertainty.')
uncertainty = max(Q_(upper_uncertainty), Q_(lower_uncertainty))
quant = quant.plus_minus(uncertainty.to(quant.units).magnitude)
else:
raise ValueError('Either "uncertainty" or "upper-uncertainty" and '
'"lower-uncertainty" need to be specified.')
else:
raise ValueError('uncertainty-type must be one of "absolute" or "relative"')
return quant
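    # A quantity, as handled above, is a list whose first entry is a string with a
    # value and units (passed to Q_) and whose optional second entry describes the
    # uncertainty. The values below are purely illustrative:
    #
    #   ['1000 kelvin']
    #   ['1000 kelvin', {'uncertainty-type': 'relative', 'uncertainty': 0.01}]
    #   ['1.0 bar', {'uncertainty-type': 'absolute', 'uncertainty': '0.05 bar'}]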
def get_cantera_composition_string(self, species_conversion=None):
"""Get the composition in a string format suitable for input to Cantera.
Returns a formatted string no matter the type of composition. As such, this method
is not recommended for end users; instead, prefer the `get_cantera_mole_fraction`
or `get_cantera_mass_fraction` methods.
Arguments:
species_conversion (`dict`, optional): Mapping of species identifier to a
species name. This argument should be supplied when the name of the
species in the ChemKED YAML file does not match the name of the same
species in a chemical kinetic mechanism. The species identifier (the key
of the mapping) can be the name, InChI, or SMILES provided in the ChemKED
file, while the value associated with a key should be the desired name in
the Cantera format output string.
Returns:
`str`: String in the ``SPEC:AMT, SPEC:AMT`` format
Raises:
`ValueError`: If the composition type of the `DataPoint` is not one of
``'mass fraction'``, ``'mole fraction'``, or ``'mole percent'``
"""
if self.composition_type in ['mole fraction', 'mass fraction']:
factor = 1.0
elif self.composition_type == 'mole percent':
factor = 100.0
else:
raise ValueError('Unknown composition type: {}'.format(self.composition_type))
if species_conversion is None:
comps = ['{!s}:{:.4e}'.format(c.species_name,
c.amount.magnitude/factor) for c in self.composition.values()]
else:
comps = []
for c in self.composition.values():
amount = c.amount.magnitude/factor
idents = [getattr(c, s, False) for s in ['species_name', 'InChI', 'SMILES']]
present = [i in species_conversion for i in idents]
if not any(present):
comps.append('{!s}:{:.4e}'.format(c.species_name, amount))
else:
if len([i for i in present if i]) > 1:
raise ValueError('More than one conversion present for species {}'.format(
c.species_name))
ident = idents[present.index(True)]
species_replacement_name = species_conversion.pop(ident)
comps.append('{!s}:{:.4e}'.format(species_replacement_name, amount))
if len(species_conversion) > 0:
raise ValueError('Unknown species in conversion: {}'.format(species_conversion))
return ', '.join(comps)
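    # Entries in species_conversion are consumed as they are matched (via pop), so
    # any key that matches no species by name, InChI, or SMILES is reported through
    # the ValueError above. Illustrative only, assuming the mixture contains no N2:
    #
    #   dp.get_cantera_composition_string({'N2': 'n2'})  # raises ValueError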
def get_cantera_mole_fraction(self, species_conversion=None):
"""Get the mole fractions in a string format suitable for input to Cantera.
Arguments:
species_conversion (`dict`, optional): Mapping of species identifier to a
species name. This argument should be supplied when the name of the
species in the ChemKED YAML file does not match the name of the same
species in a chemical kinetic mechanism. The species identifier (the key
of the mapping) can be the name, InChI, or SMILES provided in the ChemKED
file, while the value associated with a key should be the desired name in
the Cantera format output string.
Returns:
`str`: String of mole fractions in the ``SPEC:AMT, SPEC:AMT`` format
Raises:
`ValueError`: If the composition type is ``'mass fraction'``, the conversion cannot
be done because no molecular weight information is known
Examples:
>>> dp = DataPoint(properties)
>>> dp.get_cantera_mole_fraction()
'H2:4.4400e-03, O2:5.5600e-03, Ar:9.9000e-01'
>>> species_conversion = {'H2': 'h2', 'O2': 'o2'}
>>> dp.get_cantera_mole_fraction(species_conversion)
'h2:4.4400e-03, o2:5.5600e-03, Ar:9.9000e-01'
>>> species_conversion = {'1S/H2/h1H': 'h2', '1S/O2/c1-2': 'o2'}
>>> dp.get_cantera_mole_fraction(species_conversion)
'h2:4.4400e-03, o2:5.5600e-03, Ar:9.9000e-01'
"""
if self.composition_type == 'mass fraction':
raise ValueError('Cannot get mole fractions from the given composition.\n'
'{}'.format(self.composition))
else:
return self.get_cantera_composition_string(species_conversion)
def get_cantera_mass_fraction(self, species_conversion=None):
"""Get the mass fractions in a string format suitable for input to Cantera.
Arguments:
species_conversion (`dict`, optional): Mapping of species identifier to a
species name. This argument should be supplied when the name of the
species in the ChemKED YAML file does not match the name of the same
species in a chemical kinetic mechanism. The species identifier (the key
of the mapping) can be the name, InChI, or SMILES provided in the ChemKED
file, while the value associated with a key should be the desired name in
the Cantera format output string.
Returns:
`str`: String of mass fractions in the ``SPEC:AMT, SPEC:AMT`` format
Raises:
`ValueError`: If the composition type is ``'mole fraction'`` or
``'mole percent'``, the conversion cannot be done because no molecular
weight information is known
Examples:
>>> dp = DataPoint(properties)
>>> dp.get_cantera_mass_fraction()
'H2:2.2525e-04, O2:4.4775e-03, Ar:9.9530e-01'
>>> species_conversion = {'H2': 'h2', 'O2': 'o2'}
>>> dp.get_cantera_mass_fraction(species_conversion)
'h2:2.2525e-04, o2:4.4775e-03, Ar:9.9530e-01'
>>> species_conversion = {'1S/H2/h1H': 'h2', '1S/O2/c1-2': 'o2'}
>>> dp.get_cantera_mass_fraction(species_conversion)
'h2:2.2525e-04, o2:4.4775e-03, Ar:9.9530e-01'
"""
if self.composition_type in ['mole fraction', 'mole percent']:
            raise ValueError('Cannot get mass fractions from the given composition.\n'
                             '{}'.format(self.composition))
else:
return self.get_cantera_composition_string(species_conversion)
| bsd-3-clause | -6,256,631,093,448,223,000 | 48.701912 | 130 | 0.583388 | false |
bjornaa/ladim | ladim/state.py | 1 | 6096 | """
Class for the state of the model
"""
import sys
import os
import importlib
import logging
from typing import Any, Dict, Sized # mypy
import numpy as np
from netCDF4 import Dataset, num2date
from .tracker import Tracker
from .gridforce import Grid, Forcing
# ------------------------
Config = Dict[str, Any]
class State(Sized):
"""The model variables at a given time"""
def __init__(self, config: Config, grid: Grid) -> None:
logging.info("Initializing the model state")
self.timestep = 0
self.timestamp = config["start_time"].astype("datetime64[s]")
self.dt = np.timedelta64(config["dt"], "s")
self.position_variables = ["X", "Y", "Z"]
if "ibm" in config and "variables" in config["ibm"]:
self.ibm_variables = config["ibm"]["variables"]
else:
self.ibm_variables = config.get("ibm_variables", [])
self.ibm_forcing = config.get("ibm_forcing", [])
self.particle_variables = config["particle_variables"]
self.instance_variables = self.position_variables + [
var for var in self.ibm_variables if var not in self.particle_variables
]
self.pid = np.array([], dtype=int)
for name in self.instance_variables:
setattr(self, name, np.array([], dtype=float))
for name in self.particle_variables:
setattr(self, name, np.array([], dtype=config["release_dtype"][name]))
self.track = Tracker(config)
self.dt = config["dt"]
if config["ibm_module"]:
# Import the module
logging.info("Initializing the IBM")
sys.path.insert(0, os.getcwd())
ibm_module = importlib.import_module(config["ibm_module"])
# Initiate the IBM object
self.ibm = ibm_module.IBM(config)
else:
self.ibm = None
# self.num_particles = len(self.X)
self.nnew = 0 # Modify with warm start?
if config["warm_start_file"]:
self.warm_start(config, grid)
def __getitem__(self, name: str) -> None:
return getattr(self, name)
def __setitem__(self, name: str, value: Any) -> None:
return setattr(self, name, value)
def __len__(self) -> int:
return len(getattr(self, "X"))
def append(self, new: Dict[str, Any], forcing: Forcing) -> None:
"""Append new particles to the model state"""
nnew = len(new["pid"])
self.pid = np.concatenate((self.pid, new["pid"]))
for name in self.instance_variables:
if name in new:
self[name] = np.concatenate((self[name], new[name]))
elif name in self.ibm_forcing:
                # Use .values since Z must be a numpy array
self[name] = np.concatenate(
(self[name], forcing.field(new["X"], new["Y"], new["Z"].values, name))
)
else: # Initialize to zero
self[name] = np.concatenate((self[name], np.zeros(nnew)))
self.nnew = nnew
def update(self, grid: Grid, forcing: Forcing) -> None:
"""Update the model state to the next timestep"""
# From physics all particles are alive
# self.alive = np.ones(len(self), dtype="bool")
self.alive = grid.ingrid(self.X, self.Y)
self.timestep += 1
self.timestamp += np.timedelta64(self.dt, "s")
self.track.move_particles(grid, forcing, self)
# logging.info(
# "Model time = {}".format(self.timestamp.astype('M8[h]')))
if self.timestamp.astype("int") % 3600 == 0: # New hour
logging.info("Model time = {}".format(self.timestamp.astype("M8[h]")))
# Update the IBM
if self.ibm:
self.ibm.update_ibm(grid, self, forcing)
# Extension, allow inactive particles (not moved next time)
if "active" in self.ibm_variables:
pass
# self.active = self.ibm_variables['active']
else: # Default = active
self.active = np.ones_like(self.pid)
# Surface/bottom boundary conditions
# Reflective at surface
I = self.Z < 0
self.Z[I] = -self.Z[I]
# Keep just above bottom
H = grid.sample_depth(self.X, self.Y)
I = self.Z > H
self.Z[I] = 0.99 * H[I]
# Compactify by removing dead particles
# Could have a switch to avoid this if no deaths
self.pid = self.pid[self.alive]
for key in self.instance_variables:
self[key] = self[key][self.alive]
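    # One call to update() advances the state a single time step. A driver loop
    # would look roughly like the sketch below; this is illustrative only and not
    # the actual ladim main loop:
    #
    #   for _ in range(num_steps):
    #       state.update(grid, forcing)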
def warm_start(self, config: Config, grid: Grid) -> None:
"""Perform a warm (re)start"""
warm_start_file = config["warm_start_file"]
try:
f = Dataset(warm_start_file)
except FileNotFoundError:
logging.critical(f"Can not open warm start file: {warm_start_file}")
raise SystemExit(1)
logging.info("Reading warm start file")
# Using last record in file
tvar = f.variables["time"]
warm_start_time = np.datetime64(num2date(tvar[-1], tvar.units))
# Not needed anymore, explicitly set in configuration
# if warm_start_time != config['start_time']:
# print("warm start time = ", warm_start_time)
# print("start time = ", config['start_time'])
# logging.error("Warm start time and start time differ")
# raise SystemExit(1)
pstart = f.variables["particle_count"][:-1].sum()
pcount = f.variables["particle_count"][-1]
self.pid = f.variables["pid"][pstart : pstart + pcount]
# Give error if variable not in restart file
for var in config["warm_start_variables"]:
logging.debug(f"Reading {var} from warm start file")
self[var] = f.variables[var][pstart : pstart + pcount]
# Remove particles near edge of grid
I = grid.ingrid(self["X"], self["Y"])
self.pid = self.pid[I]
for var in config["warm_start_variables"]:
self[var] = self[var][I]
| mit | 6,137,527,517,948,302,000 | 35.285714 | 90 | 0.57103 | false |
Anvil/maestro-ng | maestro/loader.py | 1 | 2758 | # Copyright (C) 2015 SignalFx, Inc. All rights reserved.
#
# Docker container orchestration utility.
import jinja2
import os
import sys
import yaml
from . import exceptions
class MaestroYamlConstructor(yaml.constructor.Constructor):
"""A PyYAML object constructor that errors on duplicate keys in YAML
mappings. Because for some reason PyYAML doesn't do that since 3.x."""
def construct_mapping(self, node, deep=False):
if not isinstance(node, yaml.nodes.MappingNode):
raise yaml.constructor.ConstructorError(
None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
keys = set()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
if key in keys:
raise yaml.constructor.ConstructorError(
"while constructing a mapping", node.start_mark,
"found duplicate key (%s)" % key, key_node.start_mark)
keys.add(key)
return yaml.constructor.Constructor.construct_mapping(self, node, deep)
class MaestroYamlLoader(yaml.reader.Reader, yaml.scanner.Scanner,
yaml.parser.Parser, yaml.composer.Composer,
MaestroYamlConstructor, yaml.resolver.Resolver):
"""A custom YAML Loader that uses the custom MaestroYamlConstructor."""
def __init__(self, stream):
yaml.reader.Reader.__init__(self, stream)
yaml.scanner.Scanner.__init__(self)
yaml.parser.Parser.__init__(self)
yaml.composer.Composer.__init__(self)
MaestroYamlConstructor.__init__(self)
yaml.resolver.Resolver.__init__(self)
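# With this loader, duplicate keys in a mapping raise a ConstructorError instead of
# being silently collapsed. A minimal, illustrative check (not part of the module
# API):
#
#   yaml.load('foo: 1\nfoo: 2', Loader=MaestroYamlLoader)  # raises ConstructorError
#   yaml.load('foo: 1\nbar: 2', Loader=MaestroYamlLoader)  # returns {'foo': 1, 'bar': 2}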
def load(filename):
"""Load a config from the given file.
Args:
filename (string): Path to the YAML environment description
configuration file to load. Use '-' for stdin.
Returns:
A python data structure corresponding to the YAML configuration.
"""
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(filename)),
extensions=['jinja2.ext.with_'])
try:
if filename == '-':
template = env.from_string(sys.stdin.read())
else:
template = env.get_template(os.path.basename(filename))
except jinja2.exceptions.TemplateNotFound:
raise exceptions.MaestroException(
'Environment description file {} not found!'.format(filename))
except Exception as e:
raise exceptions.MaestroException(
'Error reading environment description file {}: {}!'
.format(filename, e))
return yaml.load(template.render(env=os.environ), Loader=MaestroYamlLoader)
| apache-2.0 | 4,613,575,730,410,633,000 | 36.27027 | 79 | 0.638869 | false |
bootstraponline/testdroid_device_finder | device_finder.py | 1 | 4900 | # -*- coding: utf-8 -*-
# from: https://github.com/bitbar/testdroid-samples/blob/03fc043ba98235b9ea46a0ab8646f3b20dd1960e/appium/sample-scripts/python/device_finder.py
import os, sys, requests, json, time, httplib
from optparse import OptionParser
from urlparse import urljoin
from datetime import datetime
class DeviceFinder:
# Cloud URL (not including API path)
url = None
# Oauth access token
access_token = None
# Oauth refresh token
refresh_token = None
# Unix timestamp (seconds) when token expires
token_expiration_time = None
""" Full constructor with username and password
"""
def __init__(self, username=None, password=None, url="https://cloud.testdroid.com", download_buffer_size=65536):
self.username = username
self.password = password
self.cloud_url = url
self.download_buffer_size = download_buffer_size
""" Get Oauth2 token
"""
def get_token(self):
if not self.access_token:
# TODO: refresh
url = "%s/oauth/token" % self.cloud_url
payload = {
"client_id": "testdroid-cloud-api",
"grant_type": "password",
"username": self.username,
"password": self.password
}
res = requests.post(
url,
data = payload,
headers = { "Accept": "application/json" }
)
if res.status_code != 200:
print "FAILED: Authentication or connection failure. Check Testdroid Cloud URL and your credentials."
sys.exit(-1)
reply = res.json()
self.access_token = reply['access_token']
self.refresh_token = reply['refresh_token']
self.token_expiration_time = time.time() + reply['expires_in']
elif self.token_expiration_time < time.time():
url = "%s/oauth/token" % self.cloud_url
payload = {
"client_id": "testdroid-cloud-api",
"grant_type": "refresh_token",
"refresh_token": self.refresh_token
}
res = requests.post(
url,
data = payload,
headers = { "Accept": "application/json" }
)
if res.status_code != 200:
print "FAILED: Unable to get a new access token using refresh token"
self.access_token = None
return self.get_token()
reply = res.json()
self.access_token = reply['access_token']
self.refresh_token = reply['refresh_token']
self.token_expiration_time = time.time() + reply['expires_in']
return self.access_token
""" Helper method for getting necessary headers to use for API calls, including authentication
"""
def _build_headers(self):
return { "Authorization": "Bearer %s" % self.get_token(), "Accept": "application/json" }
""" GET from API resource
"""
def get(self, path=None, payload={}, headers={}):
if path.find('v2/') >= 0:
cut_path = path.split('v2/')
path = cut_path[1]
url = "%s/api/v2/%s" % (self.cloud_url, path)
headers = dict(self._build_headers().items() + headers.items())
res = requests.get(url, params=payload, headers=headers)
if headers['Accept'] == 'application/json':
return res.json()
else:
return res.text
""" Returns list of devices
"""
def get_devices(self, limit=0):
return self.get("devices?limit=%s" % (limit))
""" Find available free Android device
"""
def available_free_android_device(self, limit=0):
print "Searching Available Free Android Device..."
for device in self.get_devices(limit)['data']:
if device['creditsPrice'] == 0 and device['locked'] == False and device['osType'] == "ANDROID" and device['softwareVersion']['apiLevel'] > 16:
print "Found device '%s'" % device['displayName']
print ""
return device['displayName']
print "No available device found"
print ""
return ""
""" Find available free iOS device
"""
def available_free_ios_device(self, limit=0):
print "Searching Available Free iOS Device..."
for device in self.get_devices(limit)['data']:
if device['creditsPrice'] == 0 and device['locked'] == False and device['osType'] == "IOS":
print "Found device '%s'" % device['displayName']
print ""
return device['displayName']
print "No available device found"
print ""
return ""
| apache-2.0 | -8,658,598,604,070,463,000 | 35.842105 | 154 | 0.546327 | false |
icgc/icgc-get | tests/test_ega.py | 1 | 1474 | #
# Copyright (c) 2016 The Ontario Institute for Cancer Research. All rights reserved.
#
# This program and the accompanying materials are made available under the terms of the GNU Public License v3.0.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from conftest import download_test
def test_ega_file(data_dir):
download_test(['FI98765'], 'status', 'ega', ['_methylationCEL_CLL-174.CEL'], [5556766], data_dir)
def test_ega_manifest(data_dir):
download_test(["4294ed2b-4d41-4967-8c5d-231027fa40c7"], 'download', 'ega', ['_methylationCEL_CLL-174.CEL'],
[5556766], data_dir)
| gpl-3.0 | -8,480,794,609,064,911,000 | 51.642857 | 112 | 0.751696 | false |
bolkedebruin/airflow | tests/providers/datadog/hooks/test_datadog.py | 1 | 4769 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
from unittest import mock
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.datadog.hooks.datadog import DatadogHook
APP_KEY = 'app_key'
API_KEY = 'api_key'
METRIC_NAME = 'metric'
DATAPOINT = 7
TAGS = ['tag']
TYPE = 'rate'
INTERVAL = 30
TITLE = 'title'
TEXT = 'text'
AGGREGATION_KEY = 'aggregation-key'
ALERT_TYPE = 'warning'
DATE_HAPPENED = 12345
HANDLE = 'handle'
PRIORITY = 'normal'
RELATED_EVENT_ID = 7
DEVICE_NAME = 'device-name'
class TestDatadogHook(unittest.TestCase):
@mock.patch('airflow.providers.datadog.hooks.datadog.initialize')
@mock.patch('airflow.providers.datadog.hooks.datadog.DatadogHook.get_connection')
def setUp(self, mock_get_connection, mock_initialize):
mock_get_connection.return_value = Connection(extra=json.dumps({
'app_key': APP_KEY,
'api_key': API_KEY,
}))
self.hook = DatadogHook()
@mock.patch('airflow.providers.datadog.hooks.datadog.initialize')
@mock.patch('airflow.providers.datadog.hooks.datadog.DatadogHook.get_connection')
def test_api_key_required(self, mock_get_connection, mock_initialize):
mock_get_connection.return_value = Connection()
with self.assertRaises(AirflowException) as ctx:
DatadogHook()
self.assertEqual(str(ctx.exception),
'api_key must be specified in the Datadog connection details')
def test_validate_response_valid(self):
try:
self.hook.validate_response({'status': 'ok'})
except AirflowException:
self.fail('Unexpected AirflowException raised')
def test_validate_response_invalid(self):
with self.assertRaises(AirflowException):
self.hook.validate_response({'status': 'error'})
@mock.patch('airflow.providers.datadog.hooks.datadog.api.Metric.send')
def test_send_metric(self, mock_send):
mock_send.return_value = {'status': 'ok'}
self.hook.send_metric(
METRIC_NAME,
DATAPOINT,
tags=TAGS,
type_=TYPE,
interval=INTERVAL,
)
mock_send.assert_called_once_with(
metric=METRIC_NAME,
points=DATAPOINT,
host=self.hook.host,
tags=TAGS,
type=TYPE,
interval=INTERVAL,
)
@mock.patch('airflow.providers.datadog.hooks.datadog.api.Metric.query')
@mock.patch('airflow.providers.datadog.hooks.datadog.time.time')
def test_query_metric(self, mock_time, mock_query):
now = 12345
mock_time.return_value = now
mock_query.return_value = {'status': 'ok'}
self.hook.query_metric('query', 60, 30)
mock_query.assert_called_once_with(
start=now - 60,
end=now - 30,
query='query',
)
@mock.patch('airflow.providers.datadog.hooks.datadog.api.Event.create')
def test_post_event(self, mock_create):
mock_create.return_value = {'status': 'ok'}
self.hook.post_event(
TITLE,
TEXT,
aggregation_key=AGGREGATION_KEY,
alert_type=ALERT_TYPE,
date_happened=DATE_HAPPENED,
handle=HANDLE,
priority=PRIORITY,
related_event_id=RELATED_EVENT_ID,
tags=TAGS,
device_name=DEVICE_NAME,
)
mock_create.assert_called_once_with(
title=TITLE,
text=TEXT,
aggregation_key=AGGREGATION_KEY,
alert_type=ALERT_TYPE,
date_happened=DATE_HAPPENED,
handle=HANDLE,
priority=PRIORITY,
related_event_id=RELATED_EVENT_ID,
tags=TAGS,
host=self.hook.host,
device_name=DEVICE_NAME,
source_type_name=self.hook.source_type_name,
)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -8,385,485,857,153,462,000 | 33.064286 | 87 | 0.63787 | false |
chrisfilda/edx_platform | lms/envs/common.py | 1 | 51252 | # -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0611, W0614, C0103
import sys
import os
import imp
import json
from path import path
from .discussionsettings import *
from lms.lib.xblock.mixin import LmsBlockMixin
################################### FEATURES ###################################
# The display name of the platform to be used in templates/emails/etc.
PLATFORM_NAME = "edX"
CC_MERCHANT_NAME = PLATFORM_NAME
COURSEWARE_ENABLED = True
ENABLE_JASMINE = False
DISCUSSION_SETTINGS = {
'MAX_COMMENT_DEPTH': 2,
}
# Features
FEATURES = {
'SAMPLE': False,
'USE_DJANGO_PIPELINE': True,
'DISPLAY_DEBUG_INFO_TO_STAFF': True,
'DISPLAY_HISTOGRAMS_TO_STAFF': False, # For large courses this slows down courseware access for staff.
'REROUTE_ACTIVATION_EMAIL': False, # nonempty string = address for all activation emails
'DEBUG_LEVEL': 0, # 0 = lowest level, least verbose, 255 = max level, most verbose
## DO NOT SET TO True IN THIS FILE
## Doing so will cause all courses to be released on production
'DISABLE_START_DATES': False, # When True, all courses will be active, regardless of start date
# When True, will only publicly list courses by the subdomain. Expects you
# to define COURSE_LISTINGS, a dictionary mapping subdomains to lists of
# course_ids (see dev_int.py for an example)
'SUBDOMAIN_COURSE_LISTINGS': False,
# When True, will override certain branding with university specific values
# Expects a SUBDOMAIN_BRANDING dictionary that maps the subdomain to the
# university to use for branding purposes
'SUBDOMAIN_BRANDING': False,
'FORCE_UNIVERSITY_DOMAIN': False, # set this to the university domain to use, as an override to HTTP_HOST
# set to None to do no university selection
# for consistency in user-experience, keep the value of the following 3 settings
# in sync with the corresponding ones in cms/envs/common.py
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_TEXTBOOK': True,
'ENABLE_STUDENT_NOTES': True, # enables the student notes API and UI.
# discussion home panel, which includes a subscription on/off setting for discussion digest emails.
# this should remain off in production until digest notifications are online.
'ENABLE_DISCUSSION_HOME_PANEL': False,
'ENABLE_PSYCHOMETRICS': False, # real-time psychometrics (eg item response theory analysis in instructor dashboard)
'ENABLE_DJANGO_ADMIN_SITE': True, # set true to enable django's admin site, even on prod (e.g. for course ops)
'ENABLE_SQL_TRACKING_LOGS': False,
'ENABLE_LMS_MIGRATION': False,
'ENABLE_MANUAL_GIT_RELOAD': False,
'ENABLE_MASQUERADE': True, # allow course staff to change to student view of courseware
'ENABLE_SYSADMIN_DASHBOARD': False, # sysadmin dashboard, to see what courses are loaded, to delete & load courses
'DISABLE_LOGIN_BUTTON': False, # used in systems where login is automatic, eg MIT SSL
    # external access methods
'ACCESS_REQUIRE_STAFF_FOR_COURSE': False,
'AUTH_USE_OPENID': False,
'AUTH_USE_CERTIFICATES': False,
'AUTH_USE_OPENID_PROVIDER': False,
# Even though external_auth is in common, shib assumes the LMS views / urls, so it should only be enabled
# in LMS
'AUTH_USE_SHIB': False,
'AUTH_USE_CAS': False,
# This flag disables the requirement of having to agree to the TOS for users registering
# with Shib. Feature was requested by Stanford's office of general counsel
'SHIB_DISABLE_TOS': False,
    # Can be turned off if course lists need to be hidden. Affects views and templates.
'COURSES_ARE_BROWSABLE': True,
# Enables ability to restrict enrollment in specific courses by the user account login method
'RESTRICT_ENROLL_BY_REG_METHOD': False,
# analytics experiments
'ENABLE_INSTRUCTOR_ANALYTICS': False,
# Enables the LMS bulk email feature for course staff
'ENABLE_INSTRUCTOR_EMAIL': True,
# If True and ENABLE_INSTRUCTOR_EMAIL: Forces email to be explicitly turned on
# for each course via django-admin interface.
# If False and ENABLE_INSTRUCTOR_EMAIL: Email will be turned on by default
# for all Mongo-backed courses.
'REQUIRE_COURSE_EMAIL_AUTH': True,
# enable analytics server.
# WARNING: THIS SHOULD ALWAYS BE SET TO FALSE UNDER NORMAL
# LMS OPERATION. See analytics.py for details about what
# this does.
'RUN_AS_ANALYTICS_SERVER_ENABLED': False,
# Flip to True when the YouTube iframe API breaks (again)
'USE_YOUTUBE_OBJECT_API': False,
# Give a UI to show a student's submission history in a problem by the
# Staff Debug tool.
'ENABLE_STUDENT_HISTORY_VIEW': True,
# segment.io for LMS--need to explicitly turn it on for production.
'SEGMENT_IO_LMS': False,
# Provide a UI to allow users to submit feedback from the LMS (left-hand help modal)
'ENABLE_FEEDBACK_SUBMISSION': False,
# Turn on a page that lets staff enter Python code to be run in the
# sandbox, for testing whether it's enabled properly.
'ENABLE_DEBUG_RUN_PYTHON': False,
    # Enable URL that shows information about the status of various services
'ENABLE_SERVICE_STATUS': False,
# Toggle to indicate use of a custom theme
'USE_CUSTOM_THEME': False,
# Don't autoplay videos for students
'AUTOPLAY_VIDEOS': False,
# Enable instructor dash to submit background tasks
'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True,
# Enable instructor to assign individual due dates
'INDIVIDUAL_DUE_DATES': False,
# Enable instructor dash beta version link
'ENABLE_INSTRUCTOR_BETA_DASHBOARD': True,
# Toggle to enable certificates of courses on dashboard
'ENABLE_VERIFIED_CERTIFICATES': False,
    # Allow use of the hint management instructor view.
'ENABLE_HINTER_INSTRUCTOR_VIEW': False,
# for load testing
'AUTOMATIC_AUTH_FOR_TESTING': False,
# Toggle to enable chat availability (configured on a per-course
# basis in Studio)
'ENABLE_CHAT': False,
# Allow users to enroll with methods other than just honor code certificates
'MULTIPLE_ENROLLMENT_ROLES': False,
# Toggle the availability of the shopping cart page
'ENABLE_SHOPPING_CART': False,
# Toggle storing detailed billing information
'STORE_BILLING_INFO': False,
# Enable flow for payments for course registration (DIFFERENT from verified student flow)
'ENABLE_PAID_COURSE_REGISTRATION': False,
# Automatically approve student identity verification attempts
'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': False,
# Disable instructor dash buttons for downloading course data
# when enrollment exceeds this number
'MAX_ENROLLMENT_INSTR_BUTTONS': 200,
# Grade calculation started from the new instructor dashboard will write
# grades CSV files to S3 and give links for downloads.
'ENABLE_S3_GRADE_DOWNLOADS': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': False,
# Give course staff unrestricted access to grade downloads (if set to False,
# only edX superusers can perform the downloads)
'ALLOW_COURSE_STAFF_GRADE_DOWNLOADS': False,
'ENABLED_PAYMENT_REPORTS': ["refund_report", "itemized_purchase_report", "university_revenue_share", "certificate_status"],
# Turn off account locking if failed login attempts exceeds a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': False,
# Toggle embargo functionality
'EMBARGO': False,
# Whether the Wiki subsystem should be accessible via the direct /wiki/ paths. Setting this to True means
# that people can submit content and modify the Wiki in any arbitrary manner. We're leaving this as True in the
# defaults, so that we maintain current behavior
'ALLOW_WIKI_ROOT_ACCESS': True,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
# Turn on third-party auth. Disabled for now because full implementations are not yet available. Remember to syncdb
# if you enable this; we don't create tables by default.
'ENABLE_THIRD_PARTY_AUTH': False,
# Toggle to enable alternate urls for marketing links
'ENABLE_MKTG_SITE': False,
# Prevent concurrent logins per user
'PREVENT_CONCURRENT_LOGINS': False,
# Turn off Advanced Security by default
'ADVANCED_SECURITY': False,
# Show a "Download your certificate" on the Progress page if the lowest
# nonzero grade cutoff is met
'SHOW_PROGRESS_SUCCESS_BUTTON': False,
}
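# Environment-specific settings files are expected to import these defaults and
# then flip individual flags, e.g. (illustrative only):
#
# FEATURES['ENABLE_DISCUSSION_SERVICE'] = False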
# Used for A/B testing
DEFAULT_GROUPS = []
# If this is true, random scores will be generated for the purpose of debugging the profile graphs
GENERATE_PROFILE_SCORES = False
# Used with XQueue
XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5 # seconds
############################# SET PATH INFORMATION #############################
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/lms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
COURSES_ROOT = ENV_ROOT / "data"
DATA_DIR = COURSES_ROOT
# TODO: Remove the rest of the sys.path modification here and in cms/envs/common.py
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'lib')
# For Node.js
system_node_path = os.environ.get("NODE_PATH", REPO_ROOT / 'node_modules')
node_paths = [
COMMON_ROOT / "static/js/vendor",
COMMON_ROOT / "static/coffee/src",
system_node_path,
]
NODE_PATH = ':'.join(node_paths)
# For geolocation ip database
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
# Where to look for a status message
STATUS_MESSAGE_PATH = ENV_ROOT / "status_message.json"
############################ OpenID Provider ##################################
OPENID_PROVIDER_TRUSTED_ROOTS = ['cs50.net', '*.cs50.net']
################################## EDX WEB #####################################
# This is where we stick our compiled template files. Most of the app uses Mako
# templates
from tempdir import mkdtemp_clean
MAKO_MODULE_DIR = mkdtemp_clean('mako')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates']
# This is where Django Template lookup is defined. There are a few of these
# still left lying around.
TEMPLATE_DIRS = [
PROJECT_ROOT / "templates",
COMMON_ROOT / 'templates',
COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
]
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.core.context_processors.csrf',
# Added for django-wiki
'django.core.context_processors.media',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'sekizai.context_processors.sekizai',
# Hack to get required link URLs to password reset templates
'edxmako.shortcuts.marketing_link_context_processor',
# Shoppingcart processor (detects if request.user has a cart)
'shoppingcart.context_processor.user_has_cart_context_processor',
)
# use the ratelimit backend to prevent brute force attacks
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
STUDENT_FILEUPLOAD_MAX_SIZE = 4 * 1000 * 1000 # 4 MB
MAX_FILEUPLOADS_PER_INPUT = 20
# FIXME:
# We should have separate S3 staged URLs in case we need to make changes to
# these assets and test them.
LIB_URL = '/static/js/'
# Dev machines shouldn't need the book
# BOOK_URL = '/static/book/'
BOOK_URL = 'https://mitxstatic.s3.amazonaws.com/book_images/' # For AWS deploys
RSS_TIMEOUT = 600
# Configuration option for when we want to grab server error pages
STATIC_GRAB = False
DEV_CONTENT = True
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/accounts/login'
LOGIN_URL = EDX_ROOT_URL + '/accounts/login'
COURSE_NAME = "6.002_Spring_2012"
COURSE_NUMBER = "6.002x"
COURSE_TITLE = "Circuits and Electronics"
### Dark code. Should be enabled in local settings for devel.
ENABLE_MULTICOURSE = False # set to False to disable multicourse display (see lib.util.views.edXhome)
WIKI_ENABLED = False
###
COURSE_DEFAULT = '6.002x_Fall_2012'
COURSE_SETTINGS = {
'6.002x_Fall_2012': {
'number': '6.002x',
'title': 'Circuits and Electronics',
'xmlpath': '6002x/',
'location': 'i4x://edx/6002xs12/course/6.002x_Fall_2012',
}
}
# IP addresses that are allowed to reload the course, etc.
# TODO (vshnayder): Will probably need to change as we get real access control in.
LMS_MIGRATION_ALLOWED_IPS = []
############################## EVENT TRACKING #################################
# FIXME: Should we be doing this truncation?
TRACK_MAX_EVENT = 50000
DEBUG_TRACK_LOG = False
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat']
EVENT_TRACKING_ENABLED = True
EVENT_TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'eventtracking.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking',
'max_event_size': TRACK_MAX_EVENT,
}
}
}
EVENT_TRACKING_PROCESSORS = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
}
]
# Backwards compatibility with ENABLE_SQL_TRACKING_LOGS feature flag.
# In the future, adding the backend to TRACKING_BACKENDS should be enough.
if FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
TRACKING_BACKENDS.update({
'sql': {
'ENGINE': 'track.backends.django.DjangoBackend'
}
})
EVENT_TRACKING_BACKENDS.update({
'sql': {
'ENGINE': 'track.backends.django.DjangoBackend'
}
})
######################## GOOGLE ANALYTICS ###########################
GOOGLE_ANALYTICS_ACCOUNT = 'GOOGLE_ANALYTICS_ACCOUNT_DUMMY'
GOOGLE_ANALYTICS_LINKEDIN = 'GOOGLE_ANALYTICS_LINKEDIN_DUMMY'
######################## subdomain specific settings ###########################
COURSE_LISTINGS = {}
SUBDOMAIN_BRANDING = {}
VIRTUAL_UNIVERSITIES = []
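# When FEATURES['SUBDOMAIN_COURSE_LISTINGS'] is enabled, COURSE_LISTINGS is expected
# to map subdomains to lists of course ids. The entries below are hypothetical and
# would normally come from an environment-specific settings file such as dev_int.py:
#
# COURSE_LISTINGS = {
#     'default': ['MITx/6.002x/2012_Fall'],
#     'berkeley': ['BerkeleyX/CS169.1x/2012_Fall'],
# }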
############################### XModule Store ##################################
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
'OPTIONS': {
'data_dir': DATA_DIR,
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
}
}
}
CONTENTSTORE = None
DOC_STORE_CONFIG = {
'host': 'localhost',
'db': 'xmodule',
'collection': 'modulestore',
}
############# XBlock Configuration ##########
# Import after sys.path fixup
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore import prefer_xmodules
from xmodule.x_module import XModuleMixin
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (LmsBlockMixin, InheritanceMixin, XModuleMixin)
# Allow any XBlock in the LMS
XBLOCK_SELECT_FUNCTION = prefer_xmodules
#################### Python sandbox ############################################
CODE_JAIL = {
# Path to a sandboxed Python executable. None means don't bother.
'python_bin': None,
# User to run as in the sandbox.
'user': 'sandbox',
# Configurable limits.
'limits': {
# How many CPU seconds can jailed code use?
'CPU': 1,
},
}
# Some courses are allowed to run unsafe code. This is a list of regexes, one
# of them must match the course id for that course to run unsafe code.
#
# For example:
#
# COURSES_WITH_UNSAFE_CODE = [
# r"Harvard/XY123.1/.*"
# ]
COURSES_WITH_UNSAFE_CODE = []
############################### DJANGO BUILT-INS ###############################
# Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here
DEBUG = False
TEMPLATE_DEBUG = False
USE_TZ = True
# CMS base
CMS_BASE = 'localhost:8001'
# Site info
SITE_ID = 1
SITE_NAME = "edx.org"
HTTPS = 'on'
ROOT_URLCONF = 'lms.urls'
# NOTE: Please set ALLOWED_HOSTS to some sane value, as we do not allow the default '*'
# Platform Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = '[email protected]'
DEFAULT_FEEDBACK_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
TECH_SUPPORT_EMAIL = '[email protected]'
CONTACT_EMAIL = '[email protected]'
BUGS_EMAIL = '[email protected]'
ADMINS = ()
MANAGERS = ADMINS
# Static content
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_ROOT = ENV_ROOT / "staticfiles"
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
]
FAVICON_PATH = 'images/favicon.ico'
# Locale/Internationalization
TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html
# Sourced from http://www.localeplanet.com/icu/ and wikipedia
LANGUAGES = (
('en', u'English'),
    ('eo', u'Dummy Language (Esperanto)'), # Dummy language used for testing
('fake2', u'Fake translations'), # Another dummy language for testing (not pushed to prod)
('ar', u'العربية'), # Arabic
('az', u'azərbaycanca'), # Azerbaijani
('bg-bg', u'български (България)'), # Bulgarian (Bulgaria)
('bn-bd', u'বাংলা (বাংলাদেশ)'), # Bengali (Bangladesh)
('bn-in', u'বাংলা (ভারত)'), # Bengali (India)
('bs', u'bosanski'), # Bosnian
('ca', u'Català'), # Catalan
('ca@valencia', u'Català (València)'), # Catalan (Valencia)
('cs', u'Čeština'), # Czech
('cy', u'Cymraeg'), # Welsh
('da', u'dansk'), # Danish
('de-de', u'Deutsch (Deutschland)'), # German (Germany)
('el', u'Ελληνικά'), # Greek
('en@lolcat', u'LOLCAT English'), # LOLCAT English
('en@pirate', u'Pirate English'), # Pirate English
('es-419', u'Español (Latinoamérica)'), # Spanish (Latin America)
('es-ar', u'Español (Argentina)'), # Spanish (Argentina)
('es-ec', u'Español (Ecuador)'), # Spanish (Ecuador)
('es-es', u'Español (España)'), # Spanish (Spain)
('es-mx', u'Español (México)'), # Spanish (Mexico)
('es-pe', u'Español (Perú)'), # Spanish (Peru)
('et-ee', u'Eesti (Eesti)'), # Estonian (Estonia)
('eu-es', u'euskara (Espainia)'), # Basque (Spain)
('fa', u'فارسی'), # Persian
('fa-ir', u'فارسی (ایران)'), # Persian (Iran)
('fi-fi', u'Suomi (Suomi)'), # Finnish (Finland)
('fr', u'Français'), # French
('gl', u'Galego'), # Galician
('he', u'עברית'), # Hebrew
('hi', u'हिन्दी'), # Hindi
('hu', u'magyar'), # Hungarian
('hy-am', u'Հայերեն (Հայաստան)'), # Armenian (Armenia)
('id', u'Bahasa Indonesia'), # Indonesian
('it-it', u'Italiano (Italia)'), # Italian (Italy)
('ja-jp', u'日本語(日本)'), # Japanese (Japan)
('kk-kz', u'қазақ тілі (Қазақстан)'), # Kazakh (Kazakhstan)
('km-kh', u'ភាសាខ្មែរ (កម្ពុជា)'), # Khmer (Cambodia)
('kn', u'ಕನ್ನಡ'), # Kannada
('ko-kr', u'한국어(대한민국)'), # Korean (Korea)
('lt-lt', u'Lietuvių (Lietuva)'), # Lithuanian (Lithuania)
('ml', u'മലയാളം'), # Malayalam
('mn', u'Монгол хэл'), # Mongolian
('ms', u'Bahasa Melayu'), # Malay
('nb', u'Norsk bokmål'), # Norwegian Bokmål
('ne', u'नेपाली'), # Nepali
('nl-nl', u'Nederlands (Nederland)'), # Dutch (Netherlands)
('pl', u'Polski'), # Polish
('pt-br', u'Português (Brasil)'), # Portuguese (Brazil)
('pt-pt', u'Português (Portugal)'), # Portuguese (Portugal)
('ro', u'română'), # Romanian
('ru', u'Русский'), # Russian
('si', u'සිංහල'), # Sinhala
('sk', u'Slovenčina'), # Slovak
('sl', u'Slovenščina'), # Slovenian
('th', u'ไทย'), # Thai
('tr-tr', u'Türkçe (Türkiye)'), # Turkish (Turkey)
    ('uk', u'Українська'), # Ukrainian
('ur', u'اردو'), # Urdu
('vi', u'Tiếng Việt'), # Vietnamese
('zh-cn', u'中文(简体)'), # Chinese (China)
('zh-tw', u'中文(台灣)'), # Chinese (Taiwan)
)
LANGUAGE_DICT = dict(LANGUAGES)
USE_I18N = True
USE_L10N = True
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
#################################### GITHUB #######################################
# gitreload is used in LMS-workflow to pull content from github
# gitreload requests are only allowed from these IP addresses, which are
# the advertised public IPs of the github WebHook servers.
# These are listed, eg at https://github.com/edx/edx-platform/admin/hooks
ALLOWED_GITRELOAD_IPS = ['207.97.227.253', '50.57.128.197', '108.171.174.178']
#################################### AWS #######################################
# S3BotoStorage insists on a timeout for uploaded assets. We should make it
# permanent instead, but rather than trying to figure out exactly where that
# setting is, I'm just bumping the expiration time to something absurd (10
# years). This is only used if DEFAULT_FILE_STORAGE is overridden to use S3
# in the global settings.py
AWS_QUERYSTRING_EXPIRE = 10 * 365 * 24 * 60 * 60 # 10 years
################################# SIMPLEWIKI ###################################
SIMPLE_WIKI_REQUIRE_LOGIN_EDIT = True
SIMPLE_WIKI_REQUIRE_LOGIN_VIEW = False
################################# WIKI ###################################
from course_wiki import settings as course_wiki_settings
WIKI_ACCOUNT_HANDLING = False
WIKI_EDITOR = 'course_wiki.editors.CodeMirror'
WIKI_SHOW_MAX_CHILDREN = 0 # We don't use the little menu that shows children of an article in the breadcrumb
WIKI_ANONYMOUS = False # Don't allow anonymous access until the styling is figured out
WIKI_CAN_DELETE = course_wiki_settings.CAN_DELETE
WIKI_CAN_MODERATE = course_wiki_settings.CAN_MODERATE
WIKI_CAN_CHANGE_PERMISSIONS = course_wiki_settings.CAN_CHANGE_PERMISSIONS
WIKI_CAN_ASSIGN = course_wiki_settings.CAN_ASSIGN
WIKI_USE_BOOTSTRAP_SELECT_WIDGET = False
WIKI_LINK_LIVE_LOOKUPS = False
WIKI_LINK_DEFAULT_LEVEL = 2
##### Feedback submission mechanism #####
FEEDBACK_SUBMISSION_EMAIL = None
##### Zendesk #####
ZENDESK_URL = None
ZENDESK_USER = None
ZENDESK_API_KEY = None
##### shoppingcart Payment #####
PAYMENT_SUPPORT_EMAIL = '[email protected]'
##### Using cybersource by default #####
CC_PROCESSOR = {
'CyberSource': {
'SHARED_SECRET': '',
'MERCHANT_ID': '',
'SERIAL_NUMBER': '',
'ORDERPAGE_VERSION': '7',
'PURCHASE_ENDPOINT': '',
}
}
# Setting for PAID_COURSE_REGISTRATION, DOES NOT AFFECT VERIFIED STUDENTS
PAID_COURSE_REGISTRATION_CURRENCY = ['usd', '$']
# Members of this group are allowed to generate payment reports
PAYMENT_REPORT_GENERATOR_GROUP = 'shoppingcart_report_access'
################################# open ended grading config #####################
# By setting up the default settings with an incorrect user name and password,
# we will get an error when attempting to connect
OPEN_ENDED_GRADING_INTERFACE = {
'url': 'http://example.com/peer_grading',
'username': 'incorrect_user',
'password': 'incorrect_pass',
'staff_grading': 'staff_grading',
'peer_grading': 'peer_grading',
'grading_controller': 'grading_controller'
}
# Used for testing, debugging peer grading
MOCK_PEER_GRADING = False
# Used for testing, debugging staff grading
MOCK_STAFF_GRADING = False
################################# Jasmine ###################################
JASMINE_TEST_DIRECTORY = PROJECT_ROOT + '/static/coffee'
################################# Waffle ###################################
# Name prepended to cookies set by Waffle
WAFFLE_COOKIE = "waffle_flag_%s"
# Two weeks (in sec)
WAFFLE_MAX_AGE = 1209600
################################# Middleware ###################################
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'edxmako.makoloader.MakoFilesystemLoader',
'edxmako.makoloader.MakoAppDirectoriesLoader',
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'request_cache.middleware.RequestCache',
'microsite_configuration.middleware.MicrositeMiddleware',
'django_comment_client.middleware.AjaxExceptionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
# Instead of AuthenticationMiddleware, we use a cached backed version
#'django.contrib.auth.middleware.AuthenticationMiddleware',
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'contentserver.middleware.StaticContentServer',
'crum.CurrentRequestUserMiddleware',
# Adds user tags to tracking events
# Must go before TrackMiddleware, to get the context set up
'user_api.middleware.UserTagsEventContextMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'splash.middleware.SplashMiddleware',
# Allows us to dark-launch particular languages
'dark_lang.middleware.DarkLangMiddleware',
'embargo.middleware.EmbargoMiddleware',
# Allows us to set user preferences
# should be after DarkLangMiddleware
'lang_pref.middleware.LanguagePreferenceMiddleware',
# Detects user-requested locale from 'accept-language' header in http request
'django.middleware.locale.LocaleMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_comment_client.utils.ViewNameMiddleware',
'codejail.django_integration.ConfigureCodeJailMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# needs to run after locale middleware (or anything that modifies the request context)
'edxmako.middleware.MakoMiddleware',
# For A/B testing
'waffle.middleware.WaffleMiddleware',
# for expiring inactive sessions
'session_inactivity_timeout.middleware.SessionInactivityTimeout',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'course_wiki.middleware.WikiAccessMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'
X_FRAME_OPTIONS = 'ALLOW'
############################### Pipeline #######################################
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
from rooted_paths import rooted_glob
courseware_js = (
[
'coffee/src/' + pth + '.js'
for pth in ['courseware', 'histogram', 'navigation', 'time']
] +
sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/modules/**/*.js'))
)
main_vendor_js = [
'js/vendor/require.js',
'js/RequireJS-namespace-undefine.js',
'js/vendor/json2.js',
'js/vendor/jquery.min.js',
'js/vendor/jquery-ui.min.js',
'js/vendor/jquery.cookie.js',
'js/vendor/jquery.qtip.min.js',
'js/vendor/swfobject/swfobject.js',
'js/vendor/jquery.ba-bbq.min.js',
'js/vendor/ova/annotator-full.js',
'js/vendor/ova/annotator-full-firebase-auth.js',
'js/vendor/ova/video.dev.js',
'js/vendor/ova/vjs.youtube.js',
'js/vendor/ova/rangeslider.js',
'js/vendor/ova/share-annotator.js',
'js/vendor/ova/tinymce.min.js',
'js/vendor/ova/richText-annotator.js',
'js/vendor/ova/reply-annotator.js',
'js/vendor/ova/tags-annotator.js',
'js/vendor/ova/flagging-annotator.js',
'js/vendor/ova/jquery-Watch.js',
'js/vendor/ova/ova.js',
'js/vendor/ova/catch/js/catch.js',
'js/vendor/ova/catch/js/handlebars-1.1.2.js',
'js/vendor/URI.min.js',
]
discussion_js = sorted(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/discussion/**/*.js'))
staff_grading_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/staff_grading/**/*.js'))
open_ended_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/open_ended/**/*.js'))
notes_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/notes/**/*.js'))
instructor_dash_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/instructor_dashboard/**/*.js'))
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/font-awesome.css',
'css/vendor/jquery.qtip.min.css',
'css/vendor/responsive-carousel/responsive-carousel.css',
'css/vendor/responsive-carousel/responsive-carousel.slide.css',
'css/vendor/ova/edx-annotator.css',
'css/vendor/ova/annotator.css',
'css/vendor/ova/video-js.min.css',
'css/vendor/ova/rangeslider.css',
'css/vendor/ova/share-annotator.css',
'css/vendor/ova/richText-annotator.css',
'css/vendor/ova/tags-annotator.css',
'css/vendor/ova/flagging-annotator.css',
'css/vendor/ova/ova.css',
'js/vendor/ova/catch/css/main.css'
],
'output_filename': 'css/lms-style-vendor.css',
},
'style-vendor-tinymce-content': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css'
],
'output_filename': 'css/lms-style-vendor-tinymce-content.css',
},
'style-vendor-tinymce-skin': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css'
],
'output_filename': 'css/lms-style-vendor-tinymce-skin.css',
},
'style-app': {
'source_filenames': [
'sass/application.css',
'sass/ie.css'
],
'output_filename': 'css/lms-style-app.css',
},
'style-app-extend1': {
'source_filenames': [
'sass/application-extend1.css',
],
'output_filename': 'css/lms-style-app-extend1.css',
},
'style-app-extend2': {
'source_filenames': [
'sass/application-extend2.css',
],
'output_filename': 'css/lms-style-app-extend2.css',
},
'style-course-vendor': {
'source_filenames': [
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/jquery.treeview.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
],
'output_filename': 'css/lms-style-course-vendor.css',
},
'style-course': {
'source_filenames': [
'sass/course.css',
'xmodule/modules.css',
],
'output_filename': 'css/lms-style-course.css',
},
}
common_js = set(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/**/*.js')) - set(courseware_js + discussion_js + staff_grading_js + open_ended_js + notes_js + instructor_dash_js)
project_js = set(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/**/*.js')) - set(courseware_js + discussion_js + staff_grading_js + open_ended_js + notes_js + instructor_dash_js)
# test_order: Determines the position of this chunk of javascript on
# the jasmine test page
PIPELINE_JS = {
'application': {
# Application will contain all paths not in courseware_only_js
'source_filenames': sorted(common_js) + sorted(project_js) + [
'js/form.ext.js',
'js/my_courses_dropdown.js',
'js/toggle_login_modal.js',
'js/sticky_filter.js',
'js/query-params.js',
'js/src/utility.js',
'js/src/accessibility_tools.js',
],
'output_filename': 'js/lms-application.js',
'test_order': 1,
},
'courseware': {
'source_filenames': courseware_js,
'output_filename': 'js/lms-courseware.js',
'test_order': 2,
},
'main_vendor': {
'source_filenames': main_vendor_js,
'output_filename': 'js/lms-main_vendor.js',
'test_order': 0,
},
'module-descriptor-js': {
'source_filenames': rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js'),
'output_filename': 'js/lms-module-descriptors.js',
'test_order': 8,
},
'module-js': {
'source_filenames': rooted_glob(COMMON_ROOT / 'static', 'xmodule/modules/js/*.js'),
'output_filename': 'js/lms-modules.js',
'test_order': 3,
},
'discussion': {
'source_filenames': discussion_js,
'output_filename': 'js/discussion.js',
'test_order': 4,
},
'staff_grading': {
'source_filenames': staff_grading_js,
'output_filename': 'js/staff_grading.js',
'test_order': 5,
},
'open_ended': {
'source_filenames': open_ended_js,
'output_filename': 'js/open_ended.js',
'test_order': 6,
},
'notes': {
'source_filenames': notes_js,
'output_filename': 'js/notes.js',
'test_order': 7
},
'instructor_dash': {
'source_filenames': instructor_dash_js,
'output_filename': 'js/instructor_dash.js',
'test_order': 9,
},
}
PIPELINE_DISABLE_WRAPPER = True
# Compile all coffee files in course data directories if they are out of date.
# TODO: Remove this once we move data into Mongo. This is only temporary while
# course data directories are still in use.
if os.path.isdir(DATA_DIR):
for course_dir in os.listdir(DATA_DIR):
js_dir = DATA_DIR / course_dir / "js"
if not os.path.isdir(js_dir):
continue
for filename in os.listdir(js_dir):
if filename.endswith('coffee'):
new_filename = os.path.splitext(filename)[0] + ".js"
if os.path.exists(js_dir / new_filename):
coffee_timestamp = os.stat(js_dir / filename).st_mtime
js_timestamp = os.stat(js_dir / new_filename).st_mtime
if coffee_timestamp <= js_timestamp:
continue
os.system("rm %s" % (js_dir / new_filename))
os.system("coffee -c %s" % (js_dir / filename))
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = None
STATICFILES_IGNORE_PATTERNS = (
"sass/*",
"coffee/*",
# Symlinks used by js-test-tool
"xmodule_js",
"common_static",
)
PIPELINE_YUI_BINARY = 'yui-compressor'
# Setting that will only affect the edX version of django-pipeline until our changes are merged upstream
PIPELINE_COMPILE_INPLACE = True
################################# CELERY ######################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
HIGH_MEM_QUEUE = 'edx.core.high_mem'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {},
HIGH_MEM_QUEUE: {},
}
# let logging work as configured:
CELERYD_HIJACK_ROOT_LOGGER = False
################################ Bulk Email ###################################
# Suffix used to construct 'from' email address for bulk emails.
# A course-specific identifier is prepended.
BULK_EMAIL_DEFAULT_FROM_EMAIL = '[email protected]'
# Parameters for breaking down course enrollment into subtasks.
BULK_EMAIL_EMAILS_PER_TASK = 100
BULK_EMAIL_EMAILS_PER_QUERY = 1000
# Initial delay used for retrying tasks. Additional retries use
# longer delays. Value is in seconds.
BULK_EMAIL_DEFAULT_RETRY_DELAY = 30
# Maximum number of retries per task for errors that are not related
# to throttling.
BULK_EMAIL_MAX_RETRIES = 5
# Maximum number of retries per task for errors that are related to
# throttling. If this is not set, then there is no cap on such retries.
BULK_EMAIL_INFINITE_RETRY_CAP = 1000
# We want Bulk Email running on the high-priority queue, so we define the
# routing key that points to it. At the moment, the name is the same.
BULK_EMAIL_ROUTING_KEY = HIGH_PRIORITY_QUEUE
# Flag to indicate if individual email addresses should be logged as they are sent
# a bulk email message.
BULK_EMAIL_LOG_SENT_EMAILS = False
# Delay in seconds to sleep between individual mail messages being sent,
# when a bulk email task is retried for rate-related reasons. Choose this
# value depending on the number of workers that might be sending email in
# parallel, and what the SES rate is.
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = 0.02
############################## Video ##########################################
YOUTUBE = {
# YouTube JavaScript API
'API': 'www.youtube.com/iframe_api',
# URL to test YouTube availability
'TEST_URL': 'gdata.youtube.com/feeds/api/videos/',
# Current youtube api for requesting transcripts.
# For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
'TEXT_API': {
'url': 'video.google.com/timedtext',
'params': {
'lang': 'en',
'v': 'set_youtube_id_of_11_symbols_here',
},
},
}
################################### APPS ######################################
INSTALLED_APPS = (
# Standard ones that are always installed...
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'djcelery',
'south',
# Database-backed configuration
'config_models',
# Monitor the status of services
'service_status',
# For asset pipelining
'edxmako',
'pipeline',
'staticfiles',
'static_replace',
# Our courseware
'circuit',
'courseware',
'student',
'static_template_view',
'staticbook',
'track',
'eventtracking.django',
'util',
'certificates',
'dashboard',
'instructor',
'instructor_task',
'open_ended_grading',
'psychometrics',
'licenses',
'course_groups',
'bulk_email',
# External auth (OpenID, shib)
'external_auth',
'django_openid_auth',
# For the wiki
'wiki', # The new django-wiki from benjaoming
'django_notify',
'course_wiki', # Our customizations
'mptt',
'sekizai',
#'wiki.plugins.attachments',
'wiki.plugins.links',
'wiki.plugins.notifications',
'course_wiki.plugins.markdownedx',
# Foldit integration
'foldit',
# For A/B testing
'waffle',
# For testing
'django.contrib.admin', # only used in DEBUG mode
'django_nose',
'debug',
# Discussion forums
'django_comment_client',
'django_comment_common',
'notes',
# Splash screen
'splash',
# Monitoring
'datadog',
# User API
'rest_framework',
'user_api',
# Shopping cart
'shoppingcart',
# Notification preferences setting
'notification_prefs',
# Different Course Modes
'course_modes',
# Student Identity Verification
'verify_student',
# Dark-launching languages
'dark_lang',
# Microsite configuration
'microsite_configuration',
# Student Identity Reverification
'reverification',
'embargo',
# Monitoring functionality
'monitoring',
)
######################### MARKETING SITE ###############################
EDXMKTG_COOKIE_NAME = 'edxloggedin'
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
'ABOUT': 'about_edx',
'CONTACT': 'contact',
'FAQ': 'help_edx',
'COURSES': 'courses',
'ROOT': 'root',
'TOS': 'tos',
'HONOR': 'honor',
'PRIVACY': 'privacy_edx',
'JOBS': 'jobs',
'PRESS': 'press',
# Verified Certificates
'WHAT_IS_VERIFIED_CERT': 'verified-certificate',
}
################# Student Verification #################
VERIFY_STUDENT = {
"DAYS_GOOD_FOR": 365, # How many days is a verficiation good for?
}
### This enables the Metrics tab for the Instructor dashboard ###########
FEATURES['CLASS_DASHBOARD'] = False
if FEATURES.get('CLASS_DASHBOARD'):
INSTALLED_APPS += ('class_dashboard',)
######################## CAS authentication ###########################
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = 'https://provide_your_cas_url_here'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
###################### Registration ##################################
# For each of the fields, give one of the following values:
# - 'required': to display the field, and make it mandatory
# - 'optional': to display the field, and make it non-mandatory
# - 'hidden': to not display the field
REGISTRATION_EXTRA_FIELDS = {
'level_of_education': 'optional',
'gender': 'optional',
'year_of_birth': 'optional',
'mailing_address': 'optional',
'goals': 'optional',
'honor_code': 'required',
'city': 'hidden',
'country': 'hidden',
}
########################## CERTIFICATE NAME ########################
CERT_NAME_SHORT = "Certificate"
CERT_NAME_LONG = "Certificate of Achievement"
###################### Grade Downloads ######################
GRADES_DOWNLOAD_ROUTING_KEY = HIGH_MEM_QUEUE
GRADES_DOWNLOAD = {
'STORAGE_TYPE': 'localfs',
'BUCKET': 'edx-grades',
'ROOT_PATH': '/tmp/edx-s3/grades',
}
######################## PROGRESS SUCCESS BUTTON ##############################
# The following fields are available in the URL: {course_id} {student_id}
PROGRESS_SUCCESS_BUTTON_URL = 'http://<domain>/<path>/{course_id}'
PROGRESS_SUCCESS_BUTTON_TEXT_OVERRIDE = None
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = None
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
##################### LinkedIn #####################
INSTALLED_APPS += ('django_openid_auth',)
############################ LinkedIn Integration #############################
INSTALLED_APPS += ('linkedin',)
LINKEDIN_API = {
'EMAIL_WHITELIST': [],
'COMPANY_ID': '2746406',
}
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
##### LMS DEADLINE DISPLAY TIME_ZONE #######
TIME_ZONE_DISPLAYED_FOR_DEADLINES = 'UTC'
# Source:
# http://loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt according to http://en.wikipedia.org/wiki/ISO_639-1
ALL_LANGUAGES = (
[u"aa", u"Afar"],
[u"ab", u"Abkhazian"],
[u"af", u"Afrikaans"],
[u"ak", u"Akan"],
[u"sq", u"Albanian"],
[u"am", u"Amharic"],
[u"ar", u"Arabic"],
[u"an", u"Aragonese"],
[u"hy", u"Armenian"],
[u"as", u"Assamese"],
[u"av", u"Avaric"],
[u"ae", u"Avestan"],
[u"ay", u"Aymara"],
[u"az", u"Azerbaijani"],
[u"ba", u"Bashkir"],
[u"bm", u"Bambara"],
[u"eu", u"Basque"],
[u"be", u"Belarusian"],
[u"bn", u"Bengali"],
[u"bh", u"Bihari languages"],
[u"bi", u"Bislama"],
[u"bs", u"Bosnian"],
[u"br", u"Breton"],
[u"bg", u"Bulgarian"],
[u"my", u"Burmese"],
[u"ca", u"Catalan"],
[u"ch", u"Chamorro"],
[u"ce", u"Chechen"],
[u"zh", u"Chinese"],
[u"cu", u"Church Slavic"],
[u"cv", u"Chuvash"],
[u"kw", u"Cornish"],
[u"co", u"Corsican"],
[u"cr", u"Cree"],
[u"cs", u"Czech"],
[u"da", u"Danish"],
[u"dv", u"Divehi"],
[u"nl", u"Dutch"],
[u"dz", u"Dzongkha"],
[u"en", u"English"],
[u"eo", u"Esperanto"],
[u"et", u"Estonian"],
[u"ee", u"Ewe"],
[u"fo", u"Faroese"],
[u"fj", u"Fijian"],
[u"fi", u"Finnish"],
[u"fr", u"French"],
[u"fy", u"Western Frisian"],
[u"ff", u"Fulah"],
[u"ka", u"Georgian"],
[u"de", u"German"],
[u"gd", u"Gaelic"],
[u"ga", u"Irish"],
[u"gl", u"Galician"],
[u"gv", u"Manx"],
[u"el", u"Greek"],
[u"gn", u"Guarani"],
[u"gu", u"Gujarati"],
[u"ht", u"Haitian"],
[u"ha", u"Hausa"],
[u"he", u"Hebrew"],
[u"hz", u"Herero"],
[u"hi", u"Hindi"],
[u"ho", u"Hiri Motu"],
[u"hr", u"Croatian"],
[u"hu", u"Hungarian"],
[u"ig", u"Igbo"],
[u"is", u"Icelandic"],
[u"io", u"Ido"],
[u"ii", u"Sichuan Yi"],
[u"iu", u"Inuktitut"],
[u"ie", u"Interlingue"],
[u"ia", u"Interlingua"],
[u"id", u"Indonesian"],
[u"ik", u"Inupiaq"],
[u"it", u"Italian"],
[u"jv", u"Javanese"],
[u"ja", u"Japanese"],
[u"kl", u"Kalaallisut"],
[u"kn", u"Kannada"],
[u"ks", u"Kashmiri"],
[u"kr", u"Kanuri"],
[u"kk", u"Kazakh"],
[u"km", u"Central Khmer"],
[u"ki", u"Kikuyu"],
[u"rw", u"Kinyarwanda"],
[u"ky", u"Kirghiz"],
[u"kv", u"Komi"],
[u"kg", u"Kongo"],
[u"ko", u"Korean"],
[u"kj", u"Kuanyama"],
[u"ku", u"Kurdish"],
[u"lo", u"Lao"],
[u"la", u"Latin"],
[u"lv", u"Latvian"],
[u"li", u"Limburgan"],
[u"ln", u"Lingala"],
[u"lt", u"Lithuanian"],
[u"lb", u"Luxembourgish"],
[u"lu", u"Luba-Katanga"],
[u"lg", u"Ganda"],
[u"mk", u"Macedonian"],
[u"mh", u"Marshallese"],
[u"ml", u"Malayalam"],
[u"mi", u"Maori"],
[u"mr", u"Marathi"],
[u"ms", u"Malay"],
[u"mg", u"Malagasy"],
[u"mt", u"Maltese"],
[u"mn", u"Mongolian"],
[u"na", u"Nauru"],
[u"nv", u"Navajo"],
[u"nr", u"Ndebele, South"],
[u"nd", u"Ndebele, North"],
[u"ng", u"Ndonga"],
[u"ne", u"Nepali"],
[u"nn", u"Norwegian Nynorsk"],
[u"nb", u"Bokmål, Norwegian"],
[u"no", u"Norwegian"],
[u"ny", u"Chichewa"],
[u"oc", u"Occitan"],
[u"oj", u"Ojibwa"],
[u"or", u"Oriya"],
[u"om", u"Oromo"],
[u"os", u"Ossetian"],
[u"pa", u"Panjabi"],
[u"fa", u"Persian"],
[u"pi", u"Pali"],
[u"pl", u"Polish"],
[u"pt", u"Portuguese"],
[u"ps", u"Pushto"],
[u"qu", u"Quechua"],
[u"rm", u"Romansh"],
[u"ro", u"Romanian"],
[u"rn", u"Rundi"],
[u"ru", u"Russian"],
[u"sg", u"Sango"],
[u"sa", u"Sanskrit"],
[u"si", u"Sinhala"],
[u"sk", u"Slovak"],
[u"sl", u"Slovenian"],
[u"se", u"Northern Sami"],
[u"sm", u"Samoan"],
[u"sn", u"Shona"],
[u"sd", u"Sindhi"],
[u"so", u"Somali"],
[u"st", u"Sotho, Southern"],
[u"es", u"Spanish"],
[u"sc", u"Sardinian"],
[u"sr", u"Serbian"],
[u"ss", u"Swati"],
[u"su", u"Sundanese"],
[u"sw", u"Swahili"],
[u"sv", u"Swedish"],
[u"ty", u"Tahitian"],
[u"ta", u"Tamil"],
[u"tt", u"Tatar"],
[u"te", u"Telugu"],
[u"tg", u"Tajik"],
[u"tl", u"Tagalog"],
[u"th", u"Thai"],
[u"bo", u"Tibetan"],
[u"ti", u"Tigrinya"],
[u"to", u"Tonga (Tonga Islands)"],
[u"tn", u"Tswana"],
[u"ts", u"Tsonga"],
[u"tk", u"Turkmen"],
[u"tr", u"Turkish"],
[u"tw", u"Twi"],
[u"ug", u"Uighur"],
[u"uk", u"Ukrainian"],
[u"ur", u"Urdu"],
[u"uz", u"Uzbek"],
[u"ve", u"Venda"],
[u"vi", u"Vietnamese"],
[u"vo", u"Volapük"],
[u"cy", u"Welsh"],
[u"wa", u"Walloon"],
[u"wo", u"Wolof"],
[u"xh", u"Xhosa"],
[u"yi", u"Yiddish"],
[u"yo", u"Yoruba"],
[u"za", u"Zhuang"],
[u"zu", u"Zulu"]
)
### Apps only installed in some instances
OPTIONAL_APPS = (
'edx_jsdraw',
'mentoring',
# edx-ora2
'submissions',
'openassessment',
'openassessment.assessment',
'openassessment.workflow',
'openassessment.xblock'
)
for app_name in OPTIONAL_APPS:
# First attempt to only find the module rather than actually importing it,
# to avoid circular references - only try to import if it can't be found
# by find_module, which doesn't work with import hooks
try:
imp.find_module(app_name)
except ImportError:
try:
__import__(app_name)
except ImportError:
continue
INSTALLED_APPS += (app_name,)
# Stub for third_party_auth options.
# See common/djangoapps/third_party_auth/settings.py for configuration details.
THIRD_PARTY_AUTH = {}
### ADVANCED_SECURITY_CONFIG
# Empty by default
ADVANCED_SECURITY_CONFIG = {}
| agpl-3.0 | 7,149,862,700,838,229,000 | 31.291059 | 180 | 0.625886 | false |
redhat-cip/python-tripleo-wrapper | rdomhelper/ssh.py | 1 | 11391 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import paramiko
from paramiko import ssh_exception
import io
import logging
import select
import time
LOG = logging.getLogger('__chainsaw__')
class SshClient(object):
"""SSH client based on Paramiko.
This class implements the following features:
    - run commands on a remote host
    - send files to a remote host
    - redirect the connection through another ssh server so that every
      command is executed on the redirected host
    - create remote files
"""
def __init__(self, hostname, user, key_filename=None,
via_ip=None):
""":param hostname: the host on which to connect
:type hostname: str
:param user: the user to use for the connection
:type user: str
:param key_filename: the private key path to use, by default it will
use the system host keys
:type key_filename: str
:param redirect_to_host: the host on which to redirect, by default it
will use the port 22
:type redirect_to_host: str
"""
        assert hostname, 'hostname must be defined.'
        assert user, 'user must be defined.'
self._hostname = hostname
self._user = user
self._key_filename = key_filename
self.load_private_key(key_filename)
self._client = paramiko.SSHClient()
self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self._via_ip = via_ip
self._transport = None
self._started = False
self.description = 'not started yet'
self._environment_filenames = []
def load_private_key(self, priv_key):
"""Register the SSH private key."""
with open(priv_key) as fd:
self._private_key = paramiko.RSAKey.from_private_key(fd)
def _get_transport_via_ip(self):
exception = None
for i in range(60):
try:
channel = self._client.get_transport().open_channel(
'direct-tcpip',
(self._hostname, 22),
(self._via_ip, 0))
except ssh_exception.ChannelException as exception:
                LOG.debug('%s error opening the direct-tcpip channel, retrying' % self.description)
time.sleep(1)
else:
transport = paramiko.Transport(channel)
transport.start_client()
transport.auth_publickey(self._user, self._private_key)
return transport
raise exception
def _get_transport(self):
if self._via_ip:
transport = self._get_transport_via_ip()
else:
transport = self._client.get_transport()
transport.set_keepalive(10)
return transport
def start(self):
"""Start the ssh client and connect to the host.
It will wait until the ssh service is available during 90 seconds.
If it doesn't succed to connect then the function will raise
an SSHException.
"""
if self._via_ip:
connect_to = self._via_ip
self.description = '[%s@%s via %s]' % (self._user,
self._hostname,
self._via_ip)
else:
connect_to = self._hostname
self.description = '[%s@%s]' % (self._user,
self._hostname)
for i in range(60):
try:
self._client.connect(
connect_to,
username=self._user,
allow_agent=True,
key_filename=self._key_filename)
# NOTE(Gonéri): TypeError is in the list because of
# https://github.com/paramiko/paramiko/issues/615
self._transport = self._get_transport()
except (OSError,
TypeError,
ssh_exception.SSHException,
ssh_exception.NoValidConnectionsError) as e:
LOG.info('%s waiting for %s' % (self.description, connect_to))
LOG.debug("exception: '%s'" % str(e))
time.sleep(1)
else:
LOG.debug('%s connected' % self.description)
self._started = True
return
_error = ("unable to connect to ssh service on '%s'" % self._hostname)
LOG.error(_error)
raise ssh_exception.SSHException(_error)
def _check_started(self):
if not self._started:
_error = "ssh client not started, please start the client"
LOG.error(_error)
raise ssh_exception.SSHException(_error)
def stop(self):
"""Close the ssh connection."""
self._started = False
self._client.close()
def run(self, cmd, sudo=False, ignore_error=False, success_status=(0,),
error_callback=None, custom_log=None):
"""Run a command on the remote host.
        The command is run on the remote host; if there is a redirect host
        configured, the command is run through it. See __init__.
:param cmd: the command to run
:type cmd: str
        :param sudo: True if the command should be run with sudo; this
        parameter disables the use of environment files.
        :type sudo: bool
:param success_status: the list of the possible success status
:type success_status: list
        :param error_callback: if provided, the callback to call in case of
        a failure. It will be called with two args: the output of the command
        and the returned error code.
:return: the tuple (output of the command, returned code)
:rtype: tuple
        :param custom_log: an optional string to record in the log instead of
        the command. This is useful, for example, if you want to hide a password.
        :type custom_log: str
"""
self._check_started()
cmd_output = io.StringIO()
channel = self._get_channel()
if sudo:
cmd = "sudo %s" % cmd
else:
for filename in self._environment_filenames:
cmd = '. %s; %s' % (filename, cmd)
if not custom_log:
custom_log = cmd
LOG.info("%s run '%s'" % (self.description, custom_log))
channel.exec_command(cmd)
while True:
if channel.exit_status_ready():
break
rl, _, _ = select.select([channel], [], [], 30)
if rl:
received = channel.recv(1024).decode('UTF-8', 'ignore').strip()
if received:
LOG.debug(received)
cmd_output.write(received)
cmd_output = cmd_output.getvalue()
exit_status = channel.exit_status
if ignore_error or channel.exit_status in success_status:
return cmd_output, channel.exit_status
elif error_callback:
return error_callback(cmd_output, exit_status)
else:
_error = ("%s command %s has failed with, rc='%s'" %
(self.description, custom_log, exit_status))
LOG.error(_error)
raise ssh_exception.SSHException(_error)
def _get_channel(self):
"""Returns a channel according to if there is a redirection to do or
not.
"""
channel = self._transport.open_session()
channel.set_combine_stderr(True)
channel.get_pty()
return channel
def send_file(self, local_path, remote_path):
"""Send a file to the remote host.
:param local_path: the local path of the file
:type local_path: str
:param remote_path: the remote path of the file
:type remote_path: str
:return: the file attributes
:rtype: paramiko.sftp_attr.SFTPAttributes
"""
self._check_started()
sftp = paramiko.SFTPClient.from_transport(self._transport)
return sftp.put(local_path, remote_path)
def create_file(self, path, content, mode='w'):
"""Create a file with a content.
:param path: the path of the file.
:type path: str
:param content: the content of the file
:type content: str
:param mode: the mode of the file while opening it
:type mode: str
"""
self._check_started()
sftp = paramiko.SFTPClient.from_transport(self._transport)
with sftp.open(path, mode) as remote_file:
remote_file.write(content)
remote_file.flush()
def info(self):
return {'hostname': self._hostname,
'user': self._user,
'key_filename': self._key_filename}
def add_environment_file(self, filename):
self._environment_filenames.append(filename)
class PoolSshClient(object):
def __init__(self):
self._ssh_clients = {}
def build_ssh_client(self, hostname, user, key_filename=None,
via_ip=None):
_ssh_client = SshClient(hostname, user, key_filename,
via_ip)
_ssh_client.start()
self._ssh_clients[user] = _ssh_client
def add_ssh_client(self, user, ssh_client):
self._ssh_clients[user] = ssh_client
def del_ssh_client(self, user):
self._check_ssh_client(user)
del self._ssh_clients[user]
def get_client(self, user):
self._check_ssh_client(user)
return self._ssh_clients[user]
def _check_ssh_client(self, user):
if user not in self._ssh_clients.keys():
_error = "ssh client for user %s not existing" % user
LOG.error(_error)
raise ssh_exception.SSHException(_error)
def run(self, user, cmd, sudo=False, ignore_error=False,
success_status=(0,), error_callback=None, custom_log=None):
self._check_ssh_client(user)
return self._ssh_clients[user].run(
cmd,
sudo=sudo,
ignore_error=ignore_error,
success_status=success_status,
error_callback=error_callback,
custom_log=custom_log)
def send_file(self, user, local_path, remote_path):
self._check_ssh_client(user)
return self._ssh_clients[user].send_file(local_path, remote_path)
def create_file(self, user, path, content, mode='w'):
self._check_ssh_client(user)
return self._ssh_clients[user].create_file(path, content, mode)
def stop_all(self):
for ssh_client in self._ssh_clients.values():
ssh_client.stop()
def add_environment_file(self, user, filename):
self._check_ssh_client(user)
self._ssh_clients[user].add_environment_file(filename)
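# A minimal usage sketch (not part of the original module): the host, user and
# key path below are placeholder assumptions. The block only runs when this
# module is executed directly, so importing rdomhelper.ssh is unaffected.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    example_client = SshClient(hostname='192.0.2.10', user='stack',
                               key_filename='/home/stack/.ssh/id_rsa')
    example_client.start()
    try:
        output, return_code = example_client.run('uname -a')
        LOG.info('rc=%s output=%s', return_code, output)
    finally:
        example_client.stop()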
| apache-2.0 | -2,446,650,625,284,174,300 | 35.623794 | 89 | 0.576207 | false |
hermestrimegiste/patchtgtel | patchConnectionTogotelecom.py | 1 | 2031 | #-*- coding:utf-8 -*-
__author__ = 'hermes'
import socket
from os import system
from time import sleep
from datetime import datetime
global connectionName
connectionName = 'TOGOTELECOM' # Define the name of your network connection
def is_connected():
# http://stackoverflow.com/questions/20913411/test-if-an-internet-connection-is-present-in-python
try:
#host = socket.gethostbyname("www.google.com")
#socket.create_connection(('173.194.67.94', 80), 25)
        # method 2: just resolve the hostname, no connection test
socket.gethostbyname("www.google.com")
return True
except:
try:
socket.create_connection(('173.194.67.94', 80), 15)
return True
except:
pass
pass
return False
def hardRestartNetwork():
system('nmcli nm enable false')
system('nmcli nm enable true')
sleep(5)
system("nmcli con up id '%s'"% connectionName)
def patchTogotelecom():
activeReseau = system('nmcli nm enable true')
deconnectionSoft = system('nmcli dev disconnect iface ttyUSB0')
sleep(5)
if (deconnectionSoft == 0 or deconnectionSoft == 1536):
activeTGTEL = system("nmcli con up id '%s'"% connectionName)
if activeTGTEL == 768:
            # if error: the 90-second connection timeout expired.
#system('modprobe --force-vermagic usb_wwan usbserial')
hardRestartNetwork()
else:
        # restart the network if the soft method did not work
hardRestartNetwork()
if is_connected():
print(u'Connecté le %s '%str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
else:
print(u'Tentative echoué le %s '%str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
# sleep(5)
# start of the script execution
#system('modprobe --force-vermagic usb_wwan usbserial')
hardRestartNetwork()
print(u'debut du script > %s '%str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
sleep(5)
while True:
if is_connected():
sleep(60)
else:
print(u'Tentative de reconnexion le %s '%str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
patchTogotelecom()
| gpl-2.0 | -6,435,792,570,533,313,000 | 26.378378 | 100 | 0.659427 | false |
nigelb/SerialGrabber | serial_grabber/cli.py | 1 | 2827 | #!/usr/bin/env python
# SerialGrabber reads data from a serial port and processes it with the
# configured processor.
# Copyright (C) 2012 NigelB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import signal
import time
from SerialGrabber_Storage import storage_cache
from serial_grabber.commander import MultiProcessParameterFactory
from serial_grabber.util import config_helper
from serial_grabber.watchdog import running, counter, Watchdog
from serial_grabber.processor import ProcessorManager
class status:
def __init__(self, logger):
self.logger = logger
def set_tooltip(self, tooltip):
self.logger.info(tooltip)
def register_handler(running, watchdog, reader, processor, command):
def signal_handler(signal, frame):
print 'You pressed Ctrl+C!'
running.running = False
if command:
command.stop()
watchdog.join()
if reader:
reader.close()
exit(0)
signal.signal(signal.SIGINT, signal_handler)
def start(logger, reader, processor, command):
try:
si = status(logger)
isRunning = running(True)
c = counter(si)
params = config_helper({
"counter": c,
"running": isRunning
})
if issubclass(command.__class__, MultiProcessParameterFactory):
command.populate_parameters(params)
if issubclass(reader.__class__, MultiProcessParameterFactory):
reader.populate_parameters(params)
if issubclass(processor.__class__, MultiProcessParameterFactory):
processor.populate_parameters(params)
watchdog = Watchdog(isRunning)
register_handler(isRunning, watchdog, reader, processor, command)
if reader:
watchdog.start_thread(reader, (isRunning, c, params), "Runner")
if processor:
watchdog.start_thread(ProcessorManager(processor), (isRunning, c, params), "Processor")
if command and reader:
watchdog.start_thread(command, (isRunning, c, params), "Commander")
while isRunning.running:
time.sleep(1)
finally:
storage_cache.close_cache()
| gpl-2.0 | -6,709,110,638,634,718,000 | 34.3375 | 99 | 0.686594 | false |
Hybrid-Cloud/badam | patches_tool/aws_patch/aws_deps/libcloud/compute/drivers/vultr.py | 1 | 6023 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Vultr Driver
"""
import time
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlencode
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.compute.types import Provider, NodeState
from libcloud.common.types import LibcloudError, InvalidCredsError
from libcloud.compute.base import NodeDriver
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation
class VultrResponse(JsonResponse):
def parse_error(self):
if self.status == httplib.OK:
body = self.parse_body()
return body
elif self.status == httplib.FORBIDDEN:
raise InvalidCredsError(self.body)
else:
raise LibcloudError(self.body)
class VultrConnection(ConnectionKey):
"""
Connection class for the Vultr driver.
"""
host = 'api.vultr.com'
responseCls = VultrResponse
def add_default_params(self, params):
"""
        Add parameters that are necessary for every request.
        This method adds ``api_key`` to
        the request.
"""
params['api_key'] = self.key
return params
def encode_data(self, data):
return urlencode(data)
def get(self, url):
return self.request(url)
def post(self, url, data):
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
return self.request(url, data=data, headers=headers, method='POST')
class VultrNodeDriver(NodeDriver):
"""
VultrNode node driver.
"""
connectionCls = VultrConnection
type = Provider.VULTR
name = 'Vultr'
website = 'https://www.vultr.com'
NODE_STATE_MAP = {'pending': NodeState.PENDING,
'active': NodeState.RUNNING}
def list_nodes(self):
return self._list_resources('/v1/server/list', self._to_node)
def list_locations(self):
return self._list_resources('/v1/regions/list', self._to_location)
def list_sizes(self):
return self._list_resources('/v1/plans/list', self._to_size)
def list_images(self):
return self._list_resources('/v1/os/list', self._to_image)
def create_node(self, name, size, image, location):
params = {'DCID': location.id, 'VPSPLANID': size.id,
'OSID': image.id, 'label': name}
result = self.connection.post('/v1/server/create', params)
if result.status != httplib.OK:
return False
subid = result.object['SUBID']
retry_count = 3
created_node = None
for i in range(retry_count):
try:
nodes = self.list_nodes()
created_node = [n for n in nodes if n.id == subid][0]
except IndexError:
time.sleep(1)
pass
else:
break
return created_node
def reboot_node(self, node):
params = {'SUBID': node.id}
res = self.connection.post('/v1/server/reboot', params)
return res.status == httplib.OK
def destroy_node(self, node):
params = {'SUBID': node.id}
res = self.connection.post('/v1/server/destroy', params)
return res.status == httplib.OK
    def _list_resources(self, url, transform_func):
        data = self.connection.get(url).object
        sorted_key = sorted(data)
        return [transform_func(data[key]) for key in sorted_key]
def _to_node(self, data):
if 'status' in data:
state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN)
if state == NodeState.RUNNING and \
data['power_status'] != 'running':
state = NodeState.STOPPED
else:
state = NodeState.UNKNOWN
if 'main_ip' in data and data['main_ip'] is not None:
public_ips = [data['main_ip']]
else:
public_ips = []
extra_keys = []
extra = {}
for key in extra_keys:
if key in data:
extra[key] = data[key]
node = Node(id=data['SUBID'], name=data['label'], state=state,
public_ips=public_ips, private_ips=None, extra=extra,
driver=self)
return node
def _to_location(self, data):
return NodeLocation(id=data['DCID'], name=data['name'],
country=data['country'], driver=self)
def _to_size(self, data):
extra = {'vcpu_count': int(data['vcpu_count'])}
ram = int(data['ram'])
disk = int(data['disk'])
bandwidth = float(data['bandwidth'])
price = float(data['price_per_month'])
return NodeSize(id=data['VPSPLANID'], name=data['name'],
ram=ram, disk=disk,
bandwidth=bandwidth, price=price,
extra=extra, driver=self)
def _to_image(self, data):
extra = {'arch': data['arch'], 'family': data['family']}
return NodeImage(id=data['OSID'], name=data['name'], extra=extra,
driver=self)
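# A brief usage sketch (assumed, not from the libcloud docs): the API key is a
# placeholder. The block only runs when this module is executed directly.
if __name__ == '__main__':
    vultr_driver = VultrNodeDriver('your-vultr-api-key')
    # List the available datacenters, plans and operating systems.
    print(vultr_driver.list_locations())
    print(vultr_driver.list_sizes())
    print(vultr_driver.list_images())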
| apache-2.0 | -2,399,194,911,052,198,000 | 30.733696 | 78 | 0.584592 | false |
YourCyborg/Sun-RPI | src/objects/admin.py | 1 | 5428 | #
# This sets up how models are displayed
# in the web admin interface.
#
from django import forms
from django.conf import settings
from django.contrib import admin
from src.objects.models import ObjAttribute, ObjectDB, ObjectNick, Alias
from src.utils.utils import mod_import
class ObjAttributeInline(admin.TabularInline):
model = ObjAttribute
fields = ('db_key', 'db_value')
extra = 0
class NickInline(admin.TabularInline):
model = ObjectNick
fields = ('db_nick', 'db_real', 'db_type')
extra = 0
class AliasInline(admin.TabularInline):
model = Alias
fields = ("db_key",)
extra = 0
class ObjectCreateForm(forms.ModelForm):
"This form details the look of the fields"
class Meta:
model = ObjectDB
db_key = forms.CharField(label="Name/Key",
widget=forms.TextInput(attrs={'size':'78'}),
help_text="Main identifier, like 'apple', 'strong guy', 'Elizabeth' etc. If creating a Character, check so the name is unique among characters!",)
db_typeclass_path = forms.CharField(label="Typeclass",initial="Change to (for example) %s or %s." % (settings.BASE_OBJECT_TYPECLASS, settings.BASE_CHARACTER_TYPECLASS),
widget=forms.TextInput(attrs={'size':'78'}),
help_text="This defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass. If you are creating a Character you should use the typeclass defined by settings.BASE_CHARACTER_TYPECLASS or one derived from that.")
db_permissions = forms.CharField(label="Permissions",
initial=settings.PERMISSION_PLAYER_DEFAULT,
required=False,
widget=forms.TextInput(attrs={'size':'78'}),
help_text="a comma-separated list of text strings checked by certain locks. They are mainly of use for Character objects. Character permissions overload permissions defined on a controlling Player. Most objects normally don't have any permissions defined.")
db_cmdset_storage = forms.CharField(label="CmdSet",
initial=settings.CMDSET_DEFAULT,
required=False,
widget=forms.TextInput(attrs={'size':'78'}),
help_text="Most non-character objects don't need a cmdset and can leave this field blank.")
class ObjectEditForm(ObjectCreateForm):
"Form used for editing. Extends the create one with more fields"
db_lock_storage = forms.CharField(label="Locks",
required=False,
widget=forms.Textarea(attrs={'cols':'100', 'rows':'2'}),
help_text="In-game lock definition string. If not given, defaults will be used. This string should be on the form <i>type:lockfunction(args);type2:lockfunction2(args);...")
class ObjectDBAdmin(admin.ModelAdmin):
list_display = ('id', 'db_key', 'db_location', 'db_player', 'db_typeclass_path')
list_display_links = ('id', 'db_key')
ordering = ['db_player', 'db_typeclass_path', 'id']
search_fields = ['^db_key', 'db_typeclass_path']
save_as = True
save_on_top = True
list_select_related = True
list_filter = ('db_permissions', 'db_location', 'db_typeclass_path')
# editing fields setup
form = ObjectEditForm
fieldsets = (
(None, {
'fields': (('db_key','db_typeclass_path'), ('db_permissions', 'db_lock_storage'),
('db_location', 'db_home'), 'db_destination','db_cmdset_storage'
)}),
)
#deactivated temporarily, they cause empty objects to be created in admin
inlines = [AliasInline]#, ObjAttributeInline]
    # Custom modification to give two different forms depending on whether we are adding or not.
add_form = ObjectCreateForm
add_fieldsets = (
(None, {
'fields': (('db_key','db_typeclass_path'), 'db_permissions',
('db_location', 'db_home'), 'db_destination','db_cmdset_storage'
)}),
)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super(ObjectDBAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during creation
"""
defaults = {}
if obj is None:
defaults.update({
'form': self.add_form,
'fields': admin.util.flatten_fieldsets(self.add_fieldsets),
})
defaults.update(kwargs)
return super(ObjectDBAdmin, self).get_form(request, obj, **defaults)
def save_model(self, request, obj, form, change):
if not change:
# adding a new object
obj = obj.typeclass
obj.basetype_setup()
obj.basetype_posthook_setup()
obj.at_object_creation()
obj.at_init()
admin.site.register(ObjectDB, ObjectDBAdmin)
| bsd-3-clause | 8,773,561,773,923,069,000 | 42.130081 | 306 | 0.573876 | false |
dgutman/ADRCPathViewer | api/routes/v1/Tile.py | 1 | 1783 | from flask_restful import Resource
from flask import Response
from bson.objectid import ObjectId
from bson.json_util import dumps
from utils.deepzoom import get_slide, PILBytesIO
class Tile(Resource):
def __init__(self, db, config):
"""initialize DeepZoom resource
Args:
db: mongo db connection
config: application configurations
opt: deep zoom configurations
Returns:
None
"""
self.db = db
self.config = config
self.slides = self.db[self.config["db_collection"]]
def get(self, id, level, x, y):
"""
Get slide tile
---
tags:
- Tile
parameters:
- in: path
name: id
            description: MongoDB ObjectId of the slide -- Example 57bf3c092f9b2e1595b29730
type: string
- in: path
name: level
description: The zoom level
type: integer
- in: path
name: x
description: The column
type: integer
- in: path
name: y
description: The row
type: integer
responses:
200:
            description: Returns the requested tile image
404:
description: Invalid slide Id or slide not found
"""
if not ObjectId.is_valid(id):
resp = {"status": 404, "message": "Invalid slide Id " + id}
return Response(dumps(resp), status=404, mimetype='application/json')
image = self.slides.find_one({'_id': ObjectId(id)})
path = image["path"]
slide = get_slide(path)
try:
tile = slide.get_tile(level, (x, y))
buf = PILBytesIO()
tile.save(buf, 'jpeg', quality=90)
return Response(buf.getvalue(), status=200, mimetype='image/jpeg')
except ValueError:
            return Response(None, status=404) | mit | 4,115,524,365,538,051,600 | 25.626866 | 102 | 0.596186 | false |
HEPData/hepdata3 | hepdata/modules/records/migrator/api.py | 1 | 18355 | # -*- coding: utf-8 -*-
#
# This file is part of HEPData.
# Copyright (C) 2016 CERN.
#
# HEPData is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# HEPData is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HEPData; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, print_function
import socket
from datetime import datetime, timedelta
from urllib2 import HTTPError
import requests
from celery import shared_task
from flask import current_app
import os
from invenio_db import db
from hepdata.ext.elasticsearch.api import get_records_matching_field, index_record_ids
from hepdata.modules.inspire_api.views import get_inspire_record_information
from hepdata.modules.dashboard.views import do_finalise
from hepdata.modules.records.utils.common import record_exists
from hepdata.modules.records.utils.submission import \
process_submission_directory, get_or_create_hepsubmission, \
remove_submission
from hepdata.modules.records.utils.workflow import create_record, update_record
import logging
from hepdata.modules.records.utils.yaml_utils import split_files
from hepdata.modules.submission.api import get_latest_hepsubmission, is_resource_added_to_submission
from hepdata.modules.submission.models import DataResource, HEPSubmission
from hepdata.utils.file_extractor import get_file_in_directory
from hepdata.modules.records.utils.doi_minter import generate_dois_for_submission
from hepdata.modules.email.api import notify_publication_update
logging.basicConfig()
log = logging.getLogger(__name__)
class FailedSubmission(Exception):
def __init__(self, message, errors, record_id):
# Call the base class constructor with the parameters it needs
super(FailedSubmission, self).__init__(message)
        # Keep the per-file validation errors and the id of the failing record.
self.errors = errors
self.record_id = record_id
def print_errors(self):
for file in self.errors:
print(file)
for error_message in self.errors[file]:
print("\t{0} for {1}".format(error_message, self.record_id))
@shared_task
def update_analyses():
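    """Poll each service configured in ANALYSES_ENDPOINTS and attach any new
    analysis links as resources to the matching finished submissions, then
    reindex the updated records.
    """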
endpoints = current_app.config["ANALYSES_ENDPOINTS"]
for analysis_endpoint in endpoints:
if "endpoint_url" in endpoints[analysis_endpoint]:
log.info("Updating analyses from {0}...".format(analysis_endpoint))
response = requests.get(endpoints[analysis_endpoint]["endpoint_url"])
if response:
analyses = response.json()
for record in analyses:
submission = get_latest_hepsubmission(inspire_id=record, overall_status='finished')
if submission:
num_new_resources = 0
for analysis in analyses[record]:
_resource_url = endpoints[analysis_endpoint]["url_template"].format(analysis)
if not is_resource_added_to_submission(submission.publication_recid, submission.version,
_resource_url):
print('Adding {} analysis to ins{} with URL {}'
.format(analysis_endpoint, record, _resource_url))
new_resource = DataResource(
file_location=_resource_url,
file_type=analysis_endpoint)
submission.resources.append(new_resource)
num_new_resources += 1
if num_new_resources:
try:
db.session.add(submission)
db.session.commit()
index_record_ids([submission.publication_recid])
except Exception as e:
db.session.rollback()
log.error(e)
else:
log.debug("An analysis is available in {0} but with no equivalent in HEPData (ins{1}).".format(
analysis_endpoint, record))
else:
log.debug("No endpoint url configured for {0}".format(analysis_endpoint))
@shared_task
def update_submissions(inspire_ids_to_update, force=False, only_record_information=False, send_email=False):
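    """Re-fetch the given INSPIRE records from the old HEPData site and queue
    an update of each matching record already present in this system.
    """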
migrator = Migrator()
for index, inspire_id in enumerate(inspire_ids_to_update):
_cleaned_id = inspire_id.replace("ins", "")
_matching_records = get_records_matching_field("inspire_id", _cleaned_id)
if len(_matching_records["hits"]["hits"]) >= 1:
recid = _matching_records["hits"]["hits"][0]["_source"]["recid"]
if "related_publication" in _matching_records["hits"]["hits"][0]["_source"]:
recid = _matching_records["hits"]["hits"][0]["_source"]["related_publication"]
print("The record with inspire_id {} and recid {} will be updated now".format(inspire_id, recid))
migrator.update_file.delay(inspire_id, recid, force, only_record_information, send_email)
else:
log.error("No record exists with id {0}. You should load this file first.".format(inspire_id))
@shared_task
def add_or_update_records_since_date(date=None, send_tweet=False, convert=False):
"""
Given a date, gets all the records updated or added since that
date and updates or adds the corresponding records.
    :param date: in the format YYYYMMDD (e.g. 20160705 for the 5th July 2016)
:param send_tweet:
:param convert:
:return:
"""
if not date:
        # then use yesterday's date
yesterday = datetime.now() - timedelta(days=1)
date = yesterday.strftime("%Y%m%d")
inspire_ids = get_all_ids_in_current_system(date)
print("{0} records to be added or updated since {1}.".format(len(inspire_ids), date))
load_files(inspire_ids, send_tweet=send_tweet, convert=convert)
def get_all_ids_in_current_system(date=None, prepend_id_with="ins"):
"""
Finds all the IDs that have been added or updated since some date.
:param date:
:param prepend_id_with:
:return:
"""
import requests, re
brackets_re = re.compile(r"\[+|\]+")
inspire_ids = []
base_url = "http://hepdata.cedar.ac.uk/allids/{0}"
if date:
base_url = base_url.format(date)
else:
base_url = base_url.format("")
response = requests.get(base_url)
if response.ok:
_all_ids = response.text
for match in re.finditer("\[[0-9]+,[0-9]+,[0-9]+\]", _all_ids):
start = match.start()
end = match.end()
# process the block which is of the form [inspire_id,xxx,xxx]
id_block = brackets_re.sub("", _all_ids[start:end])
id = id_block.split(",")[0].strip()
if id != "0":
inspire_ids.append("{0}{1}".format(prepend_id_with, id))
return inspire_ids
def load_files(inspire_ids, send_tweet=False, synchronous=False, convert=False,
base_url='http://hepdata.cedar.ac.uk/view/{0}/yaml'):
"""
:param base_url: override default base URL
:param convert:
:param synchronous: if should be run immediately
:param send_tweet: whether or not to tweet this entry.
:param inspire_ids: array of inspire ids to load (in the format insXXX).
:return: None
"""
migrator = Migrator()
for index, inspire_id in enumerate(inspire_ids):
_cleaned_id = inspire_id.replace("ins", "")
if not record_exists(inspire_id=_cleaned_id):
print("The record with id {0} does not exist in the database, so we're loading it.".format(inspire_id))
try:
log.info("Loading {0}".format(inspire_id))
if synchronous:
migrator.load_file(inspire_id, send_tweet, convert=convert, base_url=base_url)
else:
migrator.load_file.delay(inspire_id, send_tweet, convert=convert, base_url=base_url)
except socket.error as se:
print("socket error...")
log.error(se.message)
except Exception as e:
print("Failed to load {0}. {1} ".format(inspire_id, e))
log.error("Failed to load {0}. {1} ".format(inspire_id, e))
else:
print("The record with inspire id {0} already exists. Updating instead.".format(inspire_id))
log.info("Updating {}".format(inspire_id))
if synchronous:
update_submissions([inspire_id])
else:
update_submissions.delay([inspire_id])
class Migrator(object):
"""
Performs the interface for all migration-related tasks including downloading, splitting files, YAML cleaning, and
loading.
"""
def __init__(self, base_url="http://hepdata.cedar.ac.uk/view/{0}/yaml"):
self.base_url = base_url
def prepare_files_for_submission(self, inspire_id, force_retrieval=False):
"""
Either returns a file if it already exists, or downloads it and
splits it.
:param inspire_id:
:return: output location if successful, None if not
"""
output_location = os.path.join(current_app.config["CFG_DATADIR"], inspire_id)
last_updated = datetime.now()
download = not os.path.exists(output_location) or (get_file_in_directory(output_location, 'yaml') is None)
if download or force_retrieval:
print("Downloading file for {0}".format(inspire_id))
file_location = self.download_file(inspire_id)
if file_location:
output_location = os.path.join(current_app.config["CFG_DATADIR"], inspire_id)
error, last_updated = split_files(file_location, output_location, "{0}.zip".format(output_location))
# remove temporary download file after processing
try:
os.remove(file_location)
except:
log.info('Unable to remove {0}'.format(file_location))
else:
output_location = None
else:
print("File for {0} already in system...no download required.".format(inspire_id))
return output_location, last_updated
@shared_task
def update_file(inspire_id, recid, force=False, only_record_information=False, send_email=False,
send_tweet=False, convert=False):
self = Migrator()
output_location, oldsite_last_updated = self.prepare_files_for_submission(inspire_id, force_retrieval=True)
if output_location:
updated_record_information, status = self.retrieve_publication_information(inspire_id)
if status == 'success':
record_information = update_record(recid, updated_record_information)
else:
log.error("Failed to retrieve publication information for {0}".format(inspire_id))
return
hep_submission = HEPSubmission.query.filter_by(publication_recid=recid).first()
version_count = HEPSubmission.query.filter_by(publication_recid=recid).count()
print('Old site last updated {}'.format(str(oldsite_last_updated)))
print('New site last updated {}'.format(str(hep_submission.last_updated)))
print('Coordinator ID is {}, version count is {}'.format(hep_submission.coordinator, version_count))
allow_update = (hep_submission.last_updated < oldsite_last_updated or force) and \
hep_submission.coordinator == 1 and version_count == 1
if not only_record_information and allow_update:
try:
recid = self.load_submission(
record_information, output_location, os.path.join(output_location, "submission.yaml"),
update=True)
print('Loaded record {}'.format(recid))
if recid is not None:
do_finalise(recid, publication_record=record_information,
force_finalise=True, send_tweet=send_tweet, update=True, convert=convert)
except FailedSubmission as fe:
log.error(fe.message)
fe.print_errors()
remove_submission(fe.record_id)
elif not only_record_information:
print('Not updating record {}'.format(recid))
else:
index_record_ids([record_information["recid"]])
_cleaned_id = inspire_id.replace("ins", "")
generate_dois_for_submission.delay(inspire_id=_cleaned_id) # update metadata stored in DataCite
if send_email:
notify_publication_update(hep_submission, record_information) # send email to all participants
else:
log.error("Failed to load {0}".format(inspire_id))
@shared_task
def load_file(inspire_id, send_tweet=False, convert=False, base_url='http://hepdata.cedar.ac.uk/view/{0}/yaml'):
self = Migrator(base_url)
output_location, oldsite_last_updated = self.prepare_files_for_submission(inspire_id)
if output_location:
publication_information, status = self.retrieve_publication_information(inspire_id)
if status == "success":
record_information = create_record(publication_information)
else:
log.error("Failed to retrieve publication information for " + inspire_id)
return False
try:
recid = self.load_submission(
record_information, output_location,
os.path.join(output_location, "submission.yaml"))
if recid is not None:
do_finalise(recid, publication_record=record_information,
force_finalise=True, send_tweet=send_tweet, convert=convert)
return True
except FailedSubmission as fe:
log.error(fe.message)
fe.print_errors()
remove_submission(fe.record_id)
return False
else:
log.error("Failed to load " + inspire_id)
return False
def download_file(self, inspire_id):
"""
:param inspire_id:
:return:
"""
import requests
import tempfile
from shutil import copyfile
# Check if single YAML file exists in static directory.
base_dir = os.path.dirname(os.path.realpath(__file__))
yaml_file = os.path.join(base_dir, 'static', inspire_id + '.yaml')
if os.path.isfile(yaml_file):
print("Found {}".format(yaml_file))
tmp_file = tempfile.NamedTemporaryFile(dir=current_app.config["CFG_TMPDIR"], delete=False)
tmp_file.close()
copyfile(yaml_file, tmp_file.name)
return tmp_file.name
try:
url = self.base_url.format(inspire_id)
log.info("Trying URL " + url)
response = requests.get(url)
if response.ok:
yaml = response.text
# save to tmp file
tmp_file = tempfile.NamedTemporaryFile(dir=current_app.config["CFG_TMPDIR"],
delete=False)
tmp_file.write(yaml)
tmp_file.close()
return tmp_file.name
else:
log.error('Non OK response from endpoint at {0}'.format(url))
return None
except HTTPError as e:
log.error("Failed to download {0}".format(inspire_id))
log.error(e.message)
return None
def retrieve_publication_information(self, inspire_id):
"""
:param inspire_id: id for record to get. If this contains "ins", the "ins" is removed.
:return: dict containing keys for:
title
doi
authors
abstract
arxiv_id
collaboration
"""
if "ins" in inspire_id:
inspire_id = int(inspire_id.replace("ins", ""))
content, status = get_inspire_record_information(inspire_id)
content["inspire_id"] = inspire_id
return content, status
def load_submission(self, record_information, file_base_path,
submission_yaml_file_location, update=False):
"""
        :param record_information: publication record information dict
        :param file_base_path: directory containing the split submission files
        :param submission_yaml_file_location: path to the submission.yaml file
        :param update: True when updating an existing submission
        :return: the record id of the loaded submission
"""
# create publication record.
# load data tables
# create data table records (call finalise(recid))
admin_user_id = 1
# consume data payload and store in db.
get_or_create_hepsubmission(record_information["recid"], admin_user_id)
errors = process_submission_directory(file_base_path,
submission_yaml_file_location,
record_information["recid"], update=update)
if len(errors) > 0:
print("ERRORS ARE: ")
print(errors)
if errors:
raise FailedSubmission("Submission failed for {0}.".format(
record_information["recid"]), errors,
record_information["recid"])
else:
return record_information["recid"]
| gpl-2.0 | -4,209,488,083,953,529,000 | 40.154709 | 119 | 0.592373 | false |
hiatobr/midiacapoeira | modules/queries.py | 1 | 1403 | # -*- coding: utf-8 -*-
from gluon import current
def tagQuery(tags, ctbl, ttbl, query = 0, op = 'or', field =
'texto_id'):
'''
    Searches the database for content tagged with the tags in <tags>.
    The operation is recursive, tag by tag, combining the result of each
    search with the result for the previous tag. That combination can be
    an intersection (op = 'and') or a union (op = 'or').
    This implementation aims for generality, so the function can be used
    to search for any kind of content, as long as the <field> variable is
    filled in correctly in the call.
    <ctbl> = content table
    <ttbl> = tag table
'''
db = current.db
try:
        # Pick one tag and look up the ids of the texts that contain it
tag_ref = db(ttbl.tag==tags.pop()).select(ttbl[field]).as_list()
tag_ref = map(list.pop, map(dict.values, tag_ref))
        # propagate op and field so they survive the recursion
        if query and op == 'or':
            return tagQuery(tags, ctbl, ttbl, ctbl.id.belongs(tag_ref) | query,
                            op=op, field=field)
        elif query and op == 'and':
            return tagQuery(tags, ctbl, ttbl, ctbl.id.belongs(tag_ref) & query,
                            op=op, field=field)
        else:
            return tagQuery(tags, ctbl, ttbl, ctbl.id.belongs(tag_ref),
                            op=op, field=field)
except IndexError:
return db(query).select(ctbl.ALL).as_list()
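# A hedged usage sketch (not from the original app): the table names below are
# assumptions about the web2py model, with db.texto holding the content and
# db.texto_tag holding the tag/texto_id pairs. Inside a controller one could
# write:
#     textos = tagQuery(['roda', 'berimbau'], db.texto, db.texto_tag, op='and')
# which returns, as a list of dicts, the db.texto rows carrying both tags.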
| gpl-3.0 | 3,060,283,172,362,273,000 | 34.435897 | 73 | 0.606368 | false |
jsaponara/opentaxforms | opentaxforms/serve.py | 1 | 3082 | #!/usr/bin/env python
from __future__ import print_function, absolute_import
import flask_restless
from argparse import ArgumentParser
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from .db import connect
from .version import appname, apiVersion
from .ut import Bag
def createApi(app,**kw):
db = SQLAlchemy(app)
conn, engine, metadata, md = connect(appname, **kw)
Base = declarative_base()
Session = sessionmaker(autocommit=False, autoflush=False, bind=engine)
mysession = scoped_session(Session)
apimanager = flask_restless.APIManager(app, session=mysession)
counts = {}
for tabl in md:
tablobj = md[tabl]
counts[tabl] = tablobj.count().execute().fetchone()[0]
attrs = dict(
__table__=tablobj,
# todo should flask_restless need __tablename__?
__tablename__=str(tabl),
)
attrs.update(dict(
orgn=dict(
form=db.relationship('Form'),
),
form=dict(
orgn=db.relationship('Orgn', back_populates='form'),
slot=db.relationship('Slot', back_populates='form'),
),
slot=dict(
form=db.relationship('Form'),
),
)[tabl])
tablcls = type(str(tabl).capitalize(), (Base, ), attrs)
colsToAdd = dict(
orgn=(),
form=(
'orgn', 'orgn.code',
),
slot=(
'form', 'form.code',
),
)[tabl]
colsToShow = [c.name for c in tablobj.columns]
colsToShow.extend(colsToAdd)
# print tabl,colsToShow
apimanager.create_api(
tablcls,
url_prefix='/api/v%s' % (apiVersion, ),
include_columns=colsToShow,
)
return counts
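# Example (assumed, not from the project docs): with the app running on the
# default Flask port, each table is exposed read-only under the API prefix,
# e.g. GET http://localhost:5000/api/v1/form returns a JSON page of form rows
# with the related orgn code included.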
def parseCmdline():
'''Load command line arguments'''
parser = ArgumentParser(
description='Automates tax forms'
' and provides an API for new tax form interfaces'
)
parser.add_argument(
'-P', '--postgres',
help='use postgres database [default=sqlite]', action="store_true")
return parser.parse_args()
def createApp(**kw):
cmdline = kw.get('cmdline')
verbose = kw.get('verbose')
if 'cmdline' in kw:
del kw['cmdline']
if 'verbose' in kw:
del kw['verbose']
args = parseCmdline() if cmdline else Bag(dict(postgres=False))
app = Flask(appname)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # to suppress warning
counts = createApi(app,postgres=args.postgres, **kw)
if verbose:
print('serving {slot} slots in {form} forms from {orgn} orgns'.format(
**counts))
return app
def main(**kw):
app = createApp(dbpath='sqlite:///opentaxforms.sqlite3', **kw)
app.run()
if __name__ == "__main__":
main(cmdline=True, verbose=True)
| agpl-3.0 | 2,853,290,267,090,020,000 | 30.131313 | 79 | 0.576249 | false |
bbglab/wok | wok/core/flow/reader.py | 1 | 6997 | ###############################################################################
#
# Copyright 2009-2011, Universitat Pompeu Fabra
#
# This file is part of Wok.
#
# Wok is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Wok is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses
#
###############################################################################
import os.path
try:
from lxml import etree
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5+
import xml.etree.ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree
except ImportError:
import sys
sys.stderr.write("Failed to import ElementTree from any known place\n")
raise
from wok.config.data import DataElement, Data
from wok.core.flow.model import *
def str_to_bool(s):
s2b = {
"0" : False, "1" : True,
"no" : False, "yes" : True,
"false" : False, "true" : True}
if s in s2b:
return s2b[s]
else:
return False
class FlowReader(object):
def __init__(self, source):
if isinstance(source, basestring):
self.path = os.path.abspath(source)
self.fp = open(source, "r")
else:
self.path = None
self.fp = source
self.__doc = None
def __read_doc(self):
if self.__doc is None:
self.__doc = etree.parse(self.fp)
return self.__doc
def read_meta(self):
doc = self.__read_doc()
root = doc.getroot()
if root.tag != "flow":
raise Exception("<flow> expected but <{}> found".format(xmle.tag))
name = root.attrib.get("name")
library = root.attrib.get("library")
version = root.attrib.get("version")
return (name, library, version)
def read(self):
doc = self.__read_doc()
root = doc.getroot()
flow = self._parse_flow(root)
if self.path:
flow.path = self.path
return flow
def _parse_base_desc(self, xmle, obj):
if "name" not in xmle.attrib:
raise Exception("'name' attribute not found in tag <{}>".format(xmle.tag))
obj.name = xmle.attrib["name"]
obj.title = xmle.findtext("title")
obj.desc = xmle.findtext("desc")
if "enabled" in xmle:
obj.enabled = str_to_bool(xmle.attr["enabled"])
def _parse_base_port(self, xmle, obj):
self._parse_base_desc(xmle, obj)
if "serializer" in xmle.attrib:
obj.serializer = xmle.attrib["serializer"]
if "wsize" in xmle.attrib:
try:
obj.wsize = int(xmle.attrib["wsize"])
except:
raise Exception("At {} {}: 'wsize' should be a number greater than 0".format(xmle.tag, obj.name))
if obj.wsize < 1:
raise Exception("At {} {}: 'wsize' should be a number greater than 0".format(xmle.tag, obj.name))
def _parse_base_module(self, xmle, obj):
self._parse_base_port(xmle, obj)
if "maxpar" in xmle.attrib:
try:
obj.maxpar = int(xmle.attrib["maxpar"])
except:
raise Exception("At {} {}: 'maxpar' should be a number greater than 0".format(xmle.tag, obj.name))
if obj.maxpar < 1:
raise Exception("At {} {}: 'maxpar' should be a number greater than 0".format(xmle.tag, obj.name))
conf_xml = xmle.find("conf")
if conf_xml is not None:
obj.conf = self._parse_conf(conf_xml)
res_xml = xmle.find("resources")
if res_xml is not None:
obj.resources = self._parse_conf(res_xml)
for x in xmle.findall("param"):
obj.params += [self._parse_param(x)]
for x in xmle.findall("in"):
obj.add_in_port(self._parse_port(x))
for x in xmle.findall("out"):
obj.add_out_port(self._parse_port(x))
def _parse_flow(self, xmle):
if xmle.tag != "flow":
raise Exception("<flow> expected but <{}> found".format(xmle.tag))
flow = Flow(name = None)
self._parse_base_module(xmle, flow)
if "library" in xmle.attrib:
flow.library = xmle.attrib["library"]
if "version" in xmle.attrib:
flow.version = xmle.attrib["version"]
for xmle in xmle.findall("module"):
module = self._parse_module(flow, xmle)
# TODO check that there is no other module with the same name
flow.add_module(module)
return flow
def _parse_module(self, flow, xmle):
mod = Module(name = None)
self._parse_base_module(xmle, mod)
if "depends" in xmle.attrib:
depends = [d.strip() for d in xmle.attrib["depends"].split(",")]
mod.depends = [d for d in depends if len(d) > 0]
exec_xml = xmle.find("exec")
if exec_xml is None:
run_xml = xmle.find("run")
if run_xml is None:
flow_ref_xml = xmle.find("flow")
if flow_ref_xml is None:
raise Exception("Missing either <exec>, <run> or <flow> in module {}".format(mod.name))
else:
mod.flow_ref = self._parse_flow_ref(flow, mod, flow_ref_xml)
else:
mod.execution = self._parse_run(mod, run_xml)
else:
mod.execution = self._parse_exec(exec_xml)
return mod
def _parse_param(self, xmle):
raise Exception("Unimplemented")
def _parse_port(self, xmle):
if xmle.tag == "in":
mode = PORT_MODE_IN
elif xmle.tag == "out":
mode = PORT_MODE_OUT
port = Port(name = None, mode = mode)
self._parse_base_port(xmle, port)
if "link" in xmle.attrib:
link = [x.strip() for x in xmle.attrib["link"].split(",")]
port.link = [l for l in link if len(l) > 0]
return port
def _parse_conf(self, xmle):
return Data.from_xmle(xmle)
def _parse_exec(self, xmle):
execution = Exec()
if "launcher" in xmle.attrib:
execution.mode = xmle.attrib["launcher"].lower()
if execution.mode == "python":
execution.mode = "native"
execution.conf = Data.from_xmle(xmle)
return execution
def _parse_run(self, mod, xmle):
if xmle.text is None or len(xmle.text) == 0:
raise Exception("Missing script name for <run> in module {}".format(mod.name))
execution = Exec()
execution.mode = "native"
execution.conf = DataElement()
execution.conf["script_path"] = xmle.text
return execution
def _parse_flow_ref(self, flow, mod, xmle):
if xmle.text is None or len(xmle.text) == 0:
raise Exception("Missing flow name for <flow> in module {}".format(mod.name))
flow_ref = FlowRef()
pos = xmle.text.rfind(".")
if pos == -1 and flow.library is not None:
flow_ref.canonical_name = "{}.{}".format(flow.library, xmle.text)
else:
flow_ref.canonical_name = xmle.text
if "version" in xmle.attrib:
flow_ref.version = xmle.attrib["version"]
return flow_ref
def close(self):
self.fp.close()
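# Illustrative only (not part of the original module): a minimal flow document
# of the shape this reader accepts, reconstructed from the _parse_* methods
# above. Real Wok flows typically carry more metadata.
#
# <flow name="example" library="demo" version="1.0">
#     <module name="square" maxpar="2">
#         <in name="numbers" wsize="1" />
#         <out name="squares" />
#         <run>square.py</run>
#     </module>
# </flow>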
| gpl-3.0 | 6,454,725,803,999,862,000 | 26.225681 | 102 | 0.641561 | false |
nacc/autotest | client/tests/kvm/tests/pci_hotplug.py | 1 | 8582 | import re
from autotest.client.shared import error
from autotest.client.virt import virt_utils, virt_vm, aexpect
def run_pci_hotplug(test, params, env):
"""
Test hotplug of PCI devices.
(Elements between [] are configurable test parameters)
    1) PCI add a device (NIC / block).
    2) Compare output of monitor command 'info pci' before and after the add.
    3) Compare output of guest command [reference_cmd] before and after the add.
    4) Verify whether pci_model is shown in [pci_find_cmd].
    5) Check whether the newly added PCI device works fine.
    6) PCI delete the device and verify that it can be removed.
@param test: KVM test object.
@param params: Dictionary with the test parameters.
@param env: Dictionary with test environment.
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
# Modprobe the module if specified in config file
module = params.get("modprobe_module")
if module:
session.cmd("modprobe %s" % module)
# Get output of command 'info pci' as reference
info_pci_ref = vm.monitor.info("pci")
# Get output of command as reference
reference = session.cmd_output(params.get("reference_cmd"))
tested_model = params.get("pci_model")
test_type = params.get("pci_type")
image_format = params.get("image_format_stg")
# Probe qemu to verify what is the supported syntax for PCI hotplug
cmd_output = vm.monitor.cmd("?")
if len(re.findall("\ndevice_add", cmd_output)) > 0:
cmd_type = "device_add"
elif len(re.findall("\npci_add", cmd_output)) > 0:
cmd_type = "pci_add"
else:
raise error.TestError("Unknow version of qemu")
# Determine syntax of drive hotplug
# __com.redhat_drive_add == qemu-kvm-0.12 on RHEL 6
if len(re.findall("\n__com.redhat_drive_add", cmd_output)) > 0:
drive_cmd_type = "__com.redhat_drive_add"
# drive_add == qemu-kvm-0.13 onwards
elif len(re.findall("\ndrive_add", cmd_output)) > 0:
drive_cmd_type = "drive_add"
else:
raise error.TestError("Unknow version of qemu")
# Probe qemu for a list of supported devices
devices_support = vm.monitor.cmd("%s ?" % cmd_type)
if cmd_type == "pci_add":
if test_type == "nic":
pci_add_cmd = "pci_add pci_addr=auto nic model=%s" % tested_model
elif test_type == "block":
image_params = params.object_params("stg")
image_filename = virt_utils.get_image_filename(image_params,
test.bindir)
pci_add_cmd = ("pci_add pci_addr=auto storage file=%s,if=%s" %
(image_filename, tested_model))
# Execute pci_add (should be replaced by a proper monitor method call)
add_output = vm.monitor.cmd(pci_add_cmd)
if not "OK domain" in add_output:
raise error.TestFail("Add PCI device failed. "
"Monitor command is: %s, Output: %r" %
(pci_add_cmd, add_output))
after_add = vm.monitor.info("pci")
elif cmd_type == "device_add":
driver_id = test_type + "-" + virt_utils.generate_random_id()
device_id = test_type + "-" + virt_utils.generate_random_id()
if test_type == "nic":
if tested_model == "virtio":
tested_model = "virtio-net-pci"
pci_add_cmd = "device_add id=%s,driver=%s" % (device_id,
tested_model)
elif test_type == "block":
image_params = params.object_params("stg")
image_filename = virt_utils.get_image_filename(image_params,
test.bindir)
controller_model = None
if tested_model == "virtio":
tested_model = "virtio-blk-pci"
if tested_model == "scsi":
tested_model = "scsi-disk"
controller_model = "lsi53c895a"
if len(re.findall(controller_model, devices_support)) == 0:
raise error.TestError("scsi controller device (%s) not "
"supported by qemu" %
controller_model)
if controller_model is not None:
controller_id = "controller-" + device_id
controller_add_cmd = ("device_add %s,id=%s" %
(controller_model, controller_id))
vm.monitor.cmd(controller_add_cmd)
if drive_cmd_type == "drive_add":
driver_add_cmd = ("drive_add auto "
"file=%s,if=none,id=%s,format=%s" %
(image_filename, driver_id, image_format))
elif drive_cmd_type == "__com.redhat_drive_add":
driver_add_cmd = ("__com.redhat_drive_add "
"file=%s,format=%s,id=%s" %
(image_filename, image_format, driver_id))
pci_add_cmd = ("device_add id=%s,driver=%s,drive=%s" %
(device_id, tested_model, driver_id))
vm.monitor.cmd(driver_add_cmd)
# Check if the device is support in qemu
if len(re.findall(tested_model, devices_support)) > 0:
add_output = vm.monitor.cmd(pci_add_cmd)
else:
raise error.TestError("%s doesn't support device: %s" %
(cmd_type, tested_model))
after_add = vm.monitor.info("pci")
        if device_id not in after_add:
raise error.TestFail("Add device failed. Monitor command is: %s"
". Output: %r" % (pci_add_cmd, add_output))
# Define a helper function to delete the device
def pci_del(ignore_failure=False):
if cmd_type == "pci_add":
result_domain, bus, slot, function = add_output.split(',')
domain = int(result_domain.split()[2])
bus = int(bus.split()[1])
slot = int(slot.split()[1])
pci_addr = "%x:%x:%x" % (domain, bus, slot)
cmd = "pci_del pci_addr=%s" % pci_addr
elif cmd_type == "device_add":
cmd = "device_del %s" % device_id
# This should be replaced by a proper monitor method call
vm.monitor.cmd(cmd)
def device_removed():
after_del = vm.monitor.info("pci")
return after_del != after_add
if (not virt_utils.wait_for(device_removed, 10, 0, 1)
and not ignore_failure):
raise error.TestFail("Failed to hot remove PCI device: %s. "
"Monitor command: %s" %
(tested_model, cmd))
try:
# Compare the output of 'info pci'
if after_add == info_pci_ref:
raise error.TestFail("No new PCI device shown after executing "
"monitor command: 'info pci'")
# Define a helper function to compare the output
def new_shown():
o = session.cmd_output(params.get("reference_cmd"))
return o != reference
secs = int(params.get("wait_secs_for_hook_up"))
if not virt_utils.wait_for(new_shown, 30, secs, 3):
raise error.TestFail("No new device shown in output of command "
"executed inside the guest: %s" %
params.get("reference_cmd"))
# Define a helper function to catch PCI device string
def find_pci():
o = session.cmd_output(params.get("find_pci_cmd"))
return params.get("match_string") in o
if not virt_utils.wait_for(find_pci, 30, 3, 3):
raise error.TestFail("PCI %s %s device not found in guest. "
"Command was: %s" %
(tested_model, test_type,
params.get("find_pci_cmd")))
# Test the newly added device
try:
session.cmd(params.get("pci_test_cmd"))
except aexpect.ShellError, e:
raise error.TestFail("Check for %s device failed after PCI "
"hotplug. Output: %r" % (test_type, e.output))
session.close()
except Exception:
pci_del(ignore_failure=True)
raise
else:
pci_del()
| gpl-2.0 | -7,138,173,694,472,886,000 | 41.275862 | 79 | 0.53554 | false |
cshtarkov/autobump | tests/test_java_handler.py | 1 | 10110 | import os
import tempfile
import unittest
from autobump.handlers import java_ast
from autobump.handlers import java_native
class TestJavaHandlerBase(unittest.TestCase):
"""Used to set up a simple Java codebase in a temporary
location.
Does not contain any actual tests. Test cases
are meant to inherit from this class so that they
all have a non-trivial fixture."""
@classmethod
def setUpClass(cls):
sources = [
("packageX/ClassA.java",
"""
package packageX;
public class ClassA {
public void returnsVoid() {}
public ClassB returnsB(ClassB b) { return b; }
private void privateReturnsVoid() {}
}
"""),
("packageX/ClassB.java",
"""
package packageX;
public class ClassB {
private static final int ONE = 1;
public static final int FIVE = 5;
public ClassA returnsB(ClassA b) { return b; }
}
"""),
("packageY/ClassC.java",
"""
package packageY;
import packageX.ClassA;
import packageX.ClassB;
public class ClassC extends ClassA {
public void overloaded(int a) {}
public void overloaded(int a, ClassA b) {}
public void overloaded(int a, ClassB b) {}
public class Inner {
}
}
"""),
("packageY/InterfaceD.java",
"""
package packageY;
public interface InterfaceD {
public static final boolean INTERFACE_D = true;
}
"""),
("packageY/InterfaceE.java",
"""
package packageY;
public interface InterfaceE {
}
"""),
("packageY/InterfaceX.java",
"""
package packageY;
public interface InterfaceX extends InterfaceD, InterfaceE {
}
"""),
("packageY/ClassD.java",
"""
package packageY;
import packageX.ClassA;
public class ClassD extends ClassC implements InterfaceD {
public static void acceptsClassD(ClassD p) {}
public static void acceptsIfaceD(InterfaceD p) {}
public static void acceptsIfaceE(InterfaceE p) {}
public static void acceptsIfaceX(InterfaceX p) {}
public static void acceptsClassA(ClassA p) {}
public static void acceptsClassC(ClassC p) {}
public static void acceptsArrayClassC(ClassC[] p) {}
public static void acceptsArrayClassA(ClassA[] p) {}
}
""")
]
cls.dir_handle = tempfile.TemporaryDirectory()
cls.dir = cls.dir_handle.name
# Write the above class definitions to source files.
files = []
for filename, source in sources:
fullpath = os.path.join(cls.dir, filename)
files.append(fullpath)
os.makedirs(os.path.dirname(fullpath), exist_ok=True)
with open(fullpath, "w") as f:
f.write(source)
# Get two codebases for the two different handlers.
cls.codebase_ast = java_ast.codebase_to_units(cls.dir)
cls.codebase_native = java_native.codebase_to_units(cls.dir, 'javac `find -name "*.java" | xargs`', '.')
# By default, run the java_ast handler tests.
# The java_native handler will need to override setUp()
# and reassign cls.codebase.
cls.codebase = cls.codebase_ast
@classmethod
def tearDownClass(cls):
cls.dir_handle.cleanup()
def setUp(self):
self.codebase = self.__class__.codebase
self.codebase_ast = self.__class__.codebase_ast
class TestClassesAST(TestJavaHandlerBase):
def test_class_names(self):
self.assertTrue("packageX.ClassA" in self.codebase)
self.assertTrue("packageX.ClassB" in self.codebase)
self.assertTrue("packageY.ClassC" in self.codebase)
self.assertTrue("packageY.InterfaceD" in self.codebase)
self.assertTrue("packageY.ClassD" in self.codebase)
def test_class_functions(self):
self.assertTrue("returnsVoid" in self.codebase["packageX.ClassA"].functions)
self.assertFalse("privateReturnsVoid" in self.codebase["packageX.ClassA"].functions)
def test_class_fields(self):
self.assertTrue("FIVE" in self.codebase["packageX.ClassB"].fields)
self.assertFalse("ONE" in self.codebase["packageX.ClassB"].fields)
def test_inner_class(self):
self.assertTrue("Inner" in self.codebase["packageY.ClassC"].units)
class TestClassesNative(TestClassesAST):
def setUp(self):
super(TestClassesNative, self).setUp()
self.codebase = self.codebase_native
# java_ast and java_native disagree on what inner classes should be called,
# so we need to override this test.
def test_inner_class(self):
self.assertTrue("packageY.ClassC$Inner" in self.codebase["packageY.ClassC"].units)
class TestMethodOverloadingAST(TestJavaHandlerBase):
def test_overloading_possible(self):
self.assertEqual(len(self.codebase["packageY.ClassC"].functions["overloaded"].signatures), 3)
def test_additional_parameter(self):
function = self.codebase["packageY.ClassC"].functions["overloaded"]
self.assertTrue(any(len(sig.parameters) == 3 and sig.parameters[2].type.name == "packageX.ClassA" for sig in function.signatures))
def test_parameter_different_type(self):
function = self.codebase["packageY.ClassC"].functions["overloaded"]
self.assertTrue(any(len(sig.parameters) == 3 and sig.parameters[2].type.name == "packageX.ClassB" for sig in function.signatures))
class TestMethodOverloadingNative(TestMethodOverloadingAST):
def setUp(self):
super(TestMethodOverloadingNative, self).setUp()
self.codebase = self.codebase_native
class TestTypesAST(TestJavaHandlerBase):
def test_type_and_array_of_type_are_different(self):
t = self.codebase["packageY.ClassD"].functions["acceptsClassA"].signatures[0].parameters[1].type
arrayT = self.codebase["packageY.ClassD"].functions["acceptsArrayClassA"].signatures[0].parameters[1].type
self.assertFalse(t.is_compatible(arrayT))
self.assertFalse(arrayT.is_compatible(t))
def test_superclass_compatible_with_subclass(self):
superclass = self.codebase["packageY.ClassD"].functions["acceptsClassA"].signatures[0].parameters[1].type
subclass = self.codebase["packageY.ClassD"].functions["acceptsClassC"].signatures[0].parameters[1].type
self.assertTrue(superclass.is_compatible(subclass))
def test_superclass_array_compatible_with_subclass(self):
superclass = self.codebase["packageY.ClassD"].functions["acceptsArrayClassA"].signatures[0].parameters[1].type
subclass = self.codebase["packageY.ClassD"].functions["acceptsArrayClassC"].signatures[0].parameters[1].type
self.assertTrue(superclass.is_compatible(subclass))
def test_subclass_not_compatible_with_superclass(self):
superclass = self.codebase["packageY.ClassD"].functions["acceptsClassA"].signatures[0].parameters[1].type
subclass = self.codebase["packageY.ClassD"].functions["acceptsClassC"].signatures[0].parameters[1].type
self.assertFalse(subclass.is_compatible(superclass))
def test_subclass_array_not_compatible_with_superclass(self):
superclass = self.codebase["packageY.ClassD"].functions["acceptsArrayClassA"].signatures[0].parameters[1].type
subclass = self.codebase["packageY.ClassD"].functions["acceptsArrayClassC"].signatures[0].parameters[1].type
self.assertFalse(subclass.is_compatible(superclass))
def test_superclass_compatible_with_subclass_skip_one(self):
superclass = self.codebase["packageY.ClassD"].functions["acceptsClassA"].signatures[0].parameters[1].type
subclass = self.codebase["packageY.ClassD"].functions["acceptsClassD"].signatures[0].parameters[1].type
self.assertTrue(superclass.is_compatible(subclass))
def test_interface_compatible_with_class(self):
interface = self.codebase["packageY.ClassD"].functions["acceptsIfaceD"].signatures[0].parameters[1].type
subclass = self.codebase["packageY.ClassD"].functions["acceptsClassD"].signatures[0].parameters[1].type
self.assertTrue(interface.is_compatible(subclass))
def test_class_not_compatible_with_interface(self):
interface = self.codebase["packageY.ClassD"].functions["acceptsIfaceD"].signatures[0].parameters[1].type
subclass = self.codebase["packageY.ClassD"].functions["acceptsClassD"].signatures[0].parameters[1].type
self.assertFalse(subclass.is_compatible(interface))
def test_interface_extension(self):
interface1 = self.codebase["packageY.ClassD"].functions["acceptsIfaceD"].signatures[0].parameters[1].type
interface2 = self.codebase["packageY.ClassD"].functions["acceptsIfaceE"].signatures[0].parameters[1].type
subclass = self.codebase["packageY.ClassD"].functions["acceptsIfaceX"].signatures[0].parameters[1].type
self.assertTrue(interface1.is_compatible(subclass))
self.assertTrue(interface2.is_compatible(subclass))
class TestTypesNative(TestTypesAST):
def setUp(self):
super(TestTypesNative, self).setUp()
self.codebase = self.codebase_native
# Need to patch the 'location' of every type - i.e.
# where TypeCompatibilityChecker will try to find the
# class files.
for function in self.codebase["packageY.ClassD"].functions.values():
for signature in function.signatures:
for parameter in signature.parameters:
parameter.type.location = self.dir
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | -4,486,002,600,987,291,600 | 39.44 | 138 | 0.646785 | false |
arruda/rmr | rmr/apps/accounts/migrations/0001_initial.py | 1 | 4385 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserProfile'
db.create_table('accounts_userprofile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('quota', self.gf('django.db.models.fields.DecimalField')(default='0', null=True, max_digits=10, decimal_places=2, blank=True)),
))
db.send_create_signal('accounts', ['UserProfile'])
def backwards(self, orm):
# Deleting model 'UserProfile'
db.delete_table('accounts_userprofile')
models = {
'accounts.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'quota': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts'] | mit | 4,301,774,263,361,745,000 | 61.657143 | 182 | 0.560547 | false |
Dev-Cloud-Platform/Dev-Cloud | dev_cloud/cc1/src/wi/tests/registration_test.py | 1 | 3217 | # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
# -*- coding: utf-8 -*-
"""@package src.wi.tests.registration_test
@author Piotr Wójcik
@author Krzysztof Danielowski
@date 11.10.2012
"""
from wi.tests import WiTestCase
import unittest
import random
class RegistrationTests(WiTestCase, unittest.TestCase):
def _fill_common_data(self, field_key=None, field_value=None):
driver = self.driver
self.base_url = self.TEST_SERVER
driver.get(self.base_url + "/registration/register/")
self.change_language()
self.wait_for_text("//div[@id='content']/div[2]/div/div[5]/h2", ["Registration"])
driver.find_element_by_id("id_login").clear()
driver.find_element_by_id("id_login").send_keys("witest" + str(random.randint(1, 100000)))
driver.find_element_by_id("id_first").clear()
driver.find_element_by_id("id_first").send_keys("test")
driver.find_element_by_id("id_last").clear()
driver.find_element_by_id("id_last").send_keys("test")
driver.find_element_by_id("id_organization").clear()
driver.find_element_by_id("id_organization").send_keys("test")
driver.find_element_by_id("id_email").clear()
driver.find_element_by_id("id_email").send_keys("witest" + str(random.randint(1, 100000)) + "@witest.pl")
driver.find_element_by_id("id_new_password").clear()
driver.find_element_by_id("id_new_password").send_keys("test1")
driver.find_element_by_id("id_password2").clear()
driver.find_element_by_id("id_password2").send_keys("test1")
if field_key is not None:
driver.find_element_by_id(field_key).clear()
driver.find_element_by_id(field_key).send_keys(field_value)
driver.find_element_by_css_selector("input.big_button").click()
@unittest.skip('a')
def test_1_registration_success(self):
driver = self.driver
self._fill_common_data()
self.assertEqual("Registration success - Registration - CC1", driver.title)
def test_2_registration_login_duplicate(self):
self._fill_common_data("id_login", self.TEST_USER['login'])
self.wait_for_text("//form[@id='registration-form']/fieldset/div/ul/li",
["A user with that login already exists."])
def test_3_registration_wrong_email(self):
self._fill_common_data("id_email", "witest" + str(random.randint(1, 100000)) + "@witestpl")
self.wait_for_text("//form[@id='registration-form']/fieldset/div/ul/li", ["Enter a valid email address."])
| apache-2.0 | 5,230,412,575,024,754,000 | 40.230769 | 114 | 0.662313 | false |
gustavovaliati/ci724-ppginfufpr-2016 | knn-02-06/knn-cuda_notworking_yet.py | 1 | 7979 | #!/usr/bin/python
import datetime, argparse, sys, os
import numpy as np
from multiprocessing import Pool
import pycuda.driver as drv
import pycuda.tools
import pycuda.autoinit
import numpy.linalg as la
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
ap = argparse.ArgumentParser()
ap.add_argument("-tr", "--train", required = True, help = "Is the training dataset path.")
ap.add_argument("-te", "--test", required = True, help = "Is the testing dataset path.")
ap.add_argument("-k", required = True, help = "Is K for the KNN algorithm.")
ap.add_argument("-lte", "--limit-test", required = False, help = "Sets a limit for how many testing sets must be used instead of the whole file.")
ap.add_argument("-ltr", "--limit-train", required = False, help = "Sets a limit for how many training sets must be used instead of the whole file.")
args = vars(ap.parse_args())
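# Example invocation (file names are hypothetical; each dataset file must start
# with a "<num_lines> <num_features>" header line, as parsed below):
#   python knn-cuda_notworking_yet.py -tr digits.tra -te digits.tes -k 3 -ltr 1000 -lte 100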
train_file_path = args["train"]
test_file_path = args["test"]
k_number = int(args["k"])
test_calculation_limit = False
test_calculation_limit_arg = args["limit_test"]
if (test_calculation_limit_arg):
test_calculation_limit = int(test_calculation_limit_arg)
else:
print "Be aware you didn't set a limit for the testing set. We are going to test all."
train_calculation_limit = False
train_calculation_limit_arg = args["limit_train"]
if (train_calculation_limit_arg):
train_calculation_limit = int(train_calculation_limit_arg)
else:
print "Be aware you didn't set a limit for the training set. We are going to use it all."
############
# STATIC PARAMETERS
############
classes = 10  # TODO: remove hard-coded number of classes.
confusion_matrix = np.zeros((classes,classes), dtype=np.int)
result_error = 0
result_rejection = 0
total_testing = 0
total_training = 0
process_number = 4
############
#LOAD TRAINING FILE
############
train_file = open(train_file_path, "r")
print "Reading file: ", train_file_path
header = train_file.readline().split(" ")
train_number_lines = int(header[0])
number_features = int(header[1])
print "Lines {} | Features {}".format(train_number_lines, number_features)
if train_calculation_limit:
print "We are limiting to {} training sets.".format(train_calculation_limit)
if train_number_lines > train_calculation_limit:
total_training = train_calculation_limit
else:
print "\nERROR: the training limit is bigger than the actual number of testing sets."
sys.exit()
else:
total_training = train_number_lines
train_features = []
train_real_class = []
train_guessed_class = []
for train_index, features in enumerate(train_file):
if train_calculation_limit and train_index >= train_calculation_limit:
break
features = features.split(" ")
features_class = features.pop(number_features)
# features = np.array(map(float, features))
features = np.array(features, dtype=np.float32)
features_class = int(features_class.replace("\n",""))
train_features.append(features)
train_real_class.append(features_class)
############
#LOAD TEST FILE
############
test_file = open(test_file_path, "r")
print "Reading file: ", test_file_path
header = test_file.readline().split(" ")
test_number_lines = int(header[0])
number_features = int(header[1])
print "Lines {} | Features {}".format(test_number_lines, number_features)
if test_calculation_limit:
print "We are limiting to {} testing sets.".format(test_calculation_limit)
if test_number_lines > test_calculation_limit:
total_testing = test_calculation_limit
else:
print "\nERROR: the testing limit is bigger than the actual number of testing sets."
sys.exit()
else:
total_testing = test_number_lines
test_features = []
test_real_class = []
test_guessed_class = []
test_processed_lines = 0
for test_index, features in enumerate(test_file):
if test_calculation_limit and test_index >= test_calculation_limit:
break
features = features.split(" ")
features_class = features.pop(number_features)
# features = np.array(map(float, features))
features = np.array(features, dtype=np.float32)
features_class = int(features_class.replace("\n",""))
test_features.append(features)
test_real_class.append(features_class)
############
# CALCULATION
############
def print_summary(tested):
valid_total = tested - result_rejection
time_end = datetime.datetime.now()
print "Calculation time: {}".format(time_end - time_start)
if valid_total > 0:
correct = (valid_total - result_error) * 100.0 / valid_total
else:
correct = 0.0
print "Tested {} | Error: {} | Rejection {} | Correct {} %".format(tested, result_error, result_rejection, correct)
print confusion_matrix
def calc_distance(test_feat_index, train_feat_index):
    # Copy both feature vectors to the GPU and compute the squared Euclidean
    # distance there; .get() transfers the scalar result back to the host.
    a_gpu = gpuarray.to_gpu(test_features[test_feat_index])
    b_gpu = gpuarray.to_gpu(train_features[train_feat_index])
    return gpuarray.sum((a_gpu - b_gpu) ** 2).get()
def calc_train(start, end, test_feat_index):
# print "pid", os.getpid(), start, end
# current_ranking = np.zeros(0)
current_ranking = np.zeros(0, dtype=np.float32)
dictionary = {}
    for index in range(start, end):
        distance = calc_distance(test_feat_index, index)
        if current_ranking.size >= k_number:
            # Only keep the candidate if it beats the current k-th best distance.
            if distance >= current_ranking[k_number - 1]:
                continue
            current_ranking = np.delete(current_ranking, k_number - 1, 0)
        current_ranking = np.append(current_ranking, distance)
# print distance
dictionary[distance] = train_real_class[index]
current_ranking = np.sort(current_ranking, kind="mergesort")
# print current_ranking, dictionary
new_dic = {}
for r in current_ranking:
new_dic[r] = dictionary[r]
return new_dic, current_ranking
time_start = datetime.datetime.now()
offset = int(total_training / process_number)
pool = Pool(processes=process_number)
for test_index, test_feat in enumerate(test_features):
start = 0
workers = []
for i in range(process_number):
end = start+offset
worker = pool.apply_async(calc_train, (start, end, test_index))
workers.append(worker)
start = end
# print "workers",workers
k_ranking_dict = {}
ranking = []
for worker in workers:
d, r = worker.get()
ranking = np.concatenate((ranking, r))
k_ranking_dict.update(d)
ranking = np.sort(ranking, kind="mergesort")
ranking = ranking[0:k_number]
# print "here",ranking, k_ranking_dict
to_count_array = []
for r in ranking:
# print "k_ranking_dict[key]",k_ranking_dict[key]
to_count_array.append(k_ranking_dict[r])
counting = np.bincount(to_count_array)
guessed_class = np.argmax(counting)
guessed_counter = counting[guessed_class]
counting = np.delete(counting, guessed_class)
if guessed_counter in counting:
result_rejection = result_rejection + 1
continue
real_class = test_real_class[test_index]
confusion_matrix[real_class,guessed_class] = confusion_matrix[real_class,guessed_class] + 1
# print real_class, guessed_class
if real_class != guessed_class:
result_error = result_error + 1
print_summary(test_index+1)
############
# END - PRESENT RESULTS
############
print "\n FINISHED. Final summary: \n"
tested = len(test_features)
print_summary(tested)
| gpl-3.0 | 4,105,071,858,557,358,000 | 30.788845 | 148 | 0.6665 | false |
clembou/PCWG | tests/interpolators_test.py | 1 | 4393 | import pcwg.core.interpolators as interpolators
import unittest
from pcwg.core.binning import Bins
class TestMarmanderPowerCurveInterpolator(unittest.TestCase):
def test_spreadsheet_benchmark(self):
x = [1.00,
2.00,
3.00,
4.10,
5.06,
6.04,
7.00,
8.00,
9.01,
9.98,
10.97,
12.00,
12.99,
13.95,
14.99,
16.01,
16.98,
17.84,
19.00,
20.00,
21.00,
22.00,
23.00,
24.00,
25.00,
26.00,
27.00,
28.00,
29.00,
30.00]
y = [0.0,
0.0,
0.0,
70.5,
198.6,
373.7,
578.4,
886.4,
1177.2,
1523.2,
1792.2,
1918.3,
1955.4,
1976.0,
1976.0,
1981.7,
1982.9,
1982.2,
1987.4,
1987.4,
1987.4,
1987.4,
1987.4,
1987.4,
1987.4,
0.0,
0.0,
0.0,
0.0,
0.0]
cutOutWindSpeed = 25.0
limits = Bins(0.0,1.0, 30.0).limits
expectedX = [1.00,
2.00,
3.00,
3.55,
4.10,
5.06,
6.04,
7.00,
8.00,
9.01,
9.98,
10.97,
12.00,
12.99,
13.95,
14.99,
16.01,
16.98,
17.84,
18.42,
19.00,
20.00,
21.00,
22.00,
23.00,
24.00,
25.00,
25.01,
26.00,
27.00]
expectedY = [0.0,
0.0,
0.0,
0.0,
91.8,
204.2,
383.9,
571.1,
893.5,
1173.5,
1522.4,
1794.9,
1922.8,
1954.5,
1977.4,
1975.1,
1982.2,
1983.4,
1979.4,
1987.4,
1987.4,
1987.4,
1987.4,
1987.4,
1987.4,
1987.4,
1987.4,
0.0,
0.0,
0.0
]
interpolator = interpolators.MarmanderPowerCurveInterpolator(x, y, cutOutWindSpeed, xLimits = limits, debug = False)
if interpolator.debug:
print interpolator.debugText
print "Cen\tExpect\tAct\tError\tTolerance\tMatch"
        # NOTE: a relatively large tolerance is required to make the test pass.
#This is understood to be associated with differences
#between the cubic interpolation scheme implemented
#in the excel benchmark and scipy.
#TODO: Further work to bottom out on this difference.
for i in range(len(expectedX)):
if expectedX[i] < 6.0:
tolerancePercent = 0.02
else:
tolerancePercent = 0.005
actual = interpolator(expectedX[i])
error = actual - expectedY[i]
if expectedY[i] != 0.0:
errorPercent = (actual - expectedY[i]) / expectedY[i]
else:
errorPercent = 0.0
match = (abs(errorPercent) <= tolerancePercent)
print "{0:.2f}\t{1:.2f}\t{2:.2f}\t{3:.2f}%\t{4:.2f}%\t{5}".format(expectedX[i], expectedY[i], actual, (errorPercent * 100.0), (tolerancePercent * 100.0), match)
self.assertTrue(match)
if __name__ == '__main__':
unittest.main()
| mit | 3,842,213,902,350,668,000 | 24.247126 | 172 | 0.332575 | false |
Azure/azure-sdk-for-python | sdk/keyvault/azure-keyvault-keys/samples/backup_restore_operations_async.py | 1 | 3785 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import asyncio
import os
from azure.keyvault.keys.aio import KeyClient
from azure.identity.aio import DefaultAzureCredential
from azure.core.exceptions import HttpResponseError
# ----------------------------------------------------------------------------------------------------------
# Prerequisites:
# 1. An Azure Key Vault (https://docs.microsoft.com/en-us/azure/key-vault/quick-create-cli)
#
# 2. azure-keyvault-keys and azure-identity libraries (pip install these)
#
# 3. Set Environment variables AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET, VAULT_URL
# (See https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/keyvault/azure-keyvault-keys#authenticate-the-client)
#
# ----------------------------------------------------------------------------------------------------------
# Sample - demonstrates the basic backup and restore operations on a vault(key) resource for Azure Key Vault
#
# 1. Create a key (create_key)
#
# 2. Backup a key (backup_key)
#
# 3. Delete a key (delete_key)
#
# 4. Purge a key (purge_deleted_key)
#
# 5. Restore a key (restore_key_backup)
# ----------------------------------------------------------------------------------------------------------
async def run_sample():
# Instantiate a key client that will be used to call the service.
# Notice that the client is using default Azure credentials.
# To make default credentials work, ensure that environment variables 'AZURE_CLIENT_ID',
# 'AZURE_CLIENT_SECRET' and 'AZURE_TENANT_ID' are set with the service principal credentials.
VAULT_URL = os.environ["VAULT_URL"]
credential = DefaultAzureCredential()
client = KeyClient(vault_url=VAULT_URL, credential=credential)
try:
# Let's create a Key of type RSA.
# if the key already exists in the Key Vault, then a new version of the key is created.
print("\n.. Create Key")
key = await client.create_key("keyName", "RSA")
print("Key with name '{0}' created with key type '{1}'".format(key.name, key.key_type))
# Backups are good to have, if in case keys gets deleted accidentally.
# For long term storage, it is ideal to write the backup to a file.
print("\n.. Create a backup for an existing Key")
key_backup = await client.backup_key(key.name)
print("Backup created for key with name '{0}'.".format(key.name))
# The rsa key is no longer in use, so you delete it.
deleted_key = await client.delete_key(key.name)
print("Deleted key with name '{0}'".format(deleted_key.name))
# Purge the deleted key.
# The purge will take some time, so wait before restoring the backup to avoid a conflict.
print("\n.. Purge the key")
await client.purge_deleted_key(key.name)
await asyncio.sleep(60)
print("Purged key with name '{0}'".format(deleted_key.name))
# In the future, if the key is required again, we can use the backup value to restore it in the Key Vault.
print("\n.. Restore the key using the backed up key bytes")
key = await client.restore_key_backup(key_backup)
print("Restored key with name '{0}'".format(key.name))
except HttpResponseError as e:
print("\nrun_sample has caught an error. {0}".format(e.message))
finally:
print("\nrun_sample done")
await credential.close()
await client.close()
if __name__ == "__main__":
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(run_sample())
loop.close()
except Exception as e:
print("Top level Error: {0}".format(str(e)))
| mit | -6,305,333,877,761,251,000 | 43.011628 | 123 | 0.60317 | false |
Habitissimo/vespapp-web | vespapp/settings.py | 1 | 4155 | """
Django settings for vespapp project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
from decouple import config
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_DIR = os.path.dirname(os.path.realpath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@7xvv!(2p469*!j7d)g^tc-e*$spkkj+g#rly6m+b7%p3hift&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
BASE_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
VENDOR_APPS = (
'rest_framework',
)
MY_APPS = (
'web',
'api',
)
INSTALLED_APPS = BASE_APPS + VENDOR_APPS + MY_APPS
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vespapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vespapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
if not DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config("DB_NAME"),
'USER': config("DB_USER"),
'PASSWORD': config("DB_PASSWORD", default='supersecret'),
'HOST': config("DB_HOST"),
'PORT': config("DB_PORT", default="5432"),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
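# Example .env file consumed by python-decouple's config() in the non-DEBUG
# branch above (illustrative values only):
#   DB_NAME=vespapp
#   DB_USER=vespapp
#   DB_PASSWORD=supersecret
#   DB_HOST=localhost
#   DB_PORT=5432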
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'es-ES'
TIME_ZONE = 'Europe/Madrid'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, 'staticfiles'),
)
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')
MEDIA_URL = '/media/'
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
)
}
| gpl-3.0 | 6,548,691,946,700,982,000 | 24.648148 | 91 | 0.662335 | false |
masschallenge/django-accelerator | accelerator/tests/contexts/judge_feedback_context.py | 1 | 13749 | from accelerator_abstract.models import (
FORM_ELEM_FEEDBACK_TO_MC,
FORM_ELEM_FEEDBACK_TO_STARTUP,
FORM_ELEM_OVERALL_RECOMMENDATION,
)
from accelerator.models import (
ACTIVE_PROGRAM_STATUS,
ASSIGNED_PANEL_ASSIGNMENT_STATUS,
COMPLETE_PANEL_ASSIGNMENT_STATUS,
FEEDBACK_DISPLAY_DISABLED as DISABLED,
FEEDBACK_DISPLAY_ENABLED as ENABLED,
IN_PERSON_JUDGING_ROUND_TYPE,
ONLINE_JUDGING_ROUND_TYPE,
JUDGING_FEEDBACK_STATUS_INCOMPLETE as INCOMPLETE,
PREVIEW_PANEL_STATUS,
SUBMITTED_APP_STATUS,
UserRole,
)
from accelerator.tests.factories import (
ApplicationAnswerFactory,
ApplicationFactory,
ApplicationPanelAssignmentFactory,
ExpertFactory,
JudgeApplicationFeedbackFactory,
JudgeFeedbackComponentFactory,
JudgePanelAssignmentFactory,
JudgeRoundCommitmentFactory,
JudgingFormElementFactory,
PanelFactory,
ProgramCycleFactory,
ProgramRoleFactory,
ProgramRoleGrantFactory,
ScenarioFactory,
StartupCycleInterestFactory,
StartupProgramInterestFactory,
)
from accelerator.tests.contexts.context_utils import get_user_role_by_name
from .judging_round_context import JudgingRoundContext
ELEMENT_NAMES = [
FORM_ELEM_OVERALL_RECOMMENDATION,
FORM_ELEM_FEEDBACK_TO_STARTUP,
FORM_ELEM_FEEDBACK_TO_MC,
]
_round_type = {True: ONLINE_JUDGING_ROUND_TYPE,
False: IN_PERSON_JUDGING_ROUND_TYPE}
class JudgeFeedbackContext:
def __init__(self,
application=None,
num_components=1,
complete=True,
panel_status=PREVIEW_PANEL_STATUS,
display_feedback=False,
merge_feedback_with=None,
cycle_based_round=False,
online_round=True,
is_active=True,
judge_capacity=10,
program_status=ACTIVE_PROGRAM_STATUS):
self.judging_capacity = 0
if application:
self.application = application
self.cycle = application.cycle
else:
self.cycle = ProgramCycleFactory()
self.application = ApplicationFactory(
application_status=SUBMITTED_APP_STATUS,
application_type=self.cycle.default_application_type,
cycle=self.cycle)
self.application_type = self.application.application_type
self.applications = [self.application]
self.startup = self.application.startup
self.industry = self.startup.primary_industry
feedback_display = ENABLED if display_feedback else DISABLED
jr_kwargs = {
'program__cycle': self.cycle,
'round_type': _round_type[online_round],
'feedback_display': feedback_display,
'cycle_based_round': cycle_based_round,
'application_type': self.application_type,
'is_active': False,
'program__program_status': program_status,
}
if merge_feedback_with:
jr_kwargs['feedback_merge_with'] = merge_feedback_with
self.judging_round = JudgingRoundContext(**jr_kwargs).judging_round
self.program = self.judging_round.program
self.panel = PanelFactory(status=panel_status,
panel_time__judging_round=self.judging_round)
self.scenario = ScenarioFactory(judging_round=self.judging_round)
user_role = get_user_role_by_name(UserRole.JUDGE)
self.judge_role = ProgramRoleFactory(program=self.program,
user_role=user_role)
self.judges = []
self.judge = self.add_judge(complete=complete,
capacity=judge_capacity)
self.feedback = JudgeApplicationFeedbackFactory(
judge=self.judge,
application=self.application,
panel=self.panel,
form_type=self.judging_round.judging_form)
self.judging_form = self.feedback.form_type
self.application_assignment = ApplicationPanelAssignmentFactory(
application=self.application,
panel=self.panel,
scenario=self.scenario)
cycle_interest = StartupCycleInterestFactory(cycle=self.program.cycle,
startup=self.startup)
StartupProgramInterestFactory(program=self.program,
startup=self.startup,
startup_cycle_interest=cycle_interest,
applying=True,
order=1)
self.components = []
self.elements = []
self.application_questions = []
self.application_answers = []
for element_name in ELEMENT_NAMES:
self.add_component(element_name=element_name)
if complete:
self.feedback.save()
for _ in range(num_components):
self.add_component()
else:
for _ in range(num_components):
self.add_element()
self.judging_round.is_active = is_active
self.judging_round.save()
def add_application_answer(self, question=None, answer_text=None):
question = question or self.application_questions[0]
kwargs = {"application_question": question,
"application": self.application}
if answer_text:
kwargs["answer_text"] = answer_text
app_answer = ApplicationAnswerFactory(**kwargs)
self.application_answers.append(app_answer)
return app_answer
def add_component(self, element_name=None,
feedback_element=None,
add_answer=True,
answer_text=None):
factory_params = {
"judge_feedback": self.feedback, }
if feedback_element is None:
app_type_key = "__".join(["feedback_element",
"application_question",
"application_type"])
factory_params.update(
{
"feedback_element__form_type": self.judging_form,
"feedback_element__element_type": "feedback",
"feedback_element__mandatory": True,
"feedback_element__sharing": "share-with-startup",
app_type_key: self.application_type}
)
if element_name:
factory_params['feedback_element__element_name'] = element_name
else:
factory_params.update({"feedback_element": feedback_element})
if answer_text:
factory_params["answer_text"] = answer_text
component = JudgeFeedbackComponentFactory(
**factory_params)
self.components.append(component)
question = component.feedback_element.application_question
self.application_questions.append(question)
if add_answer:
app_answer = ApplicationAnswerFactory(
application_question=question,
application=self.application)
self.application_answers.append(app_answer)
if feedback_element is None:
self.elements.append(component.feedback_element)
self.feedback.save()
return component
def add_element(self,
feedback_type="",
element_type="feedback",
choice_layout="",
mandatory=True,
text_minimum=0,
text_minimum_units="",
answer_text=None,
text_limit=0,
text_limit_units=""):
element = JudgingFormElementFactory(
form_type=self.judging_form,
mandatory=mandatory,
element_type=element_type,
feedback_type=feedback_type,
choice_layout=choice_layout,
sharing="share-with-startup",
application_question__application_type=self.application_type,
text_minimum=text_minimum,
text_minimum_units=text_minimum_units,
text_limit=text_limit,
text_limit_units=text_limit_units,
)
application_question = element.application_question
self.application_questions.append(application_question)
answer_kwargs = {"application_question": application_question,
"application": self.application}
if answer_text:
answer_kwargs["answer_text"] = answer_text
application_answer = ApplicationAnswerFactory(**answer_kwargs)
self.application_answers.append(application_answer)
self.elements.append(element)
self.feedback.save()
return element
def add_extra_scenario(self):
return ScenarioFactory(judging_round=self.judging_round)
def add_panel(self):
return PanelFactory(
panel_time__judging_round=self.judging_round,
panel_type__judging_round=self.judging_round,
location__judging_round=self.judging_round)
def add_assignment(self,
judge=None,
panel=None,
scenario=None):
scenario = scenario or self.scenario
judge = judge or self.judge
panel = panel or self.panel
return JudgePanelAssignmentFactory(
judge=judge,
panel=panel,
scenario=scenario)
def add_feedback(self,
application=None,
judge=None,
panel=None,
feedback_status=INCOMPLETE):
judge = judge or self.judge
application = application or self.application
panel = panel or self.panel
if not panel.applicationpanelassignment_set.filter(
application=application).exists():
ApplicationPanelAssignmentFactory(
application=application,
panel=panel,
scenario=self.scenario)
return JudgeApplicationFeedbackFactory(
feedback_status=feedback_status,
judge=judge,
application=application,
panel=panel,
form_type=self.judging_round.judging_form)
def add_application(self,
application=None,
field=None,
option=None,
program=None):
program = program or self.program
if application is None:
fields = {
"application_status": SUBMITTED_APP_STATUS,
"application_type": self.application_type,
}
if field:
fields[field] = option
application = ApplicationFactory(**fields)
self.applications.append(application)
startup = application.startup
cycle_interest = StartupCycleInterestFactory(cycle=program.cycle,
startup=startup)
StartupProgramInterestFactory(program=program,
startup=startup,
startup_cycle_interest=cycle_interest,
applying=True,
order=1)
return application
def add_applications(self, count, field=None, options=[], programs=[]):
result = []
option_count = len(options)
option = None
program_count = len(programs)
program = None
for i in range(count):
if option_count > 0:
option = options[i % option_count]
if program_count > 0:
program = programs[i % program_count]
result.append(self.add_application(field=field,
option=option,
program=program))
return result
def add_judge(self,
assigned=True,
complete=True,
judge=None,
panel=None,
capacity=10):
if judge is None:
judge = ExpertFactory(
profile__primary_industry=self.industry,
profile__home_program_family=self.program.program_family)
ProgramRoleGrantFactory(person=judge, program_role=self.judge_role)
self.judging_round.confirmed_judge_label.users.add(judge)
JudgeRoundCommitmentFactory(judging_round=self.judging_round,
judge=judge,
                                    capacity=capacity,
commitment_state=True)
self.judging_capacity += capacity
if assigned:
if complete:
status = COMPLETE_PANEL_ASSIGNMENT_STATUS
else:
status = ASSIGNED_PANEL_ASSIGNMENT_STATUS
JudgePanelAssignmentFactory(
judge=judge,
assignment_status=status,
panel=panel or self.panel,
scenario=self.scenario)
self.judges.append(judge)
return judge
@classmethod
def create_batch(cls, qty, *args, **kwargs):
if 'merge_feedback' in kwargs:
merge_feedback = kwargs.pop('merge_feedback')
else:
merge_feedback = False
contexts = [cls(*args, **kwargs)]
if merge_feedback:
kwargs['merge_feedback_with'] = contexts[0].judging_round
for _ in range(1, qty):
contexts.append(cls(*args, **kwargs))
return contexts
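# Hypothetical usage sketch (not part of the module; names refer to the context
# class and factories defined above):
#   context = JudgeFeedbackContext(num_components=2, complete=False)
#   extra_judge = context.add_judge(assigned=True, complete=False)
#   feedback = context.add_feedback(judge=extra_judge)
#   assert feedback.feedback_status == INCOMPLETE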
| mit | 4,584,632,868,787,122,000 | 38.852174 | 79 | 0.56695 | false |
superbatlc/dtailweb | phonegroups/migrations/0001_initial.py | 1 | 1870 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('systems', '0001_initial'),
('calls', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Phonegroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255, verbose_name=b'Nome')),
('code', models.CharField(max_length=10, verbose_name=b'Codice')),
('parent', models.ForeignKey(related_name='child_phonegroup_set', blank=True, to='phonegroups.Phonegroup', help_text=b'The father of this group', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PhonegroupCall',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('call', models.ForeignKey(to='calls.Call')),
('phonegroup', models.ForeignKey(to='phonegroups.Phonegroup')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PhonegroupExtension',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('extension', models.CharField(max_length=4)),
('phonegroup', models.ForeignKey(to='phonegroups.Phonegroup')),
('system', models.ForeignKey(to='systems.System')),
],
options={
},
bases=(models.Model,),
),
]
| gpl-2.0 | -5,535,782,326,201,843,000 | 36.4 | 174 | 0.536364 | false |
hiraditya/fool | tensorflow/scaling-up-ml-using-cmle.py | 1 | 6449 | '''
In this lab, you will learn how to:
Package up TensorFlow model
Run training locally
Run training on cloud
Deploy model to cloud
Invoke model to carry out predictions
'''
'''
Scaling up ML using Cloud ML Engine
In this notebook, we take a previously developed TensorFlow model to predict taxifare rides and package it up so that it can be run in Cloud MLE. For now, we'll run this on a small dataset. The model that was developed is rather simplistic, and therefore, the accuracy of the model is not great either. However, this notebook illustrates how to package up a TensorFlow model to run it within Cloud ML.
Later in the course, we will look at ways to make a more effective machine learning model.
Environment variables for project and bucket
Note that:
Your project id is the unique string that identifies your project (not the project name). You can find this from the GCP Console dashboard's Home page. My dashboard reads: Project ID: cloud-training-demos
Cloud training often involves saving and restoring model files. If you don't have a bucket already, I suggest that you create one from the GCP console (because it will dynamically check whether the bucket name you want is available). A common pattern is to prefix the bucket name by the project id, so that it is unique. Also, for cost reasons, you might want to use a single region bucket.
Change the cell below to reflect your Project ID and bucket name.
'''
import os
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# for bash
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.7' # Tensorflow version
%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%bash
PROJECT_ID=$PROJECT
AUTH_TOKEN=$(gcloud auth print-access-token)
SVC_ACCOUNT=$(curl -X GET -H "Content-Type: application/json" \
-H "Authorization: Bearer $AUTH_TOKEN" \
https://ml.googleapis.com/v1/projects/${PROJECT_ID}:getConfig \
| python -c "import json; import sys; response = json.load(sys.stdin); \
print response['serviceAccount']")
echo "Authorizing the Cloud ML Service account $SVC_ACCOUNT to access files in $BUCKET"
gsutil -m defacl ch -u $SVC_ACCOUNT:R gs://$BUCKET
gsutil -m acl ch -u $SVC_ACCOUNT:R -r gs://$BUCKET # error message (if bucket is empty) can be ignored
gsutil -m acl ch -u $SVC_ACCOUNT:W gs://$BUCKET
'''
Packaging up the code
Take your code and put it into a standard Python package structure. model.py and task.py contain the TensorFlow code from earlier (explore the directory structure).
'''
!find taxifare
!cat taxifare/trainer/model.py
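# A sketch of the package layout the commands above assume (taxifare/,
# trainer/, model.py and task.py are named in this lab; the per-file
# descriptions are illustrative only):
#
#   taxifare/
#       trainer/
#           __init__.py
#           model.py    # input functions, feature columns, train/eval logic
#           task.py     # argparse entry point that calls into model.py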
'''
Find absolute paths to your data
Note the absolute paths below. /content is mapped in Datalab to where the home icon takes you
'''
%bash
echo $PWD
rm -rf $PWD/taxi_trained
head -1 $PWD/taxi-train.csv
head -1 $PWD/taxi-valid.csv
'''
Running the Python module from the command-line
'''
%bash
rm -rf taxifare.tar.gz taxi_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare
python -m trainer.task \
--train_data_paths="${PWD}/taxi-train*" \
--eval_data_paths=${PWD}/taxi-valid.csv \
--output_dir=${PWD}/taxi_trained \
--train_steps=1000 --job-dir=./tmp
%bash
ls $PWD/taxi_trained/export/exporter/
%writefile ./test.json
{"pickuplon": -73.885262,"pickuplat": 40.773008,"dropofflon": -73.987232,"dropofflat": 40.732403,"passengers": 2}
%bash
model_dir=$(ls ${PWD}/taxi_trained/export/exporter)
gcloud ml-engine local predict \
--model-dir=${PWD}/taxi_trained/export/exporter/${model_dir} \
--json-instances=./test.json
'''
Running locally using gcloud
'''
%bash
rm -rf taxifare.tar.gz taxi_trained
gcloud ml-engine local train \
--module-name=trainer.task \
--package-path=${PWD}/taxifare/trainer \
-- \
--train_data_paths=${PWD}/taxi-train.csv \
--eval_data_paths=${PWD}/taxi-valid.csv \
--train_steps=1000 \
--output_dir=${PWD}/taxi_trained
'''
When I ran it (due to random seeds, your results will be different), the average_loss (Mean Squared Error) on the evaluation dataset was 187, meaning that the RMSE was around 13.
'''
from google.datalab.ml import TensorBoard
TensorBoard().start('./taxi_trained')
for pid in TensorBoard.list()['pid']:
TensorBoard().stop(pid)
print 'Stopped TensorBoard with pid {}'.format(pid)
'''
If the above step (to stop TensorBoard) appears stalled, just move on to the next step. You don't need to wait for it to return.
'''
!ls $PWD/taxi_trained
'''
Submit training job using gcloud
First copy the training data to the cloud. Then, launch a training job.
After you submit the job, go to the cloud console (http://console.cloud.google.com) and select Machine Learning | Jobs to monitor progress.
Note: Don't be concerned if the notebook stalls (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud. Use the Cloud Console link (above) to monitor the job.
https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction
'''
%bash
echo $BUCKET
gsutil -m rm -rf gs://${BUCKET}/taxifare/smallinput/
gsutil -m cp ${PWD}/*.csv gs://${BUCKET}/taxifare/smallinput/
%%bash
OUTDIR=gs://${BUCKET}/taxifare/smallinput/taxi_trained
JOBNAME=lab3a_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=${PWD}/taxifare/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC \
--runtime-version=$TFVERSION \
-- \
--train_data_paths="gs://${BUCKET}/taxifare/smallinput/taxi-train*" \
--eval_data_paths="gs://${BUCKET}/taxifare/smallinput/taxi-valid*" \
--output_dir=$OUTDIR \
--train_steps=10000
Job [lab3a_180607_192245] submitted successfully.
Your job is still active. You may view the status of your job with the command (on google cloud consile)
$ gcloud ml-engine jobs describe lab3a_180607_192245
or continue streaming the logs with the command
$ gcloud ml-engine jobs stream-logs lab3a_180607_192245
Use the Cloud Console link to monitor the job and do NOT proceed until the job is done.
| mit | 2,726,381,697,759,011,000 | 36.71345 | 401 | 0.728485 | false |
wangyixiaohuihui/spark2-annotation | python/pyspark/mllib/stat/_statistics.py | 1 | 13703 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version >= '3':
basestring = str
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
from pyspark.mllib.linalg import Matrix, _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.stat.test import ChiSqTestResult, KolmogorovSmirnovTestResult
__all__ = ['MultivariateStatisticalSummary', 'Statistics']
class MultivariateStatisticalSummary(JavaModelWrapper):
"""
Trait for multivariate statistical summary of a data matrix.
"""
def mean(self):
return self.call("mean").toArray()
def variance(self):
return self.call("variance").toArray()
def count(self):
return int(self.call("count"))
def numNonzeros(self):
return self.call("numNonzeros").toArray()
def max(self):
return self.call("max").toArray()
def min(self):
return self.call("min").toArray()
def normL1(self):
return self.call("normL1").toArray()
def normL2(self):
return self.call("normL2").toArray()
class Statistics(object):
@staticmethod
def colStats(rdd):
"""
Computes column-wise summary statistics for the input RDD[Vector].
:param rdd: an RDD[Vector] for which column-wise summary statistics
are to be computed.
:return: :class:`MultivariateStatisticalSummary` object containing
column-wise summary statistics.
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([2, 0, 0, -2]),
... Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8])])
>>> cStats = Statistics.colStats(rdd)
>>> cStats.mean()
array([ 4., 4., 0., 3.])
>>> cStats.variance()
array([ 4., 13., 0., 25.])
>>> cStats.count()
3
>>> cStats.numNonzeros()
array([ 3., 2., 0., 3.])
>>> cStats.max()
array([ 6., 7., 0., 8.])
>>> cStats.min()
array([ 2., 0., 0., -2.])
"""
cStats = callMLlibFunc("colStats", rdd.map(_convert_to_vector))
return MultivariateStatisticalSummary(cStats)
@staticmethod
def corr(x, y=None, method=None):
"""
Compute the correlation (matrix) for the input RDD(s) using the
specified method.
Methods currently supported: I{pearson (default), spearman}.
If a single RDD of Vectors is passed in, a correlation matrix
comparing the columns in the input RDD is returned. Use C{method=}
        to specify the method to be used for single RDD input.
If two RDDs of floats are passed in, a single float is returned.
:param x: an RDD of vector for which the correlation matrix is to be computed,
or an RDD of float of the same cardinality as y when y is specified.
:param y: an RDD of float of the same cardinality as x.
:param method: String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`
:return: Correlation matrix comparing columns in x.
>>> x = sc.parallelize([1.0, 0.0, -2.0], 2)
>>> y = sc.parallelize([4.0, 5.0, 3.0], 2)
>>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2)
>>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7
True
>>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson")
True
>>> Statistics.corr(x, y, "spearman")
0.5
>>> from math import isnan
>>> isnan(Statistics.corr(x, zeros))
True
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])])
>>> pearsonCorr = Statistics.corr(rdd)
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
[[ 1. 0.05564149 NaN 0.40047142]
[ 0.05564149 1. NaN 0.91359586]
[ NaN NaN 1. NaN]
[ 0.40047142 0.91359586 NaN 1. ]]
>>> spearmanCorr = Statistics.corr(rdd, method="spearman")
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
[[ 1. 0.10540926 NaN 0.4 ]
[ 0.10540926 1. NaN 0.9486833 ]
[ NaN NaN 1. NaN]
[ 0.4 0.9486833 NaN 1. ]]
>>> try:
... Statistics.corr(rdd, "spearman")
... print("Method name as second argument without 'method=' shouldn't be allowed.")
... except TypeError:
... pass
"""
# Check inputs to determine whether a single value or a matrix is needed for output.
# Since it's legal for users to use the method name as the second argument, we need to
# check if y is used to specify the method name instead.
if type(y) == str:
raise TypeError("Use 'method=' to specify method name.")
if not y:
return callMLlibFunc("corr", x.map(_convert_to_vector), method).toArray()
else:
return callMLlibFunc("corr", x.map(float), y.map(float), method)
@staticmethod
@ignore_unicode_prefix
def chiSqTest(observed, expected=None):
"""
If `observed` is Vector, conduct Pearson's chi-squared goodness
of fit test of the observed data against the expected distribution,
        or against the uniform distribution (by default), with each category
having an expected frequency of `1 / len(observed)`.
If `observed` is matrix, conduct Pearson's independence test on the
input contingency matrix, which cannot contain negative entries or
columns or rows that sum up to 0.
If `observed` is an RDD of LabeledPoint, conduct Pearson's independence
test for every feature against the label across the input RDD.
For each feature, the (feature, label) pairs are converted into a
contingency matrix for which the chi-squared statistic is computed.
All label and feature values must be categorical.
.. note:: `observed` cannot contain negative values
:param observed: it could be a vector containing the observed categorical
counts/relative frequencies, or the contingency matrix
(containing either counts or relative frequencies),
or an RDD of LabeledPoint containing the labeled dataset
with categorical features. Real-valued features will be
treated as categorical for each distinct value.
:param expected: Vector containing the expected categorical counts/relative
frequencies. `expected` is rescaled if the `expected` sum
differs from the `observed` sum.
:return: ChiSquaredTest object containing the test statistic, degrees
of freedom, p-value, the method used, and the null hypothesis.
>>> from pyspark.mllib.linalg import Vectors, Matrices
>>> observed = Vectors.dense([4, 6, 5])
>>> pearson = Statistics.chiSqTest(observed)
>>> print(pearson.statistic)
0.4
>>> pearson.degreesOfFreedom
2
>>> print(round(pearson.pValue, 4))
0.8187
>>> pearson.method
u'pearson'
>>> pearson.nullHypothesis
u'observed follows the same distribution as expected.'
>>> observed = Vectors.dense([21, 38, 43, 80])
>>> expected = Vectors.dense([3, 5, 7, 20])
>>> pearson = Statistics.chiSqTest(observed, expected)
>>> print(round(pearson.pValue, 4))
0.0027
>>> data = [40.0, 24.0, 29.0, 56.0, 32.0, 42.0, 31.0, 10.0, 0.0, 30.0, 15.0, 12.0]
>>> chi = Statistics.chiSqTest(Matrices.dense(3, 4, data))
>>> print(round(chi.statistic, 4))
21.9958
>>> data = [LabeledPoint(0.0, Vectors.dense([0.5, 10.0])),
... LabeledPoint(0.0, Vectors.dense([1.5, 20.0])),
... LabeledPoint(1.0, Vectors.dense([1.5, 30.0])),
... LabeledPoint(0.0, Vectors.dense([3.5, 30.0])),
... LabeledPoint(0.0, Vectors.dense([3.5, 40.0])),
... LabeledPoint(1.0, Vectors.dense([3.5, 40.0])),]
>>> rdd = sc.parallelize(data, 4)
>>> chi = Statistics.chiSqTest(rdd)
>>> print(chi[0].statistic)
0.75
>>> print(chi[1].statistic)
1.5
"""
if isinstance(observed, RDD):
if not isinstance(observed.first(), LabeledPoint):
raise ValueError("observed should be an RDD of LabeledPoint")
jmodels = callMLlibFunc("chiSqTest", observed)
return [ChiSqTestResult(m) for m in jmodels]
if isinstance(observed, Matrix):
jmodel = callMLlibFunc("chiSqTest", observed)
else:
if expected and len(expected) != len(observed):
raise ValueError("`expected` should have same length with `observed`")
jmodel = callMLlibFunc("chiSqTest", _convert_to_vector(observed), expected)
return ChiSqTestResult(jmodel)
@staticmethod
@ignore_unicode_prefix
def kolmogorovSmirnovTest(data, distName="norm", *params):
"""
Performs the Kolmogorov-Smirnov (KS) test for data sampled from
a continuous distribution. It tests the null hypothesis that
the data is generated from a particular distribution.
The given data is sorted and the Empirical Cumulative
Distribution Function (ECDF) is calculated
which for a given point is the number of points having a CDF
value lesser than it divided by the total number of points.
Since the data is sorted, this is a step function
that rises by (1 / length of data) for every ordered point.
The KS statistic gives us the maximum distance between the
ECDF and the CDF. Intuitively if this statistic is large, the
        probability that the null hypothesis is true becomes small.
For specific details of the implementation, please have a look
at the Scala documentation.
:param data: RDD, samples from the data
:param distName: string, currently only "norm" is supported.
(Normal distribution) to calculate the
theoretical distribution of the data.
:param params: additional values which need to be provided for
a certain distribution.
If not provided, the default values are used.
:return: KolmogorovSmirnovTestResult object containing the test
statistic, degrees of freedom, p-value,
the method used, and the null hypothesis.
>>> kstest = Statistics.kolmogorovSmirnovTest
>>> data = sc.parallelize([-1.0, 0.0, 1.0])
>>> ksmodel = kstest(data, "norm")
>>> print(round(ksmodel.pValue, 3))
1.0
>>> print(round(ksmodel.statistic, 3))
0.175
>>> ksmodel.nullHypothesis
u'Sample follows theoretical distribution'
>>> data = sc.parallelize([2.0, 3.0, 4.0])
>>> ksmodel = kstest(data, "norm", 3.0, 1.0)
>>> print(round(ksmodel.pValue, 3))
1.0
>>> print(round(ksmodel.statistic, 3))
0.175
"""
if not isinstance(data, RDD):
raise TypeError("data should be an RDD, got %s." % type(data))
if not isinstance(distName, basestring):
raise TypeError("distName should be a string, got %s." % type(distName))
params = [float(param) for param in params]
return KolmogorovSmirnovTestResult(
callMLlibFunc("kolmogorovSmirnovTest", data, distName, params))
def _test():
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("mllib.stat.statistics tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | 1,669,562,013,303,584,300 | 40.821875 | 95 | 0.583157 | false |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/enums/types/lead_form_desired_intent.py | 1 | 1209 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.enums',
marshal='google.ads.googleads.v6',
manifest={
'LeadFormDesiredIntentEnum',
},
)
class LeadFormDesiredIntentEnum(proto.Message):
r"""Describes the desired level of intent of generated leads. """
class LeadFormDesiredIntent(proto.Enum):
r"""Enum describing the desired level of intent of generated
leads.
"""
UNSPECIFIED = 0
UNKNOWN = 1
LOW_INTENT = 2
HIGH_INTENT = 3
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -8,953,890,120,698,834,000 | 29.225 | 74 | 0.687345 | false |
praekelt/jmbo-twitter | jmbo_twitter/models.py | 1 | 5204 | import datetime, twitter
from urllib2 import URLError
import logging
from django.db import models
from django.core.cache import cache
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from jmbo.models import ModelBase
logger = logging.getLogger('django')
class Status(ModelBase):
"""Purely a wrapper that allows us to use jmbo-foundry's listings for
tweets."""
def __init__(self, status):
# Copy attributes over
attrs = ('contributors', 'coordinates', 'created_at', \
'created_at_in_seconds', 'favorited', 'geo', 'hashtags', 'id', \
'in_reply_to_screen_name', 'in_reply_to_status_id', \
'in_reply_to_user_id', 'location', 'now', 'place', \
'relative_created_at', 'retweet_count', 'retweeted', \
'retweeted_status', 'source', 'text', 'truncated', 'urls', 'user', \
'user_mentions', 'created_at_datetime')
for attr in attrs:
setattr(self, attr, getattr(status, attr))
@property
def as_leaf_class(self):
return self
def save(self):
        raise NotImplementedError
class StatusMixin(object):
def get_statuses(self, api):
        raise NotImplementedError
def fetch(self, force=False):
klass_name = self.__class__.__name__
cache_key = 'jmbo_twitter_%s_%s' % (klass_name, self.id)
cached = cache.get(cache_key, None)
if (cached is not None) and not force:
return cached
# Get and check settings
di = getattr(settings, 'JMBO_TWITTER', {})
ck = di.get('consumer_key')
cs = di.get('consumer_secret')
atk = di.get('access_token_key')
ats = di.get('access_token_secret')
if not all([ck, cs, atk, ats]):
logger.error(
'jmbo_twitter.models.%s.fetch - incomplete settings' \
% klass_name
)
return []
# Query twitter taking care to handle network errors
api = twitter.Api(
consumer_key=ck, consumer_secret=cs, access_token_key=atk,
access_token_secret=ats, requests_timeout=10
)
try:
statuses = self.get_statuses(api)
except (URLError, ValueError, twitter.TwitterError):
statuses = []
except Exception, e:
# All manner of things can go wrong with integration
logger.error(
'jmbo_twitter.models.%s.fetch - %s' % (klass_name, e.message)
)
statuses = []
for status in statuses:
status.created_at_datetime = datetime.datetime.fromtimestamp(
status.created_at_in_seconds
)
if statuses:
# Only set if there are statuses. Twitter may randomly throttle us
# and destroy our cache without this check. Cache for a long time
            # in case Twitter goes down.
cache.set(cache_key, statuses, 86400)
# Legacy return
return statuses
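    # Sketch of the settings block fetch() expects (example values are
    # assumptions, not part of this module); the four keys belong to a
    # registered Twitter application:
    #
    #   JMBO_TWITTER = {
    #       'consumer_key': '...',
    #       'consumer_secret': '...',
    #       'access_token_key': '...',
    #       'access_token_secret': '...',
    #   }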
@property
def fetched(self):
klass_name = self.__class__.__name__
cache_key = 'jmbo_twitter_%s_%s' % (klass_name, self.id)
return cache.get(cache_key, [])
@property
def tweets(self):
class MyList(list):
"""Slightly emulate QuerySet API so jmbo-foundry listings work"""
@property
def exists(self):
return len(self) > 0
result = []
for status in self.fetched:
result.append(Status(status))
return MyList(result)
class Feed(ModelBase, StatusMixin):
"""A feed represents a twitter user account"""
name = models.CharField(
max_length=255,
unique=True,
help_text="A twitter account name, eg. johnsmith"
)
profile_image_url = models.CharField(
null=True, editable=False, max_length=255
)
twitter_id = models.CharField(max_length=255, default='', editable=False)
def get_statuses(self, api):
# Fall back to slug for historical reasons
statuses = api.GetUserTimeline(
screen_name=self.name or self.slug, include_rts=True
)
return statuses
def fetch(self, force=False):
statuses = super(Feed, self).fetch(force=force)
if statuses:
# This is a convenient place to set the feed image url
status = statuses[0]
changed = False
if status.user.profile_image_url != self.profile_image_url:
self.profile_image_url = status.user.profile_image_url
changed = True
if status.user.name != self.title:
self.title = status.user.name
changed = True
if changed:
self.save()
return statuses
class Search(ModelBase, StatusMixin):
"""A search represents a twitter keyword search"""
criteria = models.CharField(
max_length=255,
unique=True,
help_text="Search string or a hashtag"
)
class Meta:
verbose_name_plural = _("Searches")
def get_statuses(self, api):
return api.GetSearch(self.criteria)
| bsd-3-clause | 148,431,581,819,713,100 | 30.349398 | 80 | 0.580515 | false |
ronas/PythonGNF | Fabulao/PedidosCapa.py | 1 | 3001 | # -*- coding: latin -*-
import sys
#from PyQt5 import QtGui, QtCore, QtWidgets #, QTableWidget, QTableWidgetItem
from PyQt5.QtWidgets import QApplication, QWidget, QTableWidget, QTableWidgetItem, QLineEdit, QLabel
from PyQt5.QtCore import QSize, Qt
import pymysql
config = {
'host': 'localhost',
'port': 3306,
'database': 'LojaDB',
'user': 'root',
'password' : 'fbl1978'
}
class ClasseAPP(QWidget):
def __init__(self):
super(ClasseAPP, self).__init__()
self.initUI()
def initUI(self):
self.setWindowTitle('Pedidos')
self.resize(850, 400)
self.move(300, 200)
self.tabela = QTableWidget(3,5,self)
self.tabela.setGeometry(20,20,760,300)
self.tabela.setHorizontalHeaderLabels(('Numero Pedido','Data','Codigo Cliente','Telefone','Cond Pagamento'))
self.dbBuscarPedidos()
self.lblNumeroPedido = QLabel('Numero Pedido',self)
self.lblNumeroPedido.setGeometry(20,330,130,25)
self.lblData = QLabel('Data',self)
        self.lblData.setGeometry(100,360,50,25)
#self.lblCodigoCliente = QLabel('Codigo Cliente',self)
#self.lblCodigoCliente.setGeometry()
#self.lblTelefone = QLabel('Telefone',self)
#self.lblTelefone.setGeometry()
#self.lblCondPagamento = QLabel('Cond Pagamento',self)
#self.lblCondPagamento.setGeometry()
self.txtNumeroPedido = QLineEdit(self)
self.txtNumeroPedido.setGeometry(130,330,130,25)
self.txtData = QLineEdit(self)
self.txtData.setGeometry(130,360,50,25)
#self.txtCodigoCliente = QLineEdit(self)
        #self.txtCodigoCliente.setGeometry()
#self.txtTelefone = QLineEdit(self)
#self.txtTelefone.setGeometry()
#self.txtCondPagamento = QLineEdit(self)
#self.txtCondPagamento.setGeometry()
self.tabela.resizeColumnsToContents()
self.show()
def dbBuscarPedidos(self):
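        # Fetch every row from LojaDB.Pedidos and rebuild the table contents.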
db = pymysql.connect(**config)
cursor = db.cursor()
comando = ('select * from LojaDB.Pedidos ')
        cursor.execute(comando)
self.tabela.setRowCount(0)
registros = cursor.fetchall()
for registro in registros:
numerolinhas = self.tabela.rowCount()
self.tabela.insertRow(numerolinhas)
self.tabela.setItem(numerolinhas, 0, QTableWidgetItem( str(registro[0]) ))
self.tabela.setItem(numerolinhas, 1, QTableWidgetItem( str(registro[1]) ))
self.tabela.setItem(numerolinhas, 2, QTableWidgetItem( registro[2] ))
self.tabela.setItem(numerolinhas, 3, QTableWidgetItem( str(registro[3]) ))
self.tabela.setItem(numerolinhas, 4, QTableWidgetItem( registro[4] ))
cursor.close()
db.close()
def main():
app = QApplication(sys.argv)
MeuApp = ClasseAPP()
sys.exit(app.exec_())
if __name__ == '__main__':
main() | gpl-3.0 | -1,464,602,394,802,016,300 | 29.632653 | 116 | 0.625125 | false |
caronc/newsreap | newsreap/Logging.py | 1 | 6395 | # -*- coding: utf-8 -*-
#
# Common Logging Parameters and Defaults
#
# Copyright (C) 2015-2017 Chris Caron <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# The first part of the file defines all of the namespacing
# used by this application
import sys
import logging
import logging.handlers
# We intentionally import this module so it preconfigures it's logging
# From there we can choose to manipulate it later without worrying about
# it's configuration over-riding ours; This creates a lint warning
# that we're importing a module we're not using; but this is intended.
# do not comment out or remove this entry
import sqlalchemy
# The default logger identifier used for general logging
NEWSREAP_LOGGER = 'newsreap'
# The default logger which displays backend engine and
# NNTP Server Interaction
NEWSREAP_ENGINE = '%s.engine' % NEWSREAP_LOGGER
# Codec Manipulation such as yEnc, uuencoded, etc
NEWSREAP_CODEC = '%s.codec' % NEWSREAP_LOGGER
# Users should utilize this for their hook logging
NEWSREAP_HOOKS = '%s.hooks' % NEWSREAP_LOGGER
# Command Line Interface Logger
NEWSREAP_CLI = '%s.cli' % NEWSREAP_LOGGER
# For a common reference point, we include the static logging
# Resource at the time for this information was:
# - http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html#dbengine-logging
#
# namespaces used by SQLAlchemy
SQLALCHEMY_LOGGER = 'sqlalchemy'
# Defines the logger for the SQLAlchemy Engine
SQLALCHEMY_ENGINE = '%s.engine' % SQLALCHEMY_LOGGER
# Controls SQLAlchemy's connection pool logging.
SQLALCHEMY_POOL = '%s.pool' % SQLALCHEMY_LOGGER
# Controls SQLAlchemy's various Object Relational Mapping (ORM) logging.
SQLALCHEMY_ORM = '%s.orm' % SQLALCHEMY_LOGGER
# The number of bytes reached before automatically rotating the log file
# if this option was specified
# 5000000 bytes == 5 Megabytes
LOG_ROTATE_FILESIZE_BYTES = 5000000
def add_handler(logger, sendto=True, backupCount=5):
"""
    Add handler to the identified logger
sendto == None then logging is disabled
sendto == True then logging is put to stdout
sendto == False then logging is put to stderr
sendto == <string> then logging is routed to the filename specified
    If sendto is a <string>, then backupCount defines the number of rotated
    log files to keep around. Set this to 0 or None if you don't want the
    python logger to rotate the files at all. By default logs are rotated
    once they reach 5MB.
"""
if sendto is True:
# redirect to stdout
handler = logging.StreamHandler(sys.stdout)
elif sendto is False:
# redirect to stderr
handler = logging.StreamHandler(sys.stderr)
elif sendto is None:
# redirect to null
try:
handler = logging.NullHandler()
except AttributeError:
# Python <= v2.6
class NullHandler(logging.Handler):
def emit(self, record):
pass
handler = NullHandler()
# Set data to NOTSET just to eliminate the
# extra checks done internally
if logger.level != logging.NOTSET:
logger.setLevel(logging.NOTSET)
elif isinstance(sendto, basestring):
if backupCount is None:
handler = logging.FileHandler(filename=sendto)
elif isinstance(backupCount, int):
            handler = logging.handlers.RotatingFileHandler(
filename=sendto,
maxBytes=LOG_ROTATE_FILESIZE_BYTES,
backupCount=backupCount,
)
else:
# We failed to add a handler
return False
# Setup Log Format
handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s %(name)s %(message)s'))
# Add Handler
logger.addHandler(handler)
return True
def init(verbose=2, sendto=True, backupCount=5):
"""
    Sets up some simple default handling to make it
easier for those wrapping this library.
You do not need to call this function if you
    don't want to; ideally one might want to set up
things their own way.
"""
# Add our handlers at the parent level
add_handler(
logging.getLogger(SQLALCHEMY_LOGGER),
sendto=True,
backupCount=backupCount,
)
add_handler(
logging.getLogger(NEWSREAP_LOGGER),
sendto=True,
backupCount=backupCount,
)
if verbose:
set_verbosity(verbose=verbose)
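# Example wiring for an application embedding this module (a sketch only;
# the log path and verbosity below are assumptions, not defaults):
#
#   from newsreap import Logging
#   Logging.init(verbose=2, sendto='/var/log/newsreap.log', backupCount=3)
#   logging.getLogger(Logging.NEWSREAP_ENGINE).info('engine ready')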
def set_verbosity(verbose):
"""
A simple function one can use to set the verbosity of
the app.
"""
# Default
logging.getLogger(SQLALCHEMY_LOGGER).setLevel(logging.ERROR)
logging.getLogger(SQLALCHEMY_ENGINE).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_LOGGER).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_CLI).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_CODEC).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_HOOKS).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_ENGINE).setLevel(logging.ERROR)
# Handle Verbosity
if verbose > 0:
logging.getLogger(NEWSREAP_CLI).setLevel(logging.INFO)
logging.getLogger(NEWSREAP_HOOKS).setLevel(logging.INFO)
logging.getLogger(NEWSREAP_ENGINE).setLevel(logging.INFO)
if verbose > 1:
logging.getLogger(NEWSREAP_CLI).setLevel(logging.DEBUG)
logging.getLogger(NEWSREAP_HOOKS).setLevel(logging.DEBUG)
logging.getLogger(NEWSREAP_ENGINE).setLevel(logging.DEBUG)
if verbose > 2:
logging.getLogger(SQLALCHEMY_ENGINE).setLevel(logging.INFO)
logging.getLogger(NEWSREAP_CODEC).setLevel(logging.INFO)
if verbose > 3:
logging.getLogger(NEWSREAP_CODEC).setLevel(logging.DEBUG)
if verbose > 4:
logging.getLogger(SQLALCHEMY_ENGINE).setLevel(logging.DEBUG)
# If the root newsreap logger has not been configured yet, fall back to
# errors-only output.
rootlogger = logging.getLogger(NEWSREAP_LOGGER)
if rootlogger.level == logging.NOTSET:
set_verbosity(-1)
| gpl-3.0 | 3,607,045,367,080,225,300 | 31.794872 | 76 | 0.696638 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/operations/load_balancer_load_balancing_rules_operations.py | 1 | 7995 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class LoadBalancerLoadBalancingRulesOperations(object):
"""LoadBalancerLoadBalancingRulesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-08-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-08-01"
self.config = config
def list(
self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the load balancing rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of LoadBalancingRule
:rtype:
~azure.mgmt.network.v2017_08_01.models.LoadBalancingRulePaged[~azure.mgmt.network.v2017_08_01.models.LoadBalancingRule]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.LoadBalancingRulePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.LoadBalancingRulePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get(
self, resource_group_name, load_balancer_name, load_balancing_rule_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified load balancer load balancing rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param load_balancing_rule_name: The name of the load balancing rule.
:type load_balancing_rule_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: LoadBalancingRule or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_08_01.models.LoadBalancingRule or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules/{loadBalancingRuleName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'loadBalancingRuleName': self._serialize.url("load_balancing_rule_name", load_balancing_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LoadBalancingRule', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| mit | -4,003,765,549,931,447,000 | 45.482558 | 186 | 0.643652 | false |
RedhawkSDR/integration-gnuhawk | components/fll_band_edge_cc_4o/tests/test_fll_band_edge_cc_4o.py | 1 | 4545 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of GNUHAWK.
#
# GNUHAWK is free software: you can redistribute it and/or modify is under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/.
#
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
"""Test for all component implementations in fll_band_edge_cc_4o"""
def testScaBasicBehavior(self):
#######################################################################
# Launch the component with the default execparams
execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
self.launch(execparams)
#######################################################################
# Verify the basic state of the component
self.assertNotEqual(self.comp, None)
self.assertEqual(self.comp.ref._non_existent(), False)
self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)
self.assertEqual(self.spd.get_id(), self.comp.ref._get_identifier())
#######################################################################
# Simulate regular component startup
# Verify that initialize nor configure throw errors
self.comp.initialize()
configureProps = self.getPropertySet(kinds=("configure",), modes=("readwrite", "writeonly"), includeNil=False)
self.comp.configure(configureProps)
#######################################################################
# Validate that query returns all expected parameters
# Query of '[]' should return the following set of properties
expectedProps = []
expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
props = self.comp.query([])
props = dict((x.id, any.from_any(x.value)) for x in props)
# Query may return more than expected, but not less
for expectedProp in expectedProps:
self.assertEquals(props.has_key(expectedProp.id), True)
#######################################################################
# Verify that all expected ports are available
for port in self.scd.get_componentfeatures().get_ports().get_uses():
port_obj = self.comp.getPort(str(port.get_usesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True)
for port in self.scd.get_componentfeatures().get_ports().get_provides():
port_obj = self.comp.getPort(str(port.get_providesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a(port.get_repid()), True)
#######################################################################
# Make sure start and stop can be called without throwing exceptions
self.comp.start()
self.comp.stop()
#######################################################################
# Simulate regular component shutdown
self.comp.releaseObject()
# TODO Add additional tests here
#
# See:
# ossie.utils.bulkio.bulkio_helpers,
# ossie.utils.bluefile.bluefile_helpers
# for modules that will assist with testing components with BULKIO ports
if __name__ == "__main__":
ossie.utils.testing.main("../fll_band_edge_cc_4o.spd.xml") # By default tests all implementations
| gpl-3.0 | 149,642,112,192,682,620 | 47.870968 | 133 | 0.588559 | false |
Mirantis/octane | octane/commands/sync_images.py | 1 | 2791 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from octane.helpers.sync_glance_images import sync_glance_images
from octane.util import db
from octane.util import env as env_util
from octane.util import ssh
def prepare(orig_id, seed_id):
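    # Copy keystone state from the original environment to the seed one:
    # dump the keystone database, restore it on the seed, run
    # keystone-manage db_sync on a seed controller, then restart memcached
    # on every seed controller so stale cached tokens are dropped.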
orig_env = environment_obj.Environment(orig_id)
seed_env = environment_obj.Environment(seed_id)
controller = env_util.get_one_controller(seed_env)
with tempfile.NamedTemporaryFile() as temp:
db.mysqldump_from_env(orig_env, ['keystone'], temp.name)
db.mysqldump_restore_to_env(seed_env, temp.name)
ssh.call(['keystone-manage', 'db_sync'],
node=controller, parse_levels=True)
for controller in env_util.get_controllers(seed_env):
ssh.call(['service', 'memcached', 'restart'], node=controller)
class SyncImagesCommand(cmd.Command):
"""Sync glance images between ORIG and SEED environments"""
def get_parser(self, prog_name):
parser = super(SyncImagesCommand, self).get_parser(prog_name)
parser.add_argument(
'orig_id', type=int, metavar='ORIG_ID',
help="ID of original environment")
parser.add_argument(
'seed_id', type=int, metavar='SEED_ID',
help="ID of seed environment")
parser.add_argument(
'swift_ep', type=str,
help="Endpoint's name where swift-proxy service is listening on")
return parser
def take_action(self, parsed_args):
sync_glance_images(parsed_args.orig_id, parsed_args.seed_id,
parsed_args.swift_ep)
class SyncImagesPrepareCommand(cmd.Command):
"""Sync glance images between ORIG and SEED environments"""
def get_parser(self, prog_name):
parser = super(SyncImagesPrepareCommand, self).get_parser(prog_name)
parser.add_argument(
'orig_id', type=int, metavar='ORIG_ID',
help="ID of original environment")
parser.add_argument(
'seed_id', type=int, metavar='SEED_ID',
help="ID of seed environment")
return parser
def take_action(self, parsed_args):
prepare(parsed_args.orig_id, parsed_args.seed_id)
| apache-2.0 | -4,300,452,264,042,446,300 | 36.716216 | 77 | 0.677893 | false |
Wolnosciowiec/file-repository | client/bahub/bahubapp/handler/__init__.py | 1 | 4733 | from ..entity.definition import BackupDefinition
from ..service.client import FileRepositoryClient
from ..service.pipefactory import PipeFactory
from ..exceptions import ReadWriteException
from ..result import CommandExecutionResult
from logging import Logger
import string
import random
import subprocess
from shutil import copyfileobj
class BackupHandler:
""" Manages the process of backup and restore, interacts with different sources of backup data using adapters """
_client = None # type: FileRepositoryClient
_pipe_factory = None # type: PipeFactory
_logger = None # type: Logger
_definition = None
def __init__(self,
_client: FileRepositoryClient,
_pipe_factory: PipeFactory,
_logger: Logger,
_definition: BackupDefinition):
self._client = _client
self._pipe_factory = _pipe_factory
self._logger = _logger
self._definition = _definition
def perform_backup(self):
self._validate()
self._validate_running_command()
response = self._read()
if response.return_code != 0 and response.return_code is not None:
raise ReadWriteException('Backup source read error, use --debug and retry to investigate')
upload_response = self._client.send(response.stdout, self._get_definition())
response.process.wait(15)
response.stdout.close()
return upload_response
def perform_restore(self, version: str):
response = self._write(
self._read_from_storage(version)
)
response.process.wait()
self._logger.info('Waiting for process to finish')
if response.return_code is not None and response.return_code > 0:
raise ReadWriteException('Cannot write files to disk while restoring from backup. Errors: '
+ str(response.stderr.read().decode('utf-8')))
self._logger.info('No errors found, sending success information')
return '{"status": "OK"}'
def close(self):
self._logger.info('Finishing the process')
self._close()
def _get_definition(self) -> BackupDefinition:
return self._definition
def _execute_command(self, command: str, stdin=None) -> CommandExecutionResult:
"""
Executes a command on local machine, returning stdout as a stream, and streaming in the stdin (optionally)
"""
self._logger.debug('shell(' + command + ')')
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE if stdin else None,
executable='/bin/bash',
shell=True)
if stdin:
self._logger.info('Copying stdin to process')
try:
copyfileobj(stdin, process.stdin)
except BrokenPipeError:
raise ReadWriteException(
'Cannot write to process, broken pipe occurred, probably a tar process died. '
+ str(process.stdin.read()) + str(process.stderr.read())
)
process.stdin.close()
return CommandExecutionResult(process.stdout, process.stderr, process.returncode, process)
def _validate_running_command(self):
""" Validate if the command really exports the data, does not end up with an error """
response = self._read()
response.stdout.read(1024)
response.process.kill()
response.process.wait(15)
if response.process.returncode > 0:
raise ReadWriteException(
'The process exited with incorrect code, try to verify the command in with --debug switch'
)
def _validate(self):
raise Exception('_validate() not implemented for handler')
def _read(self) -> CommandExecutionResult:
""" TAR output or file stream buffered from ANY source for example """
raise Exception('_read() not implemented for handler')
def _write(self, stream) -> CommandExecutionResult:
""" A file stream or tar output be written into the storage. May be OpenSSL encoded, depends on definition """
raise Exception('_write() not implemented for handler')
def _read_from_storage(self, version: str):
return self._client.fetch(version, self._get_definition())
def _close(self):
pass
@staticmethod
def generate_id(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
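# A minimal concrete handler could look like the sketch below (illustration
# only, not part of bahub; the tar commands and paths are assumptions):
#
#   class TarDirectoryHandler(BackupHandler):
#       def _validate(self):
#           pass  # nothing to check in this sketch
#
#       def _read(self) -> CommandExecutionResult:
#           return self._execute_command('tar -czf - /var/www')
#
#       def _write(self, stream) -> CommandExecutionResult:
#           return self._execute_command('tar -xzf - -C /', stdin=stream)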
| lgpl-3.0 | -7,351,242,393,543,664,000 | 35.129771 | 118 | 0.616734 | false |
Saevon/saevon.github.io | plugins/jinja2/__init__.py | 1 | 1409 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from itertools import chain
from plugins.jinja2.debug_dump import dump, dump_all, dump_file, dump_url
from plugins.jinja2.category import find_category, category_preview_articles, mark
from plugins.jinja2.article import is_untold
from plugins.jinja2.nav import is_cur_page, get_main
from plugins.jinja2.summary import summary, summary_raw
from plugins.jinja2.tags import update_count, tag_remap, tag_sort, tag_ratios
from plugins.jinja2.date import date_to_xmlschema
from plugins.jinja2.string import title_case, file_title_case
from datetime import datetime
JINJA_FILTERS = {
'find_category': find_category,
'category_preview_articles': category_preview_articles,
'mark': mark,
'is_untold': is_untold,
'update_count': update_count,
'tag_remap': tag_remap,
'tag_ratios': tag_ratios,
'tag_sort': tag_sort,
'summary': summary,
'summary_raw': summary_raw,
'merge': lambda *args: list(chain(*args)),
'is_cur_page': is_cur_page,
'date_to_xmlschema': date_to_xmlschema,
# DEBUG
'dump': dump,
'dump_all': dump_all,
# String
'title_case': title_case,
'file_title_case': file_title_case,
}
JINJA_FUNCTIONS = {
'today': datetime.today,
'get_main': get_main,
'dump_file': dump_file,
'dump_url': dump_url,
}
JINJA_EXTENSIONS = []
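# Sketch of how these mappings could be attached to a Jinja2 environment
# (assumed wiring; the real hookup depends on the site generator's config):
#
#   import jinja2
#   env = jinja2.Environment()
#   env.filters.update(JINJA_FILTERS)
#   env.globals.update(JINJA_FUNCTIONS)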
| mit | 2,002,489,755,693,733,400 | 23.719298 | 82 | 0.686302 | false |
aerokappa/SantaClaus | handCodedOptimum_v4.py | 1 | 2216 | import numpy as np
import pandas as pd
from processInput import processInput
def handCodedOptimum_v4():
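    # Hand-tuned packing: each of the 1000 bags gets a fixed mix of gift
    # types chosen by its bag index; itemCount holds the number of gifts to
    # pack per gift type, in giftListSummary row order.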
fileName = 'gifts.csv'
giftList, giftListSummary = processInput( fileName )
packedBags = []
for i in np.arange(1000):
print i
currentBag = []
if (i< 333):
itemCount = np.array([0 ,3 ,0 ,0 ,0 ,0 ,0 ,3 ,0])
elif ((i>=333) & (i<458)):
itemCount = np.array([8, 0, 0, 0, 0, 0, 0, 0, 0])
elif ((i>=458) & (i<583)):
itemCount = np.array([0, 0, 0, 0, 0, 0, 8, 0, 0])
elif ((i>=583) & (i<916)):
itemCount = np.array([0, 0, 0, 3, 0, 2, 0, 0, 0])
elif ((i>=916) & (i<924)):
itemCount = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 25])
elif ((i>=924) & (i<928)):
itemCount = np.array([ 0, 23, 0, 0, 0, 0, 0, 0, 0])
elif ((i>=928) & (i<938)):
itemCount = np.array([ 0, 0, 0, 0, 0, 19, 0, 0, 0])
elif ((i>=938) & (i<939)):
itemCount = np.array([ 0, 0, 0, 0, 0, 11, 0, 1, 0])
elif ((i>=939) & (i<940)):
itemCount = np.array([0, 9, 0, 1, 0, 0, 0, 0, 0])
else:
itemCount = np.array([0, 0, 1, 0, 0, 5, 0, 0, 0])
for i in np.arange(len(itemCount)):
if (itemCount[i] <= giftListSummary['nGiftsNotPacked'][i]):
for j in np.arange(itemCount[i]):
giftName = giftListSummary['GiftType'][i]
currGiftID = giftListSummary['nGiftsPacked'][i]
currentBag.append(giftName+'_'+str(currGiftID))
giftListSummary['nGiftsPacked'][i] += 1
giftListSummary['nGiftsNotPacked'][i] -= 1
packedBags.append(currentBag)
    # Write the packed bags to 'submission_5.csv'
subFile = open('submission_5.csv','w')
subFile.write('Gifts\n')
for currentBag in packedBags:
subFile.write(currentBag[0])
for currentItem in currentBag[1:]:
subFile.write(' ')
subFile.write(currentItem)
subFile.write('\n')
subFile.close()
return packedBags | mit | -6,132,476,382,293,829,000 | 35.344262 | 73 | 0.476083 | false |
Jbonnett/Mutagen-flo | mutagen/ogg.py | 1 | 17770 | # Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# $Id: ogg.py 3975 2007-01-13 21:51:17Z piman $
"""Read and write Ogg bitstreams and pages.
This module reads and writes a subset of the Ogg bitstream format
version 0. It does *not* read or write Ogg Vorbis files! For that,
you should use mutagen.oggvorbis.
This implementation is based on the RFC 3533 standard found at
http://www.xiph.org/ogg/doc/rfc3533.txt.
"""
import struct
import sys
import zlib
from cStringIO import StringIO
from mutagen import FileType
from mutagen._util import cdata, insert_bytes, delete_bytes, WrappedFileobj
class error(IOError):
"""Ogg stream parsing errors."""
pass
class OggPage(object):
"""A single Ogg page (not necessarily a single encoded packet).
A page is a header of 26 bytes, followed by the length of the
data, followed by the data.
    The constructor is given a file-like object pointing to the start
of an Ogg page. After the constructor is finished it is pointing
to the start of the next page.
Attributes:
version -- stream structure version (currently always 0)
position -- absolute stream position (default -1)
serial -- logical stream serial number (default 0)
sequence -- page sequence number within logical stream (default 0)
offset -- offset this page was read from (default None)
complete -- if the last packet on this page is complete (default True)
packets -- list of raw packet data (default [])
Note that if 'complete' is false, the next page's 'continued'
property must be true (so set both when constructing pages).
If a file-like object is supplied to the constructor, the above
attributes will be filled in based on it.
"""
version = 0
__type_flags = 0
position = 0L
serial = 0
sequence = 0
offset = None
complete = True
def __init__(self, fileobj=None):
self.packets = []
if fileobj is None:
return
self.offset = fileobj.tell()
header = fileobj.read(27)
if len(header) == 0:
raise EOFError
try:
(oggs, self.version, self.__type_flags, self.position,
self.serial, self.sequence, crc, segments) = struct.unpack(
"<4sBBqIIiB", header)
except struct.error:
raise error("unable to read full header; got %r" % header)
if oggs != "OggS":
raise error("read %r, expected %r, at 0x%x" % (
oggs, "OggS", fileobj.tell() - 27))
if self.version != 0:
raise error("version %r unsupported" % self.version)
total = 0
lacings = []
lacing_bytes = fileobj.read(segments)
if len(lacing_bytes) != segments:
raise error("unable to read %r lacing bytes" % segments)
for c in map(ord, lacing_bytes):
total += c
if c < 255:
lacings.append(total)
total = 0
if total:
lacings.append(total)
self.complete = False
self.packets = map(fileobj.read, lacings)
if map(len, self.packets) != lacings:
raise error("unable to read full data")
def __eq__(self, other):
"""Two Ogg pages are the same if they write the same data."""
try:
return (self.write() == other.write())
except AttributeError:
return False
__hash__ = object.__hash__
def __repr__(self):
attrs = ['version', 'position', 'serial', 'sequence', 'offset',
'complete', 'continued', 'first', 'last']
values = ["%s=%r" % (attr, getattr(self, attr)) for attr in attrs]
return "<%s %s, %d bytes in %d packets>" % (
type(self).__name__, " ".join(values), sum(map(len, self.packets)),
len(self.packets))
def write(self):
"""Return a string encoding of the page header and data.
A ValueError is raised if the data is too big to fit in a
single page.
"""
data = [
struct.pack("<4sBBqIIi", "OggS", self.version, self.__type_flags,
self.position, self.serial, self.sequence, 0)
]
lacing_data = []
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
lacing_data.append("\xff" * quot + chr(rem))
lacing_data = "".join(lacing_data)
if not self.complete and lacing_data.endswith("\x00"):
lacing_data = lacing_data[:-1]
data.append(chr(len(lacing_data)))
data.append(lacing_data)
data.extend(self.packets)
data = "".join(data)
# Python's CRC is swapped relative to Ogg's needs.
crc = ~zlib.crc32(data.translate(cdata.bitswap), -1)
# Although we're using to_int_be, this actually makes the CRC
# a proper le integer, since Python's CRC is byteswapped.
crc = cdata.to_int_be(crc).translate(cdata.bitswap)
data = data[:22] + crc + data[26:]
return data
def __size(self):
size = 27 # Initial header size
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
size += quot + 1
if not self.complete and rem == 0:
# Packet contains a multiple of 255 bytes and is not
# terminated, so we don't have a \x00 at the end.
size -= 1
size += sum(map(len, self.packets))
return size
size = property(__size, doc="Total frame size.")
def __set_flag(self, bit, val):
mask = 1 << bit
if val: self.__type_flags |= mask
else: self.__type_flags &= ~mask
continued = property(
lambda self: cdata.test_bit(self.__type_flags, 0),
lambda self, v: self.__set_flag(0, v),
doc="The first packet is continued from the previous page.")
first = property(
lambda self: cdata.test_bit(self.__type_flags, 1),
lambda self, v: self.__set_flag(1, v),
doc="This is the first page of a logical bitstream.")
last = property(
lambda self: cdata.test_bit(self.__type_flags, 2),
lambda self, v: self.__set_flag(2, v),
doc="This is the last page of a logical bitstream.")
def renumber(klass, fileobj, serial, start):
"""Renumber pages belonging to a specified logical stream.
fileobj must be opened with mode r+b or w+b.
Starting at page number 'start', renumber all pages belonging
to logical stream 'serial'. Other pages will be ignored.
fileobj must point to the start of a valid Ogg page; any
        occurring after it and part of the specified logical stream
will be numbered. No adjustment will be made to the data in
the pages nor the granule position; only the page number, and
so also the CRC.
If an error occurs (e.g. non-Ogg data is found), fileobj will
        be left pointing to the place in the stream where the error occurred,
but the invalid data will be left intact (since this function
does not change the total file size).
"""
number = start
while True:
try: page = OggPage(fileobj)
except EOFError:
break
else:
if page.serial != serial:
# Wrong stream, skip this page.
continue
# Changing the number can't change the page size,
# so seeking back based on the current size is safe.
fileobj.seek(-page.size, 1)
page.sequence = number
fileobj.write(page.write())
fileobj.seek(page.offset + page.size, 0)
number += 1
renumber = classmethod(renumber)
def to_packets(klass, pages, strict=False):
"""Construct a list of packet data from a list of Ogg pages.
If strict is true, the first page must start a new packet,
and the last page must end the last packet.
"""
serial = pages[0].serial
sequence = pages[0].sequence
packets = []
if strict:
if pages[0].continued:
raise ValueError("first packet is continued")
if not pages[-1].complete:
raise ValueError("last packet does not complete")
elif pages and pages[0].continued:
packets.append("")
for page in pages:
if serial != page.serial:
raise ValueError("invalid serial number in %r" % page)
elif sequence != page.sequence:
raise ValueError("bad sequence number in %r" % page)
else: sequence += 1
if page.continued: packets[-1] += page.packets[0]
else: packets.append(page.packets[0])
packets.extend(page.packets[1:])
return packets
to_packets = classmethod(to_packets)
def from_packets(klass, packets, sequence=0,
default_size=4096, wiggle_room=2048):
"""Construct a list of Ogg pages from a list of packet data.
The algorithm will generate pages of approximately
default_size in size (rounded down to the nearest multiple of
255). However, it will also allow pages to increase to
approximately default_size + wiggle_room if allowing the
wiggle room would finish a packet (only one packet will be
finished in this way per page; if the next packet would fit
into the wiggle room, it still starts on a new page).
This method reduces packet fragmentation when packet sizes are
slightly larger than the default page size, while still
ensuring most pages are of the average size.
        Pages are numbered starting at 'sequence'; other information is
uninitialized.
"""
chunk_size = (default_size // 255) * 255
pages = []
page = OggPage()
page.sequence = sequence
for packet in packets:
page.packets.append("")
while packet:
data, packet = packet[:chunk_size], packet[chunk_size:]
if page.size < default_size and len(page.packets) < 255:
page.packets[-1] += data
else:
# If we've put any packet data into this page yet,
# we need to mark it incomplete. However, we can
# also have just started this packet on an already
# full page, in which case, just start the new
# page with this packet.
if page.packets[-1]:
page.complete = False
if len(page.packets) == 1:
page.position = -1L
else:
page.packets.pop(-1)
pages.append(page)
page = OggPage()
page.continued = not pages[-1].complete
page.sequence = pages[-1].sequence + 1
page.packets.append(data)
if len(packet) < wiggle_room:
page.packets[-1] += packet
packet = ""
if page.packets:
pages.append(page)
return pages
from_packets = classmethod(from_packets)
def replace(klass, fileobj, old_pages, new_pages):
"""Replace old_pages with new_pages within fileobj.
old_pages must have come from reading fileobj originally.
new_pages are assumed to have the 'same' data as old_pages,
and so the serial and sequence numbers will be copied, as will
the flags for the first and last pages.
fileobj will be resized and pages renumbered as necessary. As
such, it must be opened r+b or w+b.
"""
# Number the new pages starting from the first old page.
first = old_pages[0].sequence
for page, seq in zip(new_pages, range(first, first + len(new_pages))):
page.sequence = seq
page.serial = old_pages[0].serial
new_pages[0].first = old_pages[0].first
new_pages[0].last = old_pages[0].last
new_pages[0].continued = old_pages[0].continued
new_pages[-1].first = old_pages[-1].first
new_pages[-1].last = old_pages[-1].last
new_pages[-1].complete = old_pages[-1].complete
if not new_pages[-1].complete and len(new_pages[-1].packets) == 1:
new_pages[-1].position = -1L
new_data = "".join(map(klass.write, new_pages))
# Make room in the file for the new data.
delta = len(new_data)
fileobj.seek(old_pages[0].offset, 0)
insert_bytes(fileobj, delta, old_pages[0].offset)
fileobj.seek(old_pages[0].offset, 0)
fileobj.write(new_data)
new_data_end = old_pages[0].offset + delta
# Go through the old pages and delete them. Since we shifted
# the data down the file, we need to adjust their offsets. We
# also need to go backwards, so we don't adjust the deltas of
# the other pages.
old_pages.reverse()
for old_page in old_pages:
adj_offset = old_page.offset + delta
delete_bytes(fileobj, old_page.size, adj_offset)
        # Finally, if there's any discrepancy in length, we need to
# renumber the pages for the logical stream.
if len(old_pages) != len(new_pages):
fileobj.seek(new_data_end, 0)
serial = new_pages[-1].serial
sequence = new_pages[-1].sequence + 1
klass.renumber(fileobj, serial, sequence)
replace = classmethod(replace)
def find_last(klass, fileobj, serial):
"""Find the last page of the stream 'serial'.
If the file is not multiplexed this function is fast. If it is,
        it must read the whole stream.
This finds the last page in the actual file object, or the last
page in the stream (with eos set), whichever comes first.
"""
# For non-muxed streams, look at the last page.
try: fileobj.seek(-256*256, 2)
except IOError:
# The file is less than 64k in length.
fileobj.seek(0)
data = fileobj.read()
try: index = data.rindex("OggS")
except ValueError:
raise error("unable to find final Ogg header")
stringobj = StringIO(data[index:])
best_page = None
try:
page = OggPage(stringobj)
except error:
pass
else:
if page.serial == serial:
if page.last: return page
else: best_page = page
else: best_page = None
# The stream is muxed, so use the slow way.
fileobj.seek(0)
try:
page = OggPage(fileobj)
while not page.last:
page = OggPage(fileobj)
while page.serial != serial:
page = OggPage(fileobj)
best_page = page
return page
except error:
return best_page
except EOFError:
return best_page
find_last = classmethod(find_last)
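# Illustrative sketch (the helper name and assumptions are ours, not
# mutagen's): one plausible way to combine to_packets, from_packets and
# replace to rewrite the packets of a logical stream in place. It assumes
# fileobj is opened "rb+" and that the file is not multiplexed.
def _example_rewrite_packets(fileobj, serial):
    fileobj.seek(0)
    old_pages = []
    while True:
        try:
            page = OggPage(fileobj)
        except EOFError:
            break
        if page.serial == serial:
            old_pages.append(page)
    # Flatten the pages into raw packets, rebuild pages with the default
    # paging parameters, and splice the result back into the file.
    packets = OggPage.to_packets(old_pages)
    new_pages = OggPage.from_packets(packets, old_pages[0].sequence)
    OggPage.replace(fileobj, old_pages, new_pages)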
class OggFileType(FileType):
"""An generic Ogg file."""
_Info = None
_Tags = None
_Error = None
_mimes = ["application/ogg", "application/x-ogg"]
def load(self, filename):
"""Load file information from a filename."""
self.filename = filename
fileobj = WrappedFileobj(filename, "rb")
try:
try:
self.info = self._Info(fileobj)
self.tags = self._Tags(fileobj, self.info)
if self.info.length:
# The streaminfo gave us real length information,
# don't waste time scanning the Ogg.
return
last_page = OggPage.find_last(fileobj, self.info.serial)
samples = last_page.position
try:
denom = self.info.sample_rate
except AttributeError:
denom = self.info.fps
self.info.length = samples / float(denom)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
def delete(self, filename=None):
"""Remove tags from a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
self.tags.clear()
fileobj = WrappedFileobj(filename, "rb+")
try:
try: self.tags._inject(fileobj)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
def save(self, filename=None):
"""Save a tag to a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
fileobj = WrappedFileobj(filename, "rb+")
try:
try: self.tags._inject(fileobj)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
| gpl-2.0 | -2,453,146,239,836,660,000 | 34.54 | 79 | 0.570568 | false |
kervi/kervi | kervi-core/kervi/core/utility/kervi_logging.py | 1 | 2921 | #Copyright 2016 Tim Wentlau.
#Distributed under the MIT License. See LICENSE in root of project.
"""
Module that holds log functionality.
In general you don't need to include this in your kervi code, as the spine class holds a reference
to a fully configured log.
"""
import logging
import logging.handlers
import os
VERBOSE = 15
logging.addLevelName(VERBOSE, "VERBOSE")
class BraceMessage(object):
def __init__(self, fmt, *args):
self.fmt = fmt
self.args = args
def __str__(self):
try:
if '{' in self.fmt:
return self.fmt.format(*self.args)
else:
return self.fmt % self.args
except:
return self.fmt
class KerviLog(object):
def __init__(self, name):
self.logger = logging.getLogger(name)
def info(self, message, *args):
self.logger.info(BraceMessage(message, *args))
def verbose(self, message, *args, **kwargs):
if args:
self.logger._log(VERBOSE, str(BraceMessage(message, *args)), None)
else:
self.logger._log(VERBOSE, message, *args)
def warn(self, message, *args):
self.logger.warning(BraceMessage(message, *args))
def warning(self, message, *args):
self.logger.warning(BraceMessage(message, *args))
def debug(self, message, *args):
if args:
self.logger.debug(BraceMessage(message, *args))
else:
self.logger.debug(message)
def error(self, message, *args):
self.logger.error(BraceMessage(message, *args))
def exception(self, message, *args):
self.logger.exception(BraceMessage(message, *args))
def fatal(self, message, *args):
self.logger.fatal(BraceMessage(message, *args))
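# Illustrative sketch (the component name and values are placeholders):
# KerviLog accepts both "{}"-style and "%s"-style placeholders, because
# BraceMessage defers the string formatting until the record is emitted.
def _example_logging():
    log = KerviLog("my_component")
    log.info("sensor {} reported {}", "temp1", 21.5)
    log.debug("raw reading: %s", 21.5)
    log.error("lost contact with {}", "controller")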
def init_process_logging(process_name, config, log_queue=None):
logger = logging.getLogger()
if config.level == "verbose":
logger.setLevel(VERBOSE)
elif config.level == "info":
logger.setLevel(logging.INFO)
elif config.level == "warning":
logger.setLevel(logging.WARNING)
elif config.level == "debug":
logger.setLevel(logging.DEBUG)
if log_queue:
queue_handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(queue_handler)
else:
if config.resetLog:
try:
os.remove(config.file)
except:
pass
file_handler = logging.FileHandler(config.file)
file_handler.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
| mit | 1,199,704,996,666,553,600 | 28.806122 | 96 | 0.618966 | false |
arcticfoxnv/slackminion | slackminion/plugin/base.py | 1 | 4847 | from six import string_types
from builtins import object
import logging
import threading
from slackminion.slack import SlackChannel, SlackIM, SlackUser, SlackRoom
class BasePlugin(object):
def __init__(self, bot, **kwargs):
self.log = logging.getLogger(type(self).__name__)
self._bot = bot
self._dont_save = False # By default, we want to save a plugin's state during save_state()
self._state_handler = False # State storage backends should set this to true
self._timer_callbacks = {}
self.config = {}
if 'config' in kwargs:
self.config = kwargs['config']
def on_load(self):
"""
Executes when a plugin is loaded.
Override this if your plugin needs to do initialization when loading.
Do not use this to restore runtime changes to variables -- they will be overwritten later on by
PluginManager.load_state()
"""
return True
def on_unload(self):
"""
Executes when a plugin is unloaded.
Override this if your plugin needs to do cleanup when unloading.
"""
return True
def on_connect(self):
"""
Executes immediately after connecting to slack.
Will not fire on reconnects.
"""
return True
def send_message(self, channel, text, thread=None, reply_broadcast=False):
"""
Used to send a message to the specified channel.
* channel - can be a channel or user
* text - message to send
* thread - thread to reply in
* reply_broadcast - whether or not to also send the message to the channel
"""
self.log.debug('Sending message to channel {} of type {}'.format(channel, type(channel)))
if isinstance(channel, SlackIM) or isinstance(channel, SlackUser):
self._bot.send_im(channel, text)
elif isinstance(channel, SlackRoom):
self._bot.send_message(channel, text, thread, reply_broadcast)
elif isinstance(channel, string_types):
if channel[0] == '@':
self._bot.send_im(channel[1:], text)
elif channel[0] == '#':
self._bot.send_message(channel[1:], text, thread, reply_broadcast)
else:
self._bot.send_message(channel, text, thread, reply_broadcast)
else:
self._bot.send_message(channel, text, thread, reply_broadcast)
def start_timer(self, duration, func, *args):
"""
Schedules a function to be called after some period of time.
* duration - time in seconds to wait before firing
* func - function to be called
* args - arguments to pass to the function
"""
self.log.info("Scheduling call to %s in %ds: %s", func.__name__, duration, args)
if self._bot.runnable:
t = threading.Timer(duration, self._timer_callback, (func, args))
self._timer_callbacks[func] = t
self._bot.timers.append(t)
t.start()
self.log.info("Scheduled call to %s in %ds", func.__name__, duration)
else:
self.log.warning("Not scheduling call to %s in %ds because we're shutting down.", func.__name__, duration)
def stop_timer(self, func):
"""
Stops a timer if it hasn't fired yet
* func - the function passed in start_timer
"""
self.log.debug('Stopping timer {}'.format(func.__name__))
if func in self._timer_callbacks:
t = self._timer_callbacks[func]
self._bot.timers.remove(t)
t.cancel()
del self._timer_callbacks[func]
def _timer_callback(self, func, args):
self.log.debug('Executing timer function {}'.format(func.__name__))
try:
func(*args)
except Exception:
self.log.exception("Caught exception executing timer function: {}".format(func.__name__))
def get_user(self, username):
"""
Utility function to query slack for a particular user
:param username: The username of the user to lookup
:return: SlackUser object or None
"""
if hasattr(self._bot, 'user_manager'):
user = self._bot.user_manager.get_by_username(username)
if user:
return user
user = SlackUser.get_user(self._bot.sc, username)
self._bot.user_manager.set(user)
return user
return SlackUser.get_user(self._bot.sc, username)
def get_channel(self, channel):
"""
Utility function to query slack for a particular channel
:param channel: The channel name or id of the channel to lookup
:return: SlackChannel object or None
"""
return SlackChannel.get_channel(self._bot.sc, channel)
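# Illustrative sketch (the class, channel name and interval are placeholders):
# a minimal plugin built on BasePlugin that uses start_timer and send_message.
class _ExamplePlugin(BasePlugin):
    def on_load(self):
        # fire self.tick once, one minute after the plugin loads
        self.start_timer(60, self.tick)
        return super(_ExamplePlugin, self).on_load()
    def tick(self):
        self.send_message('#general', 'ping from the example plugin')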
| mit | -3,488,926,449,185,535,000 | 36 | 118 | 0.595007 | false |
linxdcn/iS3 | IS3Py/is3.py | 2 | 7512 | # Copyright (C) 2015 iS3 Software Foundation
# Author: Xiaojun Li
# Contact: [email protected]
import sys
import clr
import System
# Load System.Windows.Media in PresentationCore.dll
sys.path.append('C:\\Program Files (x86)\\Reference Assemblies\\Microsoft\\Framework\\.NETFramework\\v4.5')
prcore = clr.LoadAssemblyFromFile('PresentationCore.dll')
clr.AddReference(prcore)
# Import classes in System
from System import Func,Action
from System.Windows.Media import Colors
from System.Collections.ObjectModel import ObservableCollection
from System.Threading.Tasks import Task
# Load IS3 namespaces
iS3Core = clr.LoadAssemblyFromFile('IS3.Core.dll')
clr.AddReference(iS3Core)
# Import classes in IS3
from IS3.Core import (Globals, Runtime, ErrorReport, ErrorReportTarget,
DGObject, DGObjects,
ProjectDefinition, Project,
EngineeringMap, EngineeringMapType, DrawShapeType,
IView, LayerDef, Domain, DomainType, ToolTreeItem)
from IS3.Core.Geometry import *
from IS3.Core.Graphics import *
def output(text):
print(text)
# Redirect ErrorReport to python cosole
ErrorReport.target = ErrorReportTarget.DelegateConsole
ErrorReport.consoleDelegate = output
# In Windows, UI thread vars and functions are restricted to other threads.
# So, be cautious with python calls to functions in the UI thread.
# Classes in the main UI thread include: mainframe, view, layer, ...
# Therefore, calls to functions in mainframe, view, layer, etc. are restricted.
mainframe = Globals.mainframe # Global var: mainframe
prj = mainframe.prj # Global var: prj
dispatcher = mainframe.Dispatcher # Global var: dispatcher -> UI thread manager
graphicsEngine = Runtime.graphicEngine # Global var: graphics Engine
geometryEngine = Runtime.geometryEngine # Global var: geometry Engine
class MainframeWrapper():
"Define thread safe calls to mainframe methods"
@staticmethod
def addView(emap, canClose = True):
"A thread safe call to -> mainframe.addView(emap, canclose)"
if (Globals.isThreadUnsafe()):
func = Func[EngineeringMap, bool, Task[IView]](mainframe.addView)
view = dispatcher.Invoke(func, emap, canClose)
else:
view = mainframe.addView(emap, canClose)
viewWrapper = ViewWrapper(view.Result)
return viewWrapper
@staticmethod
def loadDomainPanels():
"A thread safe call to -> mainframe.loadDomainPanels()"
if (Globals.isThreadUnsafe()):
dispatcher.Invoke(mainframe.loadDomainPanels)
else:
mainframe.loadDomainPanels()
class ViewWrapper():
"Define thread safe calls to IS3View methods"
def __init__(self, view):
self.view = view
def addLayer(self, layer):
"A thread safe call to -> IS3View.addLayer"
if (Globals.isThreadUnsafe()):
func = Action[IGraphicsLayer](self.view.addLayer)
dispatcher.Invoke(func, layer)
else:
self.view.addLayer(layer)
def addLocalTiledLayer(self, file, id):
"A thread safe call to -> IS3View.addLocalTiledLayer"
if (Globals.isThreadUnsafe()):
func = Action[str, str](self.view.addLocalTiledLayer)
dispatcher.Invoke(func, file, id)
else:
self.view.addLocalTiledLayer(file, id)
def addGdbLayer(self, layerDef, gdbFile, start = 0, maxFeatures = 0):
"A thread safe call to -> IS3View.addGdbLayer"
if (Globals.isThreadUnsafe()):
func = Func[LayerDef, str, int, int, Task[IGraphicsLayer]](self.view.addGdbLayer)
layer = dispatcher.Invoke(func, layerDef, gdbFile, start, maxFeatures)
else:
layer = self.view.addGdbLayer(layerDef, gdbFile, start, maxFeatures)
layerWrapper = GraphicsLayerWrapper(layer.Result)
return layerWrapper
def addShpLayer(self, layerDef, shpFile, start = 0, maxFeatures = 0):
"A thread safe call to -> IS3View.addShpLayer"
if (Globals.isThreadUnsafe()):
func = Func[LayerDef, str, int, int, Task[IGraphicsLayer]](self.view.addShpLayer)
layer = dispatcher.Invoke(func, layerDef, shpFile, start, maxFeatures)
else:
self.view.addShpLayer(layerDef, shpFile, start, maxFeatures)
layerWrapper = GraphicsLayerWrapper(layer.Result)
return layerWrapper
def selectByRect(self):
"A thread safe call to -> IS3View.selectByRect"
if (Globals.isThreadUnsafe()):
dispatcher.Invoke(self.view.selectByRect)
else:
self.view.selectByRect()
class GraphicsLayerWrapper():
"Define thread safe calls to IS3GraphicsLayer methods"
def __init__(self, glayer):
self.layer = glayer
def setRenderer(self, renderer):
"A thread safe call to -> IS3GraphicsLayer.setRenderer"
if (Globals.isThreadUnsafe()):
func = Action[IRenderer](self.layer.setRenderer)
dispatcher.Invoke(func, renderer)
else:
self.layer.setRenderer(renderer)
def addGraphic(self, graphic):
"A thread safe call to -> IS3GraphicsLayer.addGraphic"
if (Globals.isThreadUnsafe()):
func = Action[IGraphic](self.layer.addGraphic)
dispatcher.Invoke(func, graphic)
else:
self.layer.addGraphic(graphic)
def newGraphicsLayer(id, displayName):
layer = graphicsEngine.newGraphicsLayer(id, displayName)
layerWrapper = GraphicsLayerWrapper(layer)
return layerWrapper
def addView3d(id, file):
map3d = EngineeringMap()
map3d.MapID = id
map3d.MapType = EngineeringMapType.Map3D
map3d.LocalMapFileName = file
view3d = MainframeWrapper.addView(map3d, True)
return view3d
def addGdbLayer(viewWrapper, layerDef, gdbFile = None, start = 0, maxFeatures = 0):
prj = Globals.project
layerWrapper = viewWrapper.addGdbLayer(layerDef, gdbFile, start, maxFeatures)
if (layerWrapper.layer == None):
print('addGdbFileELayer failed: ' + layerDef.Name)
return None
else:
print('addGdbFileELayer succeeded: ' + layerDef.Name)
objs = prj.findObjects(layerDef.Name)
if (objs == None):
print('Layer ' + layerDef.Name + ' has no corresponding objects in the project.')
else:
count = layerWrapper.layer.syncObjects(objs)
print('Sync with ' + str(count) + ' objects for layer ' + layerDef.Name)
return layerWrapper
def addGdbLayerLazy(view, name, type, gdbFile = None, start = 0, maxFeatures = 0):
layerDef = LayerDef()
layerDef.Name = name
layerDef.GeometryType = type
layerWrapper = addGdbLayer(view, layerDef, gdbFile, start, maxFeatures)
return layerWrapper
def addShpLayer(viewWrapper, layerDef, shpfile, start = 0, maxFeatures = 0):
prj = Globals.project
layerWrapper = viewWrapper.addShpLayer(layerDef, shpfile, start, maxFeatures)
if (layerWrapper.layer == None):
print('addShpFileELayer failed: ' + layerDef.Name)
return None
else:
print('addShpFileELayer succeeded: ' + layerDef.Name)
objs = prj.findObjects(layerDef.Name)
if (objs == None):
print('Layer ' + layerDef.Name + ' has no corresponding objects in the project.')
else:
count = layerWrapper.layer.syncObjects(objs)
print('Sync with ' + str(count) + ' objects for layer ' + layerDef.Name)
return layerWrapper
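# Illustrative sketch (map id, file name and layer name are placeholders):
# typical use of the helpers above from a background script.
def _example_load_view():
    view3d = addView3d('BaseMap3D', 'Map3D.unity3d')
    layer_def = LayerDef()
    layer_def.Name = 'Boreholes'  # must match an object set defined in the project
    # layer_def.GeometryType would be set from IS3.Core.Geometry as needed.
    addGdbLayer(view3d, layer_def)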
| lgpl-3.0 | -7,984,074,763,647,838,000 | 37.523077 | 107 | 0.676384 | false |
abendig/django-mailchimp | mailchimp/models.py | 1 | 9678 | from django.db import models
import json as simplejson
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from mailchimp.utils import get_connection
class QueueManager(models.Manager):
def queue(self, campaign_type, contents, list_id, template_id, subject,
from_email, from_name, to_email, folder_id=None, tracking_opens=True,
tracking_html_clicks=True, tracking_text_clicks=False, title=None,
authenticate=False, google_analytics=None, auto_footer=False,
auto_tweet=False, segment_options=False, segment_options_all=True,
segment_options_conditions=[], type_opts={}, obj=None, extra_info=[]):
"""
Queue a campaign
"""
kwargs = locals().copy()
kwargs['segment_options_conditions'] = simplejson.dumps(segment_options_conditions)
kwargs['type_opts'] = simplejson.dumps(type_opts)
kwargs['contents'] = simplejson.dumps(contents)
kwargs['extra_info'] = simplejson.dumps(extra_info)
for thing in ('template_id', 'list_id'):
thingy = kwargs[thing]
if hasattr(thingy, 'id'):
kwargs[thing] = thingy.id
del kwargs['self']
del kwargs['obj']
if obj:
kwargs['object_id'] = obj.pk
kwargs['content_type'] = ContentType.objects.get_for_model(obj)
return self.create(**kwargs)
def dequeue(self, limit=None):
if limit:
qs = self.filter(locked=False)[:limit]
else:
qs = self.filter(locked=False)
for obj in qs:
yield obj.send()
def get_or_404(self, *args, **kwargs):
return get_object_or_404(self.model, *args, **kwargs)
class Queue(models.Model):
"""
A FIFO queue for async sending of campaigns
"""
campaign_type = models.CharField(max_length=50)
contents = models.TextField()
list_id = models.CharField(max_length=50)
template_id = models.PositiveIntegerField()
subject = models.CharField(max_length=255)
from_email = models.EmailField()
from_name = models.CharField(max_length=255)
to_email = models.EmailField()
folder_id = models.CharField(max_length=50, null=True, blank=True)
tracking_opens = models.BooleanField(default=True)
tracking_html_clicks = models.BooleanField(default=True)
tracking_text_clicks = models.BooleanField(default=False)
title = models.CharField(max_length=255, null=True, blank=True)
authenticate = models.BooleanField(default=False)
google_analytics = models.CharField(max_length=100, blank=True, null=True)
auto_footer = models.BooleanField(default=False)
generate_text = models.BooleanField(default=False)
auto_tweet = models.BooleanField(default=False)
segment_options = models.BooleanField(default=False)
segment_options_all = models.BooleanField(default=False)
segment_options_conditions = models.TextField()
type_opts = models.TextField()
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = generic.GenericForeignKey('content_type', 'object_id')
extra_info = models.TextField(null=True)
locked = models.BooleanField(default=False)
objects = QueueManager()
def send(self):
"""
send (schedule) this queued object
"""
# check lock
if self.locked:
return False
# aquire lock
self.locked = True
self.save()
# get connection and send the mails
c = get_connection()
tpl = c.get_template_by_id(self.template_id)
content_data = dict([(str(k), v) for k,v in simplejson.loads(self.contents).items()])
built_template = tpl.build(**content_data)
tracking = {'opens': self.tracking_opens,
'html_clicks': self.tracking_html_clicks,
'text_clicks': self.tracking_text_clicks}
if self.google_analytics:
analytics = {'google': self.google_analytics}
else:
analytics = {}
segment_opts = {'match': 'all' if self.segment_options_all else 'any',
'conditions': simplejson.loads(self.segment_options_conditions)}
type_opts = simplejson.loads(self.type_opts)
title = self.title or self.subject
camp = c.create_campaign(self.campaign_type, c.get_list_by_id(self.list_id),
built_template, self.subject, self.from_email, self.from_name,
self.to_email, self.folder_id, tracking, title, self.authenticate,
analytics, self.auto_footer, self.generate_text, self.auto_tweet,
segment_opts, type_opts)
if camp.send_now_async():
self.delete()
kwargs = {}
if self.content_type and self.object_id:
kwargs['content_type'] = self.content_type
kwargs['object_id'] = self.object_id
if self.extra_info:
kwargs['extra_info'] = simplejson.loads(self.extra_info)
return Campaign.objects.create(camp.id, segment_opts, **kwargs)
# release lock if failed
self.locked = False
self.save()
return False
def get_dequeue_url(self):
return reverse('mailchimp_dequeue', kwargs={'id': self.id})
def get_cancel_url(self):
return reverse('mailchimp_cancel', kwargs={'id': self.id})
def get_list(self):
return get_connection().lists[self.list_id]
@property
def object(self):
"""
The object might have vanished until now, so triple check that it's there!
"""
if self.object_id:
model = self.content_type.model_class()
try:
return model.objects.get(id=self.object_id)
except model.DoesNotExist:
return None
return None
def get_object_admin_url(self):
if not self.object:
return ''
name = 'admin:%s_%s_change' % (self.object._meta.app_label,
self.object._meta.module_name)
return reverse(name, args=(self.object.pk,))
def can_dequeue(self, user):
if user.is_superuser:
return True
if not user.is_staff:
return False
if callable(getattr(self.object, 'mailchimp_can_dequeue', None)):
return self.object.mailchimp_can_dequeue(user)
return user.has_perm('mailchimp.can_send') and user.has_perm('mailchimp.can_dequeue')
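# Illustrative sketch (all argument values are placeholders): queueing a
# campaign and later draining the queue, e.g. from a periodic task.
def _example_send_newsletter(template_id, list_id, contents):
    Queue.objects.queue(
        campaign_type='regular',
        contents=contents,
        list_id=list_id,
        template_id=template_id,
        subject='Monthly newsletter',
        from_email='[email protected]',
        from_name='Example Site',
        to_email='[email protected]',
    )
    # Later: send everything that is not locked. Each iteration yields a
    # Campaign instance on success, or False if sending failed.
    for result in Queue.objects.dequeue():
        pass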
class CampaignManager(models.Manager):
def create(self, campaign_id, segment_opts, content_type=None, object_id=None,
extra_info=[]):
con = get_connection()
camp = con.get_campaign_by_id(campaign_id)
extra_info = simplejson.dumps(extra_info)
obj = self.model(content=camp.content, campaign_id=campaign_id,
name=camp.title, content_type=content_type, object_id=object_id,
extra_info=extra_info)
obj.save()
segment_opts = dict([(str(k), v) for k,v in segment_opts.items()])
for email in camp.list.filter_members(segment_opts):
Reciever.objects.create(campaign=obj, email=email)
return obj
def get_or_404(self, *args, **kwargs):
return get_object_or_404(self.model, *args, **kwargs)
class DeletedCampaign(object):
subject = u'<deleted from mailchimp>'
class Campaign(models.Model):
sent_date = models.DateTimeField(auto_now_add=True)
campaign_id = models.CharField(max_length=50)
content = models.TextField()
name = models.CharField(max_length=255)
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = generic.GenericForeignKey('content_type', 'object_id')
extra_info = models.TextField(null=True)
objects = CampaignManager()
class Meta:
ordering = ['-sent_date']
permissions = [('can_view', 'Can view Mailchimp information'),
('can_send', 'Can send Mailchimp newsletters')]
verbose_name = _('Mailchimp Log')
verbose_name_plural = _('Mailchimp Logs')
def get_absolute_url(self):
return reverse('mailchimp_campaign_info', kwargs={'campaign_id': self.campaign_id})
def get_object_admin_url(self):
if not self.object:
return ''
name = 'admin:%s_%s_change' % (self.object._meta.app_label,
self.object._meta.module_name)
return reverse(name, args=(self.object.pk,))
def get_extra_info(self):
if self.extra_info:
return simplejson.loads(self.extra_info)
return []
@property
def object(self):
"""
The object might have vanished until now, so triple check that it's there!
"""
if self.object_id:
model = self.content_type.model_class()
try:
return model.objects.get(id=self.object_id)
except model.DoesNotExist:
return None
return None
@property
def mc(self):
try:
if not hasattr(self, '_mc'):
self._mc = get_connection().get_campaign_by_id(self.campaign_id)
return self._mc
except:
return DeletedCampaign()
class Reciever(models.Model):
campaign = models.ForeignKey(Campaign, related_name='recievers')
email = models.EmailField()
| bsd-3-clause | -397,447,282,722,257,540 | 37.712 | 93 | 0.626886 | false |
bashu/fluentcms-filer | fluentcms_filer/file/south_migrations/0001_initial.py | 1 | 10131 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FilerFileItem'
db.create_table(u'contentitem_file_filerfileitem', (
(u'contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)),
('file', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.File'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('target', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),
))
db.send_create_signal(u'file', ['FilerFileItem'])
def backwards(self, orm):
# Deleting model 'FilerFileItem'
db.delete_table(u'contentitem_file_filerfileitem')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'file.filerfileitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'FilerFileItem', 'db_table': "u'contentitem_file_filerfileitem'", '_ormbases': ['fluent_contents.ContentItem']},
u'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['filer.File']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'})
},
u'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set+'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'filer.folder': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'fluent_contents.contentitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'ContentItem'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '15', 'db_index': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentitems'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['fluent_contents.Placeholder']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_fluent_contents.contentitem_set+'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
},
'fluent_contents.placeholder': {
'Meta': {'unique_together': "(('parent_type', 'parent_id', 'slot'),)", 'object_name': 'Placeholder'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'m'", 'max_length': '1'}),
'slot': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['file'] | apache-2.0 | -6,681,037,321,661,970,000 | 82.04918 | 210 | 0.566183 | false |
WilliamDiakite/ExperimentationsACA | processing/lsa.py | 1 | 3364 |
import os
import sys
import itertools
import operator
import nltk
import numpy as np
import matplotlib.pyplot as plt
from nltk.util import ngrams
from collections import Counter
from spell_checker import SpellChecker
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
sys.path.insert(0, '/Users/diakite_w/Documents/Dev/ExperimentationsACA/FrenchLefffLemmatizer')
from FrenchLefffLemmatizer import FrenchLefffLemmatizer
def extract_ngrams(documents, n):
'''
    Return a Counter of n-grams aggregated over all documents
'''
chained_documents = list(itertools.chain.from_iterable(documents))
return Counter(ngrams(chained_documents, n))
def tokenize(text):
fll = FrenchLefffLemmatizer()
splck = SpellChecker()
contracted_pronouns = ["l'", "m'", "n'", "d'", "c'", "j'", "qu'", "s'"]
dictionnary = []
stopwords = [w.rstrip() for w in open('stopwords-fr.txt')]
# Put everything to lower case
text = text.lower()
# Tokenize text
tokens = nltk.tokenize.word_tokenize(text)
    print('Number of tokens in the text:', len(tokens))
#tokens = [splck.correct(t) if t not in dictionnary else t for t in tokens]
    # Remove contracted pronouns from tokens
tokens = [t[2:] if t[:2] in contracted_pronouns else t for t in tokens]
tokens = [t for t in tokens if len(t) > 2]
tokens = [t for t in tokens if t not in stopwords]
tokens = [fll.lemmatize(t) for t in tokens]
    print('Number of tokens after processing:', len(tokens), '\n')
return tokens
def tokens_to_vec(tokens):
vec = np.zeros(len(word_index_map))
for token in tokens:
idx = word_index_map[token]
vec[idx] = 1
return vec
def read_txt(textfile):
with open(textfile, 'r') as f:
text = f.read()
text = text.replace('\n', ' ')
text = text.replace('- ', '')
text = text.replace('.', '')
text = text.replace('-', '')
text = text.replace("‘l'", 'ï')
return text
def get_all_doc(directory):
'''
    Read all txt documents and return them as a list of strings
'''
documents = []
counter = 1
for filename in os.listdir(directory):
if filename.endswith('.txt'):
print('\n[...] Reading document', counter)
filename = 'data/' + filename
documents.append(read_txt(filename))
counter += 1
return documents
documents = get_all_doc('data/')
all_tokens = [tokenize(doc) for doc in documents]
vocabulary = list(set(itertools.chain.from_iterable(all_tokens)))
print ('\nVocab size:', len(vocabulary))
# Computing n-grams
bigrams = extract_ngrams(all_tokens, 2)
trigrams = extract_ngrams(all_tokens, 3)
[print(t) for t in trigrams.most_common(5)]
print('\n')
[print(t) for t in bigrams.most_common(10)]
'''
# Key: word - value: index
word_index_map = {j: i for i, j in enumerate(vocabulary)}
# Key: index - value: word
index_word_map = sorted(word_index_map.items(), key=operator.itemgetter(1))
index_word_map = [t[0] for t in index_word_map]
N = len(documents)
D = len(word_index_map)
X = np.zeros((D,N))
i = 0
for tokens in all_tokens:
X[:,i] = tokens_to_vec(tokens)
i += 1
print(X.shape)
svd = TruncatedSVD()
Z = svd.fit_transform(X)
print('Z shape', Z.shape)
plt.scatter(Z[:,0], Z[:,1])
print('D:', D)
for i in range(D):
plt.annotate(s=index_word_map[i], xy=(Z[i,0], Z[i,1]))
plt.show()
'''
| mit | -8,408,729,182,994,486,000 | 20.96732 | 94 | 0.664683 | false |
babble/babble | include/jython/Lib/asyncore.py | 1 | 17033 | # -*- Mode: Python -*-
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <[email protected]>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. it's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.
If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""
import exceptions
import select
import socket
import sys
import time
import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
ENOTCONN, ESHUTDOWN, EINTR, EISCONN
try:
socket_map
except NameError:
socket_map = {}
class ExitNow(exceptions.Exception):
pass
def read(obj):
try:
obj.handle_read_event()
except ExitNow:
raise
except:
obj.handle_error()
def write(obj):
try:
obj.handle_write_event()
except ExitNow:
raise
except:
obj.handle_error()
def readwrite(obj, flags):
try:
if flags & select.POLLIN:
obj.handle_read_event()
if flags & select.POLLOUT:
obj.handle_write_event()
except ExitNow:
raise
except:
obj.handle_error()
def poll(timeout=0.0, map=None):
if map is None:
map = socket_map
if map:
r = []; w = []; e = []
for fd, obj in map.items():
if obj.readable():
r.append(fd)
if obj.writable():
w.append(fd)
if [] == r == w == e:
time.sleep(timeout)
else:
try:
r, w, e = select.select(r, w, e, timeout)
except select.error, err:
if err[0] != EINTR:
raise
else:
return
for fd in r:
obj = map.get(fd)
if obj is None:
continue
read(obj)
for fd in w:
obj = map.get(fd)
if obj is None:
continue
write(obj)
def poll2(timeout=0.0, map=None):
import poll
if map is None:
map = socket_map
if timeout is not None:
# timeout is in milliseconds
timeout = int(timeout*1000)
if map:
l = []
for fd, obj in map.items():
flags = 0
if obj.readable():
flags = poll.POLLIN
if obj.writable():
flags = flags | poll.POLLOUT
if flags:
l.append((fd, flags))
r = poll.poll(l, timeout)
for fd, flags in r:
obj = map.get(fd)
if obj is None:
continue
readwrite(obj, flags)
def poll3(timeout=0.0, map=None):
# Use the poll() support added to the select module in Python 2.0
if map is None:
map = socket_map
if timeout is not None:
# timeout is in milliseconds
timeout = int(timeout*1000)
pollster = select.poll()
if map:
for fd, obj in map.items():
flags = 0
if obj.readable():
flags = select.POLLIN
if obj.writable():
flags = flags | select.POLLOUT
if flags:
pollster.register(fd, flags)
try:
r = pollster.poll(timeout)
except select.error, err:
if err[0] != EINTR:
raise
r = []
for fd, flags in r:
obj = map.get(fd)
if obj is None:
continue
readwrite(obj, flags)
def loop(timeout=30.0, use_poll=0, map=None):
if map is None:
map = socket_map
if use_poll:
if hasattr(select, 'poll'):
poll_fun = poll3
else:
poll_fun = poll2
else:
poll_fun = poll
while map:
poll_fun(timeout, map)
class dispatcher:
debug = 0
connected = 0
accepting = 0
closing = 0
addr = None
def __init__(self, sock=None, map=None):
if sock:
self.set_socket(sock, map)
# I think it should inherit this anyway
self.socket.setblocking(0)
self.connected = 1
# XXX Does the constructor require that the socket passed
# be connected?
try:
self.addr = sock.getpeername()
except socket.error:
# The addr isn't crucial
pass
else:
self.socket = None
def __repr__(self):
status = [self.__class__.__module__+"."+self.__class__.__name__]
if self.accepting and self.addr:
status.append('listening')
elif self.connected:
status.append('connected')
if self.addr is not None:
try:
status.append('%s:%d' % self.addr)
except TypeError:
status.append(repr(self.addr))
# On some systems (RH10) id() can be a negative number.
# work around this.
MAX = 2L*sys.maxint+1
return '<%s at %#x>' % (' '.join(status), id(self)&MAX)
def add_channel(self, map=None):
#self.log_info('adding channel %s' % self)
if map is None:
if hasattr(self, '_map'):
map = self._map
del self._map
else:
map = socket_map
if not hasattr(self, '_fileno'):
self._fileno = self.socket.fileno()
map[self._fileno] = self
def del_channel(self, map=None):
fd = self._fileno
if map is None:
map = socket_map
if map.has_key(fd):
#self.log_info('closing channel %d:%s' % (fd, self))
del map[fd]
def create_socket(self, family, type):
self.family_and_type = family, type
self.socket = socket.socket(family, type)
self.socket.setblocking(0)
def set_socket(self, sock, map=None):
self.socket = sock
## self.__dict__['socket'] = sock
if sock.fileno():
self.add_channel(map)
else:
self._map = map
def set_reuse_addr(self):
# try to re-use a server port if possible
try:
self.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR,
self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR) | 1
)
except socket.error:
pass
# ==================================================
# predicates for select()
# these are used as filters for the lists of sockets
# to pass to select().
# ==================================================
def readable(self):
return True
if os.name == 'mac':
# The macintosh will select a listening socket for
# write if you let it. What might this mean?
def writable(self):
return not self.accepting
else:
def writable(self):
return True
# ==================================================
# socket object methods.
# ==================================================
def listen(self, num):
self.accepting = 1
if os.name == 'nt' and num > 5:
num = 1
ret = self.socket.listen(num)
self.add_channel()
return ret
def bind(self, addr):
self.addr = addr
return self.socket.bind(addr)
def connect(self, address):
self.connected = 0
err = self.socket.connect_ex(address)
# XXX Should interpret Winsock return values
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
return
if err in (0, EISCONN):
self.add_channel()
self.addr = address
self.connected = 1
self.handle_connect()
else:
raise socket.error, err
def accept(self):
# XXX can return either an address pair or None
try:
conn, addr = self.socket.accept()
self.add_channel()
return conn, addr
except socket.error, why:
if why[0] == EWOULDBLOCK:
pass
else:
raise socket.error, why
def send(self, data):
try:
result = self.socket.send(data)
return result
except socket.error, why:
if why[0] == EWOULDBLOCK:
return 0
else:
raise socket.error, why
return 0
def recv(self, buffer_size):
try:
data = self.socket.recv(buffer_size)
if not data:
# a closed connection is indicated by signaling
# a read condition, and having recv() return 0.
self.handle_close()
return ''
else:
return data
except socket.error, why:
# winsock sometimes throws ENOTCONN
if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
self.handle_close()
return ''
else:
raise socket.error, why
def close(self):
self.del_channel()
self.socket.close()
# cheap inheritance, used to pass all other attribute
# references to the underlying socket object.
def __getattr__(self, attr):
return getattr(self.socket, attr)
# log and log_info may be overridden to provide more sophisticated
# logging and warning methods. In general, log is for 'hit' logging
# and 'log_info' is for informational, warning and error logging.
def log(self, message):
sys.stderr.write('log: %s\n' % str(message))
def log_info(self, message, type='info'):
if __debug__ or type != 'info':
print '%s: %s' % (type, message)
def handle_read_event(self):
if self.accepting:
# for an accepting socket, getting a read implies
# that we are connected
if not self.connected:
self.connected = 1
self.handle_accept()
elif not self.connected:
self.handle_connect()
self.connected = 1
self.handle_read()
else:
self.handle_read()
def handle_write_event(self):
# getting a write implies that we are connected
if not self.connected:
self.handle_connect()
self.connected = 1
self.handle_write()
def handle_expt_event(self):
self.handle_expt()
def handle_error(self):
nil, t, v, tbinfo = compact_traceback()
# sometimes a user repr method will crash.
try:
self_repr = repr(self)
except:
self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
self.log_info(
'uncaptured python exception, closing channel %s (%s:%s %s)' % (
self_repr,
t,
v,
tbinfo
),
'error'
)
self.close()
def handle_expt(self):
self.log_info('unhandled exception', 'warning')
def handle_read(self):
self.log_info('unhandled read event', 'warning')
def handle_write(self):
self.log_info('unhandled write event', 'warning')
def handle_connect(self):
self.log_info('unhandled connect event', 'warning')
def handle_accept(self):
self.log_info('unhandled accept event', 'warning')
def handle_close(self):
self.log_info('unhandled close event', 'warning')
self.close()
# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------
class dispatcher_with_send(dispatcher):
def __init__(self, sock=None):
dispatcher.__init__(self, sock)
self.out_buffer = ''
def initiate_send(self):
num_sent = 0
num_sent = dispatcher.send(self, self.out_buffer[:512])
self.out_buffer = self.out_buffer[num_sent:]
def handle_write(self):
self.initiate_send()
def writable(self):
return (not self.connected) or len(self.out_buffer)
def send(self, data):
if self.debug:
self.log_info('sending %s' % repr(data))
self.out_buffer = self.out_buffer + data
self.initiate_send()
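# Example (mirrors the client pattern from the asyncore documentation; the
# host below is a placeholder): a minimal HTTP client built on dispatcher.
# Instantiate it and then call loop() to drive it to completion.
class _example_http_client(dispatcher):
    def __init__(self, host, path):
        dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect((host, 80))
        self.buffer = 'GET %s HTTP/1.0\r\n\r\n' % path
    def handle_connect(self):
        pass
    def handle_close(self):
        self.close()
    def handle_read(self):
        sys.stdout.write(self.recv(8192))
    def writable(self):
        return (len(self.buffer) > 0)
    def handle_write(self):
        sent = self.send(self.buffer)
        self.buffer = self.buffer[sent:]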
# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback():
t, v, tb = sys.exc_info()
tbinfo = []
assert tb # Must have a traceback
while tb:
tbinfo.append((
tb.tb_frame.f_code.co_filename,
tb.tb_frame.f_code.co_name,
str(tb.tb_lineno)
))
tb = tb.tb_next
# just to be safe
del tb
file, function, line = tbinfo[-1]
info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
return (file, function, line), t, v, info
def close_all(map=None):
if map is None:
map = socket_map
for x in map.values():
x.socket.close()
map.clear()
# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...
if os.name == 'posix':
import fcntl
class file_wrapper:
# here we override just enough to make a file
# look like a socket for the purposes of asyncore.
def __init__(self, fd):
self.fd = fd
def recv(self, *args):
return os.read(self.fd, *args)
def send(self, *args):
return os.write(self.fd, *args)
read = recv
write = send
def close(self):
return os.close(self.fd)
def fileno(self):
return self.fd
class file_dispatcher(dispatcher):
def __init__(self, fd):
dispatcher.__init__(self)
self.connected = 1
# set it to non-blocking mode
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
self.set_file(fd)
def set_file(self, fd):
self._fileno = fd
self.socket = file_wrapper(fd)
self.add_channel()
| apache-2.0 | 6,117,678,477,258,131,000 | 29.361854 | 78 | 0.540422 | false |
pjh/vm-analyze | analyze/ip_to_fn.py | 1 | 21352 | # Virtual memory analysis scripts.
# Developed 2012-2014 by Peter Hornyack, [email protected]
# Copyright (c) 2012-2014 Peter Hornyack and University of Washington
# This file contains methods that implement a wrapper around the
# binutils "addr2line" utility, which can be used to look up instruction
# pointer values in executable files and shared object files to find
# the function (and sometimes the source code file + line number) that
# contains the ip.
# Note that each instance of "addr2line -e /path/to/binary..." will load
# that entire binary into memory while it runs; this is annoying for
# enormous binaries like firefox's libxul.so.
from util.pjh_utils import *
from analyze.vm_mapping_class import UNKNOWN_FN
import fcntl
import os
import shlex
import subprocess
import sys
import time
cache_addr2line_lookups = True
# With caching disabled, less memory will be consumed, but it will take
# 14 minutes to analyze the function lookups of a firefox trace. With
# caching enabled, the analysis only takes 2 minutes.
addr2line_prog = '/usr/bin/addr2line'
file_prog = '/usr/bin/file'
linux_code_startaddr = int("0x400000", 16)
# On x86_64 Linux anyway, all non-relocatable executables are loaded
# into virtual address space at this address, I believe.
# Given the filename of an executable file or a shared object file,
# determines if the file is relocatable. All shared object files should
# be relocatable, and most executable files are non-relocatable, but it
# is possible to build "position independent executables" (see the "-fpic"
# and "-pie" flags in gcc(1)).
#
# This method is intended to be used when determining function names
# from instruction pointers using addr2line: if the file is relocatable,
# then an absolute ip should have the address of the file's memory mapping
# subtracted from it before passing it to addr2line. If the file is not
# relocatable, then the absolute ip can be passed directly to addr2line.
# Note that this method must create a child subprocess to check the file,
# so try not to call it too often.
#
# Returns: True/False if object file is relocatable or not, or None if an
# error occurred.
def is_objfile_relocatable(name):
tag = 'is_objfile_relocatable'
global file_prog
# Command line that I think makes sense:
# file -e apptype -e ascii -e encoding -e tokens -e cdf -e elf -e tar
# -bn <filename>
# This should return one of the following strings, indicating that the
# file is relocatable or not:
# ELF 64-bit LSB shared object, x86-64, version 1 (SYSV)
# ELF 64-bit LSB executable, x86-64, version 1 (SYSV)
# (even position-independent executables will be described as "shared
# object").
filecmd = ("{} -e apptype -e ascii -e encoding -e tokens -e cdf "
"-e elf -e tar -bn {}").format(file_prog, name)
# don't use -p flag, so that output will *always* have two lines
fileargs = shlex.split(filecmd)
print_debug(tag, ("fileargs: {}").format(fileargs))
p = subprocess.Popen(fileargs, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if not p:
print_error(tag, "Popen failed for command {}".format(filecmd))
return None
# communicate() will wait for the process to terminate and will
# read its output. A "timeout" arg was added for Python 3.3, but
# verbena is only running 3.2.3 right now, so hope that the process
# will always terminate.
(out, err) = p.communicate()
#retcode = p.poll() # unnecessary, I think
#retcode = p.wait() # unnecessary, I think
retcode = p.returncode
if retcode is None:
print_error(tag, ("unexpected: got a None retcode - subprocess "
"has not terminated yet?!").format())
return None
elif retcode != 0:
print_error(tag, ("file command returned a non-zero error code: "
"{}").format(retcode))
return None
if out:
# Convert from bytes back to string:
out = out.decode('utf-8').strip()
else:
print_error(tag, "got no output from file subprocess")
return None
if err:
err = err.decode('utf-8').strip()
else:
err = ''
print_debug(tag, ("call to file subprocess succeeded, got stdout "
"{} and stderr {}").format(out, err))
# It's probably not necessary to define the expected output strings
# so strictly here, but this will cause an error if we ever e.g.
# move to a different architecture, at which point we can double-
# check this code to make sure it makes sense for non-x86-64.
# Ah, I already found one thing that's not consistent: some files
# are "version 1 (SYSV)", others are "version 1 (GNU/Linux)".
reloc_str = 'ELF 64-bit LSB shared object, x86-64, version 1'
nonreloc_str = 'ELF 64-bit LSB executable, x86-64, version 1'
if reloc_str in out:
print_debug(tag, ("relocatable: {}").format(reloc_str))
return True
elif nonreloc_str in out:
print_debug(tag, ("nonrelocatable: {}").format(nonreloc_str))
return False
print_error(tag, ("unexpected output \"{}\", doesn't match "
"expected output from file command").format(out))
print_error(tag, ("output: {}").format(repr(out)))
print_error(tag, ("reloc_str: {}").format(repr(reloc_str)))
print_error(tag, ("nonreloc_str: {}").format(repr(nonreloc_str)))
return None
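# Illustrative sketch (not part of the original script): how the flag returned
# above is typically consumed.  For a relocatable object mapped at
# vma_start_addr, the absolute instruction pointer must be rebased before it
# is handed to addr2line; for a fixed-address (non-PIE) executable the
# absolute ip is used unchanged.  The helper name and the example addresses in
# the comments are hypothetical.
def _example_rebase_ip(abs_ip, vma_start_addr, relocatable):
    if relocatable:
        # e.g. 0x7f3a2c0155f0 - 0x7f3a2bff0000 -> offset within the .so
        return abs_ip - vma_start_addr
    # e.g. 0x4006d9 in a binary loaded at the fixed 0x400000 base
    return abs_ip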
##############################################################################
# Creates an addr2line instance (subprocess) for a particular code module
# (executable file or shared object file).
# This class probably shouldn't be used directly; use the ip_to_fn_converter
# class below instead.
class addr2line_module:
tag = 'addr2line_module'
# Members:
objname = None
relocatable = None
a2l = None # Popen class instance representing an addr2line subprocess
cache = None
def __init__(self, objname):
tag = "{}.__init__".format(self.tag)
if not objname:
print_error_exit(tag, "must provide an object name")
self.objname = objname
self.tag = "addr2line_module-{}".format(objname)
self.relocatable = is_objfile_relocatable(objname)
if self.relocatable is None:
#print_error_exit(tag, ("is_objfile_relocatable() returned "
# "error, not sure how to handle gracefully inside of "
# "this constructor so aborting.").format())
print_error(tag, ("is_objfile_relocatable() returned "
"error, not sure how to handle gracefully inside of "
"this constructor so aborting...").format())
return None
elif self.relocatable is True:
print_debug(tag, ("determined that object file {} is "
"relocatable, will subtract vma_start_addr from ips "
"passed to this addr2line_module").format(objname))
else:
print_debug(tag, ("determined that object file {} is "
"not relocatable, will use absolute ips that are passed "
"to this addr2line_module").format(objname))
ret = self.start_addr2line()
if ret != 0:
print_error_exit(tag, ("failed to start addr2line "
"subprocess").format())
self.cache = dict()
return
# Returns: the fn corresponding to this ip if it is found in the
# cache map, or None if not found.
def cache_lookup(self, ip):
tag = "{}.cache_lookup".format(self.tag)
try:
fn = self.cache[ip]
except KeyError:
return None
return fn
# Inserts the specified ip, fn pair into the addr2line "cache" for
# this module.
# "Cache" isn't quite the right term, as nothing is ever evicted;
# it's just a dictionary...
def cache_insert(self, ip, fn):
tag = "{}.cache_insert".format(self.tag)
try:
fn = self.cache[ip]
print_error_exit(tag, ("unexpected: already a cache entry "
"for ip {} -> {}").format(hex(ip), fn))
except KeyError:
self.cache[ip] = fn
print_debug(tag, ("cache insert {} -> {}").format(hex(ip), fn))
return
# Passes the specified ip to addr2line and returns the function that
# it corresponds to, if found.
# ip should be a base-10 integer!
# Returns: the function name if addr2line was able to lookup the ip
# successfully, or '' if addr2line was unsuccessful. Returns None
# on error.
def ip_to_fn(self, ip, vma_start_addr):
tag = "{}.ip_to_fn".format(self.tag)
global linux_code_startaddr
global cache_addr2line_lookups
if not self.a2l:
print_debug(tag, ("self.a2l is None, addr2line subprocess "
"is already terminated (or was never started)").format())
return None
if type(ip) != int:
print_error(tag, ("ip argument {} is not an int").format(ip))
return None
if vma_start_addr is None or type(vma_start_addr) != int:
print_error(tag, ("invalid vma_start_addr: {}").format(
vma_start_addr))
return None
# For relocatable object files, we must subtract the vma start
# addr (the address where the file was mapped into the process'
# address space) from the ip, which is assumed to be an absolute
# ip from an execution's userstacktrace. For non-relocatable
# executables, we directly use the absolute ip.
if self.relocatable:
#print_debug(tag, ("file {} is relocatable, so subtracting "
# "vma_start_addr {} from absolute ip {} to get ip for "
# "addr2line function lookup: {}").format(self.objname,
# hex(vma_start_addr), hex(ip), hex(ip - vma_start_addr)))
if vma_start_addr > ip:
print_error_exit(tag, ("unexpected: vma_start_addr {} "
"> ip {}").format(hex(vma_start_addr), hex(ip)))
ip -= vma_start_addr
else:
#print_debug(tag, ("file {} is not relocatable, so directly "
# "using absolute ip {} and ignoring vma_start_addr "
# "{}").format(self.objname, hex(ip), hex(vma_start_addr)))
if vma_start_addr != linux_code_startaddr:
print_error_exit(tag, ("file is non-relocatable, but "
"its start addr {} doesn't match expected value for "
"64-bit Linux, {} - is this expected?").format(
hex(vma_start_addr), hex(linux_code_startaddr)))
# See if we've already looked up this ip for this module.
# Important: this must come after the ip is offset for relocatable
# modules; ip must not change between now and when it is inserted
# into the cache below.
if cache_addr2line_lookups:
cache_lookup_ip = ip # for sanity checking
fn = self.cache_lookup(ip)
if fn:
print_debug(tag, ("cache hit: ip {} -> fn '{}'").format(
hex(ip), fn))
else:
print_debug(tag, ("cache miss: ip {}").format(hex(ip)))
# Communicate with addr2line process if cache lookups are disabled
# or the cache lookup just missed.
if not cache_addr2line_lookups or fn is None:
# Stupidly, it appears that Python's subprocess module can't
# be used to communicate multiple times with an interactive
# subprocess.
# http://docs.python.org/3/library/subprocess.html#subprocess.Popen.communicate
# http://stackoverflow.com/questions/3065060/communicate-multiple-times-with-a-process-without-breaking-the-pipe
# http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
# http://stackoverflow.com/questions/11457931/running-an-interactive-command-from-within-python
# It appears that the subprocess' stdin and stdout can just be
# written and read directly instead. It appears that the input
# string written to stdin must be converted to bytes first, and
# then any output read from stdout must be converted from a byte
# string back to a standard str as well.
#print_debug(tag, ("addr2line: lookup ip {} in object file "
# "{}").format(hex(ip), self.objname))
ip_input = """{}
""".format(hex(ip))
# send Enter keypress: to enter in vim insert mode, hit
# Ctrl-v first
self.a2l.stdin.write(bytearray(ip_input, 'utf-8'))
#print_debug(tag, "a2l.stdin.write returned")
# Read the output from addr2line:
# http://docs.python.org/3/tutorial/inputoutput.html#methods-of-file-objects
# If self.a2l.stdout.readline() is used to read lines of output
# here, then after reading all of the lines, the next call to
# readline() will block forever. A possible workaround is to
# always just call readline() exactly twice, since that's what
# we expect addr2line's output to be, but this seems fragile.
# Instead, can we just call read(), which will read "the entire
# contents of the file"? This will block as well, since there
# is no EOF at the end of the output. According to some stack
# overflow answer for providing non-blocking reads in Python,
# we may be able to use the fcntl module to mark file
# descriptors as non-blocking.
# http://stackoverflow.com/a/1810703/1230197
# This seems to work a little better, although now the problem
# is that after writing to stdin, the python script here will
# likely attempt to read stdout before addr2line has had a
# chance to write to it. The problem is that we want to block
# <a little bit>, but not forever...
# Fragile but working solution: keep reading until two newlines
# have been encountered, or until the process has terminated.
# As far as I can tell addr2line will always return two lines
# of output when started with the "-Cif" flags, even if
# gibberish input is provided.
# $ addr2line -e test-programs/hello-world -Cif
# 1234
# ??
# ??:0
# 0x4006d9
# _start
# ??:0
fd = self.a2l.stdout.fileno()
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
output = ""
linecount = 0
loopcount = 0
while linecount < 2:
# In practice, it looks like this loop may run one or more
# times (e.g. 41 times) without reading anything from
# self.a2l.stdout, but then when there is data available
# for reading, it is all available at once (both lines that
# we expect).
bytestr = self.a2l.stdout.read()
if bytestr and len(bytestr) > 0:
buf = bytestr.decode('utf-8')
output += buf
linecount = len(output.splitlines())
if False:
# When this code is enabled and stderr is set to
# subprocess.PIPE when self.a2l if Popen'd, it
# didn't seem to help - stderr.read() here never
# ever returns.
bytestrerr = self.a2l.stderr.read()
if bytestrerr and len(bytestrerr) > 0:
buf = bytestrerr.decode('utf-8')
output += buf
linecount = len(output.splitlines())
print_error_exit(tag, ("stderr.read(): output={}, "
"linecount={}").format(output, linecount))
print_error_exit(tag, ("BUMMER: this code was broken for "
"some reason after upgrading from Ubuntu 12.04 to 13.04 "
"(or something else broke it, but I'm not sure what); "
"perhaps due to python3 upgrade, or maybe a change to "
"addr2line. In the loop below, the stdout.read() never "
"actually returns anything, and we will just loop "
"here forever.").format())
loopcount += 1
if loopcount % 50000 == 0:
# Lookup time appears to depend on the size of the object
# file, which makes sense I guess; for a test lookup in
# my version of libc, I saw loopcount up to 10,000.
#print_debug(tag, ("loopcount is {}, checking if "
# "addr2line is still alive").format(loopcount))
self.a2l.poll()
if self.a2l.returncode:
print_error(tag, ("addr2line subprocess has "
"terminated with retcode {}, returning error "
"from this fn").format(self.a2l.returncode))
return None
else:
print_debug(tag, ("addr2line subprocess is still "
"alive, will keep looping; output buffer so far "
"is {}").format(output))
pass
lines = output.splitlines()
# Ok, now, if addr2line was able to lookup the function name, it
# should be found in the first line of output; if not, then it
# should have printed "??".
fn = lines[0].strip()
if cache_addr2line_lookups:
if ip != cache_lookup_ip:
print_error_exit(tag, ("cache_insert ip {} doesn't match "
"cache_lookup_ip {}").format(hex(ip),
hex(cache_lookup_ip)))
self.cache_insert(ip, fn)
# This needs to happen for both the cache hit case and the
# just-looked-it-up case.
if '?' in fn:
#print_debug(tag, ("got unknown fn '{}' returned from addr2line, "
# "will return empty string from this fn").format(fn))
fn = ''
else:
#print_debug(tag, ("got fn '{}' from addr2line output {}").format(
# fn, output.replace('\n', '')))
pass
return fn
# The user should try to remember to call this function explicitly
# when done using the instance of the class, but if the user forgets,
# then the destructor (__del__) should eventually perform the same
# cleanup operations (i.e. terminating the addr2line process).
def close(self):
tag = "{}.close".format(self.tag)
self.stop_addr2line()
self.objname = None
self.relocatable = None
self.cache = None
return
# "private" method:
# Starts an instance of the addr2line program for converting ips into
# function names. Returns: 0 on success, -1 on error.
def start_addr2line(self):
tag = "{}.start_addr2line".format(self.tag)
global addr2line_prog
a2lcmd = ("{} -e {} -Cif").format(addr2line_prog, self.objname)
# don't use -p flag, so that output will *always* have two lines
a2largs = shlex.split(a2lcmd)
print_debug(tag, ("a2largs: {}").format(a2largs))
self.a2l = subprocess.Popen(a2largs, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
#stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if not self.a2l:
print_error(tag, "Popen failed for command {}".format(a2lcmd))
return -1
retcode = self.a2l.poll()
if retcode:
print_error(tag, ("addr2line subprocess already "
"terminated, this is unexpected").format())
retcode = self.a2l.wait()
self.a2l = None
return -1
print_debug(tag, ("started addr2line subprocess with pid "
"{}").format(self.a2l.pid))
return 0
# "private" method:
def stop_addr2line(self):
tag = "{}.stop_addr2line".format(self.tag)
if not self.a2l:
print_debug(tag, ("self.a2l is None, addr2line subprocess "
"is already terminated (or was never started)").format())
return
# http://docs.python.org/3/library/subprocess.html#subprocess.Popen.communicate
print_debug(tag, ("sending Ctrl-d to addr2line subprocess {} to "
"terminate it").format(self.a2l.pid))
stop_input = ''
# Ctrl-d: hit Ctrl-v first in vim insert mode to 'type' this
# special key
#(out, err) = self.a2l.communicate(input=stop_input)
(out, err) = self.a2l.communicate(
input=bytearray(stop_input, 'utf-8'))
# does stop_input need to be converted to bytes?? Docs appear to
# say so, but code examples don't...
if self.a2l.returncode is None:
print_error_exit(tag, ("communicate() returned, but returncode "
"is not set yet!").format())
elif self.a2l.returncode != 0:
print_warning(tag, ("terminated addr2line subprocess returned "
"error code {}").format(self.a2l.returncode))
else:
print_debug(tag, ("addr2line subprocess terminated "
"successfully").format())
self.a2l = None
return
def __del__(self):
tag = "{}.__del__".format(self.tag)
if self.a2l:
self.stop_addr2line()
return
##############################################################################
# Converts instruction pointers to function names.
# Uses one addr2line_module object per file that we perform lookups in.
class ip_to_fn_converter:
tag = 'ip_to_fn_converter'
# Members:
a2lmap = None
def __init__(self):
tag = "{}.__init__".format(self.tag)
self.a2lmap = dict()
return
# Attempts to lookup the specified instruction pointer in the specified
# file (executable file or shared object file). vma_start_addr should
# be the address (as an int) where the file was mapped into the address
# space when the ip was captured. If this address is unknown, then
# setting it to 0 will likely still work for non-relocatable executable
# files, but the lookup will likely fail (or worse, succeed incorrectly)
# for relocatable object files or position-independent executables.
# Returns: function name on success, empty string '' if the lookup
# failed, or None if there was an error.
def lookup(self, objname, ip, vma_start_addr):
tag = "{}.lookup".format(self.tag)
if (not objname or not ip or type(objname) != str or type(ip) != int
or len(objname) == 0 or vma_start_addr is None or
type(vma_start_addr) != int):
print_error(tag, ("invalid argument: objname {} must be a "
"non-empty string, ip {} must be an int, vma_start_addr "
"must be an int").format(objname, ip, vma_start_addr))
return None
# We keep one addr2line_module object per file:
try:
a2l = self.a2lmap[objname]
print_debug(tag, ("got an existing addr2line instance for "
"objname {}").format(objname))
except KeyError:
print_debug(tag, ("creating a new addr2line instance for "
"objname {}").format(objname))
a2l = addr2line_module(objname)
if not a2l:
print_error(tag, ("addr2line_module constructor "
"failed, just returning {}").format(UNKNOWN_FN))
return UNKNOWN_FN
self.a2lmap[objname] = a2l
return a2l.ip_to_fn(ip, vma_start_addr)
def close(self):
tag = "{}.close".format(self.tag)
for a2l in self.a2lmap.values():
a2l.close()
self.a2lmap = None
return
def __del__(self):
tag = "{}.__del__".format(self.tag)
if self.a2lmap:
self.close()
return
if __name__ == '__main__':
print("Cannot run stand-alone")
sys.exit(1)
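# Illustrative sketch (not part of the original module): typical use of
# ip_to_fn_converter from analysis code.  The object path, instruction pointer
# and vma start address below are hypothetical values.
def _example_lookup():
    conv = ip_to_fn_converter()
    fn = conv.lookup('/lib/x86_64-linux-gnu/libc.so.6',
                     int('0x7f0000021a30', 16), int('0x7f0000000000', 16))
    conv.close()
    return fn if fn else UNKNOWN_FN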
| bsd-3-clause | 8,448,444,619,364,010,000 | 37.747731 | 117 | 0.678344 | false |
boegel/easybuild-easyblocks | easybuild/easyblocks/b/blacs.py | 1 | 8124 | ##
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing BLACS, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import glob
import re
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.run import run_cmd
# also used by ScaLAPACK
def det_interface(log, path):
"""Determine interface through 'xintface' heuristic tool"""
(out, _) = run_cmd(os.path.join(path, "xintface"), log_all=True, simple=False)
intregexp = re.compile(r".*INTFACE\s*=\s*-D(\S+)\s*")
res = intregexp.search(out)
if res:
return res.group(1)
else:
raise EasyBuildError("Failed to determine interface, output for xintface: %s", out)
class EB_BLACS(ConfigureMake):
"""
Support for building/installing BLACS
- configure: symlink BMAKES/Bmake.MPI-LINUX to Bmake.inc
- make install: copy files
"""
def configure_step(self):
"""Configure BLACS build by copying Bmake.inc file."""
src = os.path.join(self.cfg['start_dir'], 'BMAKES', 'Bmake.MPI-LINUX')
dest = os.path.join(self.cfg['start_dir'], 'Bmake.inc')
if not os.path.isfile(src):
raise EasyBuildError("Can't find source file %s", src)
if os.path.exists(dest):
raise EasyBuildError("Destination file %s exists", dest)
try:
shutil.copy(src, dest)
except OSError as err:
raise EasyBuildError("Copying %s to %s failed: %s", src, dest, err)
def build_step(self):
"""Build BLACS using build_step, after figuring out the make options based on the heuristic tools available."""
opts = {
'mpicc': "%s %s" % (os.getenv('MPICC'), os.getenv('CFLAGS')),
'mpif77': "%s %s" % (os.getenv('MPIF77'), os.getenv('FFLAGS')),
'f77': os.getenv('F77'),
'cc': os.getenv('CC'),
'builddir': os.getcwd(),
'mpidir': os.path.dirname(os.getenv('MPI_LIB_DIR')),
}
# determine interface and transcomm settings
comm = ''
interface = 'UNKNOWN'
try:
cwd = os.getcwd()
os.chdir('INSTALL')
# need to build
cmd = "make"
cmd += " CC='%(mpicc)s' F77='%(mpif77)s' MPIdir=%(mpidir)s" \
" MPILIB='' BTOPdir=%(builddir)s INTERFACE=NONE" % opts
# determine interface using xintface
run_cmd("%s xintface" % cmd, log_all=True, simple=True)
interface = det_interface(self.log, "./EXE")
# try and determine transcomm using xtc_CsameF77 and xtc_UseMpich
if not comm:
run_cmd("%s xtc_CsameF77" % cmd, log_all=True, simple=True)
(out, _) = run_cmd(self.toolchain.mpi_cmd_for("./EXE/xtc_CsameF77", 2), log_all=True, simple=False)
# get rid of first two lines, that inform about how to use this tool
out = '\n'.join(out.split('\n')[2:])
notregexp = re.compile("_NOT_")
if not notregexp.search(out):
# if it doesn't say '_NOT_', set it
comm = "TRANSCOMM='-DCSameF77'"
else:
(_, ec) = run_cmd("%s xtc_UseMpich" % cmd, log_all=False, log_ok=False, simple=False)
if ec == 0:
(out, _) = run_cmd(self.toolchain.mpi_cmd_for("./EXE/xtc_UseMpich", 2),
log_all=True, simple=False)
if not notregexp.search(out):
commregexp = re.compile(r'Set TRANSCOMM\s*=\s*(.*)$')
res = commregexp.search(out)
if res:
# found how to set TRANSCOMM, so set it
comm = "TRANSCOMM='%s'" % res.group(1)
else:
# no match, set empty TRANSCOMM
comm = "TRANSCOMM=''"
else:
# if it fails to compile, set empty TRANSCOMM
comm = "TRANSCOMM=''"
os.chdir(cwd)
except OSError as err:
raise EasyBuildError("Failed to determine interface and transcomm settings: %s", err)
opts.update({
'comm': comm,
'int': interface,
})
add_makeopts = ' MPICC="%(mpicc)s" MPIF77="%(mpif77)s" %(comm)s ' % opts
add_makeopts += ' INTERFACE=%(int)s MPIdir=%(mpidir)s BTOPdir=%(builddir)s mpi ' % opts
self.cfg.update('buildopts', add_makeopts)
super(EB_BLACS, self).build_step()
def install_step(self):
"""Install by copying files to install dir."""
# include files and libraries
for (srcdir, destdir, ext) in [
(os.path.join("SRC", "MPI"), "include", ".h"), # include files
("LIB", "lib", ".a"), # libraries
]:
src = os.path.join(self.cfg['start_dir'], srcdir)
dest = os.path.join(self.installdir, destdir)
try:
os.makedirs(dest)
os.chdir(src)
for lib in glob.glob('*%s' % ext):
# copy file
shutil.copy2(os.path.join(src, lib), dest)
self.log.debug("Copied %s to %s" % (lib, dest))
if destdir == 'lib':
# create symlink with more standard name for libraries
symlink_name = "lib%s.a" % lib.split('_')[0]
os.symlink(os.path.join(dest, lib), os.path.join(dest, symlink_name))
self.log.debug("Symlinked %s/%s to %s" % (dest, lib, symlink_name))
except OSError as err:
raise EasyBuildError("Copying %s/*.%s to installation dir %s failed: %s", src, ext, dest, err)
# utilities
src = os.path.join(self.cfg['start_dir'], 'INSTALL', 'EXE', 'xintface')
dest = os.path.join(self.installdir, 'bin')
try:
os.makedirs(dest)
shutil.copy2(src, dest)
self.log.debug("Copied %s to %s" % (src, dest))
except OSError as err:
raise EasyBuildError("Copying %s to installation dir %s failed: %s", src, dest, err)
def sanity_check_step(self):
"""Custom sanity check for BLACS."""
custom_paths = {
'files': [fil for filptrn in ["blacs", "blacsCinit", "blacsF77init"]
for fil in ["lib/lib%s.a" % filptrn,
"lib/%s_MPI-LINUX-0.a" % filptrn]] +
["bin/xintface"],
'dirs': []
}
super(EB_BLACS, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 | 4,030,611,416,693,584,400 | 35.430493 | 119 | 0.555268 | false |
lampertb/RPIAFIB | Software/afib_lib.py | 1 | 4157 | import sys
import numpy as np
from scipy import signal
# For testing
import csv
defaultWindowSize=120
defaultMinSNR=2
defaultNoisePercentage=10
defaultSampleRate=250
#The find peaks function takes in an array of data
#It returns an array of the peak locations after running the wavelet transform
def findPeaks(dataArray, windowSize=defaultWindowSize):
    peakIndex = signal.find_peaks_cwt(dataArray, np.arange(1, windowSize),
                                      min_snr=defaultMinSNR,
                                      noise_perc=defaultNoisePercentage)
    #print peakIndex
    return peakIndex
#Calculate the time interval (in seconds) between successive peaks
def getRR(peakIndex, sampleRate=defaultSampleRate):
    rr_data = []
    for i in range(0, len(peakIndex)-1):
        diff = peakIndex[i+1]-peakIndex[i]
        #print "peak1 {0} - peak2 {1} Diff {2}".format(peakIndex[i+1], peakIndex[i], diff)
        timeDelay = diff/float(sampleRate) #Get the time difference between samples
        rr_data.append(timeDelay)
        #sum+=timeDelay #create an average
    #print "Sum {0}, len {1}".format(sum, len(peakIndex))
    return rr_data
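# Illustrative example (not part of the original script): with the default
# 250 Hz sample rate, hypothetical peak indices [0, 200, 410] give RR
# intervals of 200/250 = 0.8 s and 210/250 = 0.84 s.
def _example_rr_intervals():
    return getRR([0, 200, 410])  # -> [0.8, 0.84]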
#AFib Detection Algorithm
from scipy.stats import norm
def Fib_Detection(x, seglen=128):
    N = len(x)
    tprmean = 0.65625; tprvar = 0.001369222
    # TPR mean and variance from rozinn database
    afstats = {}
    afstats['avg'] = []
    afstats['rmssd'] = []
    afstats['tpr'] = []
    afstats['se'] = []
    afstats['tprstat'] = []
    afstats['count'] = []
    for i in range(0, N-seglen+1):
        perc = i/N*100
        j = 0
        segment = x[i:i+seglen]
        #******************** Remove 16 outlier ********************************
        #* In the outlier removal, 8 maximum and 8 minimum values are discarded
        #***********************************************************************
        segment_outlier = segment[:]
        for j in range(0, 8):
            segment_outlier.remove(max(segment_outlier))
            segment_outlier.remove(min(segment_outlier))
        #print segment
        #print segment_outlier
        # Get mean
        afstats['avg'].append(np.mean(segment))
        # RMSSD
        difference = np.subtract(segment_outlier[2:seglen-16], segment_outlier[1:seglen-17])
        afstats['rmssd'].append(np.sqrt(np.sum(np.power(difference, 2))/(seglen-17))/afstats['avg'][i-1])
        # TPR
        j = 0
        for k in range(1, seglen-1):
            if ((segment[k]-segment[k-1])*(segment[k]-segment[k+1])>0):
                j = j+1
        afstats['tpr'].append(j/(seglen-2.0))
        # Shannon Entropy
        seg_max = np.max(segment_outlier)
        seg_min = np.min(segment_outlier)
        step = (seg_max-seg_min)/16.0
        entropy = 0
        if (step!=0):
            group1 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            for j in range(0, 112):
                z = int(np.around((segment_outlier[j]-seg_min)/step))
                group1[z] = group1[z]+1
            group1 = np.divide(group1, np.sum(group1)+0.0)
            for j in range(0, 16):
                if (group1[j]>0):
                    entropy = entropy+group1[j]*np.log(group1[j])
        afstats['se'].append(entropy/-2.7726)
        # Compute the afstats
        afstats['tprstat'].append(norm.cdf(afstats['tpr'][i-1], tprmean, np.sqrt(tprvar)))
        if(afstats['rmssd'][i-1]>=0.1 and afstats['tprstat'][i-1]>0.0001 and afstats['tprstat'][i-1] <= 0.9999 and afstats['se'][i-1] >=0.7):
            afstats['count'].append(1)
        else:
            afstats['count'].append(0)
    return afstats
#AFib Detection from ECG file
def afib_dect():
    inputFile = "0403_Normal_tiny.csv"
    ECG = []
    with open(inputFile, 'rb') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            ECG.append(float(row['ECG']))
    data = np.asarray(ECG)
    peakIndex = findPeaks(data, 200)
    rr_data = getRR(peakIndex)
    afstats = Fib_Detection(rr_data)
    # Write the AF window count to result.txt
    outputFile = "result.txt"
    result = "%d" % sum(afstats['count'])
    fp = open(outputFile, 'w')
    fp.write(result)
    fp.close()
    return sum(afstats['count']) > 1

afib_dect()
| mit | 1,566,503,293,678,278,400 | 32.256 | 141 | 0.577099 | false |
eubr-bigsea/tahiti | migrations/versions/38745782554d_adding_missing_port_interfaces.py | 1 | 5671 | # -*- coding: utf-8 -*-}
"""Adding missing port interfaces
Revision ID: 38745782554d
Revises: b2b823fe47b1
Create Date: 2017-06-07 15:16:30.224298
"""
from alembic import op
from sqlalchemy import Integer, String
from sqlalchemy.sql import table, column, text
# revision identifiers, used by Alembic.
revision = '38745782554d'
down_revision = 'b2b823fe47b1'
branch_labels = None
depends_on = None
data = [
(34, 5),
(55, 1),
(56, 1),
(57, 11),
(37, 2),
(37, 18),
# (46, 2),
# (46, 18),
(63, 1),
(64, 1),
(73, 19),
(100, 2),
(100, 19),
(161, 17)
]
def upgrade():
try:
op.execute(text('START TRANSACTION'))
insert_operation_port_interface()
insert_operation_port_interface_translation()
insert_operation_port_interface_operation_port()
insert_operation_platform()
insert_operation_translation()
except:
op.execute(text('ROLLBACK'))
raise
def insert_operation_translation():
tb = table(
'operation_translation',
column('id', Integer),
column('locale', String),
column('name', String),
column('description', String), )
columns = ('id', 'locale', 'name', 'description')
rows_data = [
(73, 'en', 'Regression Model', 'Regression Model'),
(73, 'pt', 'Modelo de Regressão', 'Modelo de Regressão'),
(74, 'en', 'Isotonic Regression', 'Isotonic Regression'),
(74, 'pt', 'Regressão Isotônica', 'Regressão Isotônica'),
(75, 'en', 'One Hot Encoder',
'One hot encoding transforms categorical '
'features to a format that works better with '
'classification and regression algorithms.'),
(75, 'pt', 'One Hot Encoder',
'One Hot encoding é uma transformação que fazemos nos '
'dados para representarmos uma variável categórica de '
'forma binária (indica presença ou ausência de um valor).'),
(76, 'en', 'AFT Survival Regression',
'Accelerated Failure Time (AFT) Model Survival Regression'),
(76, 'pt', 'Regressão AFT Survival',
'Accelerated Failure Time (AFT) Model Survival Regression'),
(77, 'en', 'GBT Regressor',
'Gradient-Boosted Trees (GBTs) learning algorithm for '
'regression. It supports both continuous and categorical features.'),
(77, 'pt', 'Regressor GBT',
'Gradient-Boosted Trees (GBTs) learning algorithm for '
'regression. It supports both continuous and categorical features.'),
(78, 'en', 'Random Forest Regressor',
'Random Forest learning algorithm for regression. '
'It supports both continuous and categorical features.'),
(78, 'pt', 'Regressor Random Forest',
'Random Forest learning algorithm for regression. '
'It supports both continuous and categorical features.'),
(79, 'en', 'Generalized Linear Regressor',
'Generalized Linear Regressor'),
(79, 'pt', 'Regressor Linear Generalizado',
'Regressor Linear Generalizado'),
]
rows = [dict(list(zip(columns, row))) for row in rows_data]
op.bulk_insert(tb, rows)
def insert_operation_platform():
tb = table(
'operation_platform',
column('operation_id', Integer),
column('platform_id', Integer), )
columns = ('operation_id', 'platform_id')
rows_data = [
(73, 1),
(74, 1),
(75, 1),
(76, 1),
(77, 1),
(78, 1),
(79, 1),
]
rows = [dict(list(zip(columns, row))) for row in rows_data]
op.bulk_insert(tb, rows)
def insert_operation_port_interface():
tb = table(
'operation_port_interface',
column('id', Integer),
column('color', String), )
columns = ('id', 'color')
interface_data = [
(19, '#AACC22')
]
rows = [dict(list(zip(columns, row))) for row in interface_data]
op.bulk_insert(tb, rows)
def insert_operation_port_interface_translation():
tb = table(
'operation_port_interface_translation',
column('id', Integer),
column('locale', String),
column('name', String), )
columns = ('id', 'locale', 'name')
interface_data = [
(19, 'pt', 'Visualização'),
(19, 'en', 'Visualization'),
]
rows = [dict(list(zip(columns, row))) for row in interface_data]
op.bulk_insert(tb, rows)
def insert_operation_port_interface_operation_port():
tb = table(
'operation_port_interface_operation_port',
column('operation_port_id', Integer),
column('operation_port_interface_id', Integer), )
columns = ('operation_port_id', 'operation_port_interface_id')
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def downgrade():
try:
for d in data:
op.execute(
text('DELETE FROM '
'operation_port_interface_operation_port '
'WHERE operation_port_id = {} '
' AND operation_port_interface_id = {}'.format(*d)))
op.execute(text('DELETE FROM operation_port_interface_translation '
'WHERE id = 19'))
op.execute(text('DELETE FROM operation_port_interface '
'WHERE id = 19'))
op.execute(text('DELETE FROM operation_platform '
'WHERE operation_id BETWEEN 73 AND 79'))
op.execute(text('DELETE FROM operation_translation '
'WHERE id BETWEEN 73 AND 79'))
except:
op.execute(text('ROLLBACK'))
raise
| apache-2.0 | 7,238,215,253,415,339,000 | 28.915344 | 76 | 0.582773 | false |
Barrog/C4-Datapack | data/jscript/quests/329_CuriosityOfDwarf/__init__.py | 1 | 2487 | # Made by Mr. - Version 0.3 by DrLecter
import sys
from net.sf.l2j.gameserver.model.quest import State
from net.sf.l2j.gameserver.model.quest import QuestState
from net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest
GOLEM_HEARTSTONE = 1346
BROKEN_HEARTSTONE = 1365
ADENA = 57
class Quest (JQuest) :

    def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)

    def onEvent (self,event,st) :
        htmltext = event
        if event == "7437-03.htm" :
            st.set("cond","1")
            st.setState(STARTED)
            st.playSound("ItemSound.quest_accept")
        elif event == "7437-06.htm" :
            st.exitQuest(1)
            st.playSound("ItemSound.quest_finish")
        return htmltext

    def onTalk (Self,npc,st) :
        npcId = npc.getNpcId()
        htmltext = "<html><head><body>I have nothing to say to you</body></html>"
        id = st.getState()
        if id == CREATED :
            st.set("cond","0")
        if int(st.get("cond"))==0 :
            if st.getPlayer().getLevel() >= 33 :
                htmltext = "7437-02.htm"
            else:
                htmltext = "7437-01.htm"
                st.exitQuest(1)
        else :
            heart = st.getQuestItemsCount(GOLEM_HEARTSTONE)
            broken = st.getQuestItemsCount(BROKEN_HEARTSTONE)
            if broken+heart > 0 :
                st.giveItems(ADENA,50*broken+1000*heart)
                st.takeItems(BROKEN_HEARTSTONE,-1)
                st.takeItems(GOLEM_HEARTSTONE,-1)
                htmltext = "7437-05.htm"
            else:
                htmltext = "7437-04.htm"
        return htmltext

    def onKill (self,npc,st):
        npcId = npc.getNpcId()
        n = st.getRandom(100)
        if npcId == 85 :
            if n < 5 :
                st.giveItems(GOLEM_HEARTSTONE,1)
                st.playSound("ItemSound.quest_itemget")
            elif n < 58 :
                st.giveItems(BROKEN_HEARTSTONE,1)
                st.playSound("ItemSound.quest_itemget")
        elif npcId == 83 :
            if n < 6 :
                st.giveItems(GOLEM_HEARTSTONE,1)
                st.playSound("ItemSound.quest_itemget")
            elif n < 56 :
                st.giveItems(BROKEN_HEARTSTONE,1)
                st.playSound("ItemSound.quest_itemget")
        return
QUEST = Quest(329,"329_CuriosityOfDwarf","Curiosity Of Dwarf")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(7437)
CREATED.addTalkId(7437)
STARTED.addTalkId(7437)
STARTED.addKillId(83)
STARTED.addKillId(85)
STARTED.addQuestDrop(85,BROKEN_HEARTSTONE,1)
STARTED.addQuestDrop(85,GOLEM_HEARTSTONE,1)
print "importing quests: 329: Curiosity Of Dwarf"
| gpl-2.0 | 4,741,307,329,138,383,000 | 27.918605 | 74 | 0.650985 | false |
skuda/client-python | kubernetes/client/models/v1beta1_role_ref.py | 1 | 4581 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1RoleRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_group=None, kind=None, name=None):
"""
V1beta1RoleRef - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_group': 'str',
'kind': 'str',
'name': 'str'
}
self.attribute_map = {
'api_group': 'apiGroup',
'kind': 'kind',
'name': 'name'
}
self._api_group = api_group
self._kind = kind
self._name = name
@property
def api_group(self):
"""
Gets the api_group of this V1beta1RoleRef.
APIGroup is the group for the resource being referenced
:return: The api_group of this V1beta1RoleRef.
:rtype: str
"""
return self._api_group
@api_group.setter
def api_group(self, api_group):
"""
Sets the api_group of this V1beta1RoleRef.
APIGroup is the group for the resource being referenced
:param api_group: The api_group of this V1beta1RoleRef.
:type: str
"""
if api_group is None:
raise ValueError("Invalid value for `api_group`, must not be `None`")
self._api_group = api_group
@property
def kind(self):
"""
Gets the kind of this V1beta1RoleRef.
Kind is the type of resource being referenced
:return: The kind of this V1beta1RoleRef.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1RoleRef.
Kind is the type of resource being referenced
:param kind: The kind of this V1beta1RoleRef.
:type: str
"""
if kind is None:
raise ValueError("Invalid value for `kind`, must not be `None`")
self._kind = kind
@property
def name(self):
"""
Gets the name of this V1beta1RoleRef.
Name is the name of resource being referenced
:return: The name of this V1beta1RoleRef.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1beta1RoleRef.
Name is the name of resource being referenced
:param name: The name of this V1beta1RoleRef.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
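# Illustrative sketch (not part of the generated client code): constructing a
# RoleRef the way it would appear inside an RBAC (Cluster)RoleBinding.  The
# field values are examples only.
def _example_role_ref():
    ref = V1beta1RoleRef(api_group='rbac.authorization.k8s.io',
                         kind='ClusterRole',
                         name='view')
    # to_dict() keys follow the Python attribute names:
    # {'api_group': 'rbac.authorization.k8s.io', 'kind': 'ClusterRole', 'name': 'view'}
    return ref.to_dict()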
| apache-2.0 | -5,183,765,010,237,596,000 | 25.633721 | 105 | 0.531107 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/multiprocessing/queues.py | 1 | 9842 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: queues.py
__all__ = [
'Queue', 'SimpleQueue', 'JoinableQueue']
import sys
import os
import threading
import collections
import time
import atexit
import weakref
from Queue import Empty, Full
import _multiprocessing
from multiprocessing import Pipe
from multiprocessing.synchronize import Lock, BoundedSemaphore, Semaphore, Condition
from multiprocessing.util import debug, info, Finalize, register_after_fork
from multiprocessing.forking import assert_spawning
class Queue(object):
def __init__(self, maxsize=0):
if maxsize <= 0:
maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
self._maxsize = maxsize
self._reader, self._writer = Pipe(duplex=False)
self._rlock = Lock()
self._opid = os.getpid()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = Lock()
self._sem = BoundedSemaphore(maxsize)
self._after_fork()
if sys.platform != 'win32':
register_after_fork(self, Queue._after_fork)
return
def __getstate__(self):
assert_spawning(self)
return (
self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)
def __setstate__(self, state):
self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid = state
self._after_fork()
def _after_fork(self):
debug('Queue._after_fork()')
self._notempty = threading.Condition(threading.Lock())
self._buffer = collections.deque()
self._thread = None
self._jointhread = None
self._joincancelled = False
self._closed = False
self._close = None
self._send = self._writer.send
self._recv = self._reader.recv
self._poll = self._reader.poll
return
def put(self, obj, block=True, timeout=None):
if not self._sem.acquire(block, timeout):
raise Full
self._notempty.acquire()
try:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._notempty.notify()
finally:
self._notempty.release()
return
def get(self, block=True, timeout=None):
if block and timeout is None:
self._rlock.acquire()
try:
res = self._recv()
self._sem.release()
return res
finally:
self._rlock.release()
else:
if block:
deadline = time.time() + timeout
if not self._rlock.acquire(block, timeout):
raise Empty
try:
if not self._poll(block and deadline - time.time() or 0.0):
raise Empty
res = self._recv()
self._sem.release()
return res
finally:
self._rlock.release()
return
def qsize(self):
return self._maxsize - self._sem._semlock._get_value()
def empty(self):
return not self._poll()
def full(self):
return self._sem._semlock._is_zero()
def get_nowait(self):
return self.get(False)
def put_nowait(self, obj):
return self.put(obj, False)
def close(self):
self._closed = True
self._reader.close()
if self._close:
self._close()
def join_thread(self):
debug('Queue.join_thread()')
if self._jointhread:
self._jointhread()
def cancel_join_thread(self):
debug('Queue.cancel_join_thread()')
self._joincancelled = True
try:
self._jointhread.cancel()
except AttributeError:
pass
def _start_thread(self):
debug('Queue._start_thread()')
self._buffer.clear()
self._thread = threading.Thread(target=Queue._feed, args=(
self._buffer, self._notempty, self._send,
self._wlock, self._writer.close), name='QueueFeederThread')
self._thread.daemon = True
debug('doing self._thread.start()')
self._thread.start()
debug('... done self._thread.start()')
created_by_this_process = self._opid == os.getpid()
if not self._joincancelled and not created_by_this_process:
self._jointhread = Finalize(self._thread, Queue._finalize_join, [
weakref.ref(self._thread)], exitpriority=-5)
self._close = Finalize(self, Queue._finalize_close, [
self._buffer, self._notempty], exitpriority=10)
@staticmethod
def _finalize_join(twr):
debug('joining queue thread')
thread = twr()
if thread is not None:
thread.join()
debug('... queue thread joined')
else:
debug('... queue thread already dead')
return
@staticmethod
def _finalize_close(buffer, notempty):
debug('telling queue thread to quit')
notempty.acquire()
try:
buffer.append(_sentinel)
notempty.notify()
finally:
notempty.release()
@staticmethod
def _feed(buffer, notempty, send, writelock, close):
debug('starting thread to feed data to pipe')
from .util import is_exiting
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
if sys.platform != 'win32':
wacquire = writelock.acquire
wrelease = writelock.release
else:
wacquire = None
try:
while 1:
nacquire()
try:
if not buffer:
nwait()
finally:
nrelease()
try:
while 1:
obj = bpopleft()
if obj is sentinel:
debug('feeder thread got sentinel -- exiting')
close()
return
if wacquire is None:
send(obj)
else:
wacquire()
try:
send(obj)
finally:
wrelease()
except IndexError:
pass
except Exception as e:
try:
if is_exiting():
info('error in queue thread: %s', e)
else:
import traceback
traceback.print_exc()
except Exception:
pass
return
_sentinel = object()
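# Illustrative sketch (not part of the original module): typical use of the
# Queue type implemented above, obtained through the multiprocessing.Queue
# factory.  Names and the transferred value are examples only.
def _example_queue_roundtrip():
    import multiprocessing
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=q.put, args=('hello',))
    p.start()
    item = q.get()   # blocks until the child's feeder thread pushes 'hello'
    p.join()
    return item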
class JoinableQueue(Queue):
def __init__(self, maxsize=0):
Queue.__init__(self, maxsize)
self._unfinished_tasks = Semaphore(0)
self._cond = Condition()
def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
def __setstate__(self, state):
Queue.__setstate__(self, state[:-2])
self._cond, self._unfinished_tasks = state[-2:]
def put(self, obj, block=True, timeout=None):
if not self._sem.acquire(block, timeout):
raise Full
self._notempty.acquire()
self._cond.acquire()
try:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._unfinished_tasks.release()
self._notempty.notify()
finally:
self._cond.release()
self._notempty.release()
return
def task_done(self):
self._cond.acquire()
try:
if not self._unfinished_tasks.acquire(False):
raise ValueError('task_done() called too many times')
if self._unfinished_tasks._semlock._is_zero():
self._cond.notify_all()
finally:
self._cond.release()
def join(self):
self._cond.acquire()
try:
if not self._unfinished_tasks._semlock._is_zero():
self._cond.wait()
finally:
self._cond.release()
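# Illustrative sketch (not part of the original module): the task_done()/join()
# protocol of JoinableQueue, via the multiprocessing.JoinableQueue factory.
# Assumes the POSIX 'fork' start method so the nested worker function is
# inherited by the child process; names are examples only.
def _example_joinable_queue(items):
    import multiprocessing

    def worker(q):
        while True:
            q.get()
            # ... process the item here ...
            q.task_done()

    q = multiprocessing.JoinableQueue()
    p = multiprocessing.Process(target=worker, args=(q,))
    p.daemon = True
    p.start()
    for item in items:
        q.put(item)
    q.join()   # returns once task_done() has been called for every item put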
class SimpleQueue(object):
def __init__(self):
self._reader, self._writer = Pipe(duplex=False)
self._rlock = Lock()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = Lock()
self._make_methods()
return
def empty(self):
return not self._reader.poll()
def __getstate__(self):
assert_spawning(self)
return (
self._reader, self._writer, self._rlock, self._wlock)
def __setstate__(self, state):
self._reader, self._writer, self._rlock, self._wlock = state
self._make_methods()
def _make_methods(self):
recv = self._reader.recv
racquire, rrelease = self._rlock.acquire, self._rlock.release
def get():
racquire()
try:
return recv()
finally:
rrelease()
self.get = get
if self._wlock is None:
self.put = self._writer.send
else:
send = self._writer.send
wacquire, wrelease = self._wlock.acquire, self._wlock.release
def put(obj):
wacquire()
try:
return send(obj)
finally:
wrelease()
self.put = put
return
| unlicense | 1,957,727,825,150,317,600 | 28.47006 | 106 | 0.517578 | false |